[
  {
    "path": ".circleci/config.yml",
    "content": "version: 2.1\n\nparameters:\n  nightly-toolchain:\n    type: string\n    default: \"nightly-2021-04-24\"\n\nexecutors:\n  default:\n    docker:\n      - image: filecoin/rust:latest\n    working_directory: /mnt/crate\n    resource_class: 2xlarge+\n  gpu:\n    machine:\n      image: ubuntu-1604-cuda-10.1:201909-23\n    working_directory: ~/gpuci\n    resource_class: gpu.nvidia.medium\n  arm:\n    machine:\n      image: ubuntu-2004:202101-01\n    resource_class: arm.large\n\nsetup-env: &setup-env\n  FIL_PROOFS_PARAMETER_CACHE: \"/tmp/filecoin-proof-parameters/\"\n  RUST_LOG: info\n\n\njobs:\n  ensure_groth_parameters_and_keys_linux:\n    executor: default\n    environment: *setup-env\n    steps:\n      - checkout\n      - restore_parameter_cache\n      - ensure_filecoin_parameters\n      - save_parameter_cache\n\n  cargo_fetch:\n    executor: default\n    environment: *setup-env\n    steps:\n      - checkout\n      - run:\n          name: Calculate dependencies\n          command: cargo generate-lockfile\n          no_output_timeout: 30m\n      - restore_rustup_cache\n      - run: rustup install $(cat rust-toolchain)\n      - run: rustup default $(cat rust-toolchain)\n      - run: rustup install << pipeline.parameters.nightly-toolchain >>\n      - run: rustup component add rustfmt-preview\n      - run: rustup component add clippy\n      - run: cargo update\n      - run: cargo fetch\n      - run: rustc +$(cat rust-toolchain) --version\n      - run: rustup toolchain list --verbose\n      - persist_to_workspace:\n          root: \".\"\n          paths:\n            - Cargo.lock\n      - save_rustup_cache\n  test:\n    executor: default\n    environment: *setup-env\n    parameters:\n      crate:\n        type: string\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - restore_parameter_cache\n      - run:\n          name: Test (<< parameters.crate >>)\n          command: cargo +$(cat rust-toolchain) test --verbose --package << parameters.crate >>\n          no_output_timeout: 30m\n\n  test_release:\n    executor: default\n    environment: *setup-env\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - restore_parameter_cache\n      - run:\n          name: Test in release profile\n          command: |\n            ulimit -n 20000\n            ulimit -u 20000\n            ulimit -n 20000\n            cargo +$(cat rust-toolchain) test --verbose --release --workspace\n            RUSTFLAGS=\"-D warnings\" cargo +$(cat rust-toolchain) build --examples --release --workspace\n          no_output_timeout: 30m\n\n  test_ignored_release:\n    executor: default\n    environment: *setup-env\n    parameters:\n      crate:\n        type: string\n      features:\n        type: string\n        default: \"\"\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - restore_parameter_cache\n      - run:\n          name: Test ignored in release profile\n          command: |\n              ulimit -n 20000\n              ulimit -u 20000\n              ulimit -n 20000\n              cd << parameters.crate >>\n              cargo test --release << parameters.features >> -- --ignored --nocapture\n          environment:\n            RUST_TEST_THREADS: 1\n          no_output_timeout: 30m\n\n\n  # Running with `use_multicore_sdr=true` should be integrated directly into the test code. 
For now we\n  # just re-run the lifecycle tests to exercise the use_multicore_sdr code path with that setting set.\n  test_multicore_sdr:\n    executor: default\n    environment: *setup-env\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - restore_parameter_cache\n      - run:\n          name: Test with use_multicore_sdr pairing enabled\n          command: |\n            ulimit -n 20000\n            ulimit -u 20000\n            ulimit -n 20000\n            cargo +<< pipeline.parameters.nightly-toolchain >> test --all --verbose --release lifecycle -- --ignored --nocapture\n            cargo +<< pipeline.parameters.nightly-toolchain >> test -p storage-proofs-porep --features single-threaded --release checkout_cores -- --test-threads=1\n          no_output_timeout: 30m\n          environment:\n            RUST_TEST_THREADS: 1\n            FIL_PROOFS_USE_MULTICORE_SDR: true\n\n      - run:\n          name: Test with use_multicore_sdr and blst enabled\n          command: |\n            ulimit -n 20000\n            ulimit -u 20000\n            ulimit -n 20000\n            cargo +<< pipeline.parameters.nightly-toolchain >> test --all --no-default-features --features gpu,blst --verbose --release  lifecycle -- --ignored --nocapture\n          no_output_timeout: 30m\n          environment:\n            RUST_TEST_THREADS: 1\n            FIL_PROOFS_USE_MULTICORE_SDR: true\n\n  test_gpu_tree_building:\n    executor: gpu\n    environment: *setup-env\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - restore_parameter_cache\n      - run:\n          name: Set the PATH env variable\n          command: |\n            echo 'export PATH=\"~/.cargo/bin:$PATH\"' | tee --append $BASH_ENV\n            source $BASH_ENV\n      - run:\n          name: Install required libraries for GPU support\n          command: |\n            sudo apt-get update -y\n            sudo apt install -y ocl-icd-opencl-dev libhwloc-dev\n      - run:\n          name: Test with GPU column and tree builders.\n          command: |\n            ulimit -n 20000\n            ulimit -u 20000\n            ulimit -n 20000\n            cargo +<< pipeline.parameters.nightly-toolchain >> test --all --no-default-features --features gpu,blst --verbose --release lifecycle -- --ignored --nocapture\n          no_output_timeout: 30m\n          environment:\n            RUST_TEST_THREADS: 1\n            FIL_PROOFS_USE_GPU_COLUMN_BUILDER: true\n            FIL_PROOFS_USE_GPU_TREE_BUILDER: true\n\n  test_no_gpu:\n    executor: default\n    environment: *setup-env\n    parameters:\n      features:\n        type: string\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - restore_parameter_cache\n      - run:\n          name: Test with no gpu (<< parameters.features >>)\n          command: |\n            cargo +<< pipeline.parameters.nightly-toolchain >> test --all --verbose --no-default-features --features << parameters.features >>\n          no_output_timeout: 30m\n\n  test_arm_no_gpu:\n    executor: arm\n    environment: *setup-env\n    parameters:\n      features:\n        type: string\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - restore_parameter_cache\n      - run:\n          name: Install Rust\n          command: |\n            curl https://sh.rustup.rs -sSf | sh -s -- -y\n      - run: 
rustup install $(cat rust-toolchain)\n      - run: rustup default $(cat rust-toolchain)\n      - run: rustup install << pipeline.parameters.nightly-toolchain >>\n      - run: cargo update\n      - run: cargo fetch\n      - run:\n          name: Install required libraries\n          command: |\n            sudo apt-get update -y\n            sudo apt install -y libhwloc-dev\n      - run:\n          name: Test arm with no gpu (<< parameters.features >>)\n          command: |\n            cargo +<< pipeline.parameters.nightly-toolchain >> -Zpackage-features test --release --all --verbose --no-default-features --features << parameters.features >>\n          no_output_timeout: 90m\n\n  test_blst:\n    executor: default\n    environment: *setup-env\n    parameters:\n      crate:\n        type: string\n      features:\n        type: string\n        default: \"gpu,blst\"\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - restore_parameter_cache\n      - run:\n          name: Test ignored with blst enabled (<< parameters.crate >>)\n          command: |\n            ulimit -n 20000\n            ulimit -u 20000\n            ulimit -n 20000\n            RUST_LOG=trace cargo +<< pipeline.parameters.nightly-toolchain >> test --no-default-features --features << parameters.features >> --verbose --release --package << parameters.crate >> -- --nocapture\n          no_output_timeout: 30m\n          environment:\n            RUST_TEST_THREADS: 1\n\n  test_blst_ignored:\n    executor: default\n    environment: *setup-env\n    parameters:\n      crate:\n        type: string\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - restore_parameter_cache\n\n      - run:\n          name: Test with blst enabled (<< parameters.crate >>)\n          command: |\n            ulimit -n 20000\n            ulimit -u 20000\n            ulimit -n 20000\n            cargo +<< pipeline.parameters.nightly-toolchain >> test --no-default-features --features gpu,blst --verbose --package << parameters.crate >> --release -- --ignored --nocapture\n          no_output_timeout: 30m\n\n\n  bench:\n    executor: default\n    environment: *setup-env\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - restore_parameter_cache\n      - run:\n          name: Benchmarks\n          command: cargo +$(cat rust-toolchain) build --benches --verbose --workspace\n          no_output_timeout: 15m\n\n  rustfmt:\n    executor: default\n    environment: *setup-env\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - run:\n          name: Run cargo fmt\n          command: cargo fmt --all -- --check\n\n  clippy:\n    executor: default\n    environment: *setup-env\n    steps:\n      - checkout\n      - attach_workspace:\n          at: \".\"\n      - restore_rustup_cache\n      - run:\n          name: Run cargo clippy\n          command: cargo +$(cat rust-toolchain) clippy --workspace\n  test_darwin:\n    macos:\n      xcode: \"10.0.0\"\n    working_directory: ~/crate\n    resource_class: large\n    environment: *setup-env\n    steps:\n      - checkout\n      - run:\n          name: Install hwloc 2.3.0\n          command: |\n            cd /tmp\n            curl https://download.open-mpi.org/release/hwloc/v2.3/hwloc-2.3.0.tar.gz --location --output /tmp/hwloc-2.3.0.tar.gz\n            tar xzvf 
hwloc-2.3.0.tar.gz\n            cd hwloc-2.3.0\n            ./configure\n            make\n            sudo make install\n      - run:\n          name: Install Rust\n          command: |\n            curl https://sh.rustup.rs -sSf | sh -s -- -y\n      - run: rustup install $(cat rust-toolchain)\n      - run: rustup default $(cat rust-toolchain)\n      - run: cargo update\n      - run: cargo fetch\n      - run:\n          name: Test Darwin\n          command: |\n            sudo ulimit -n 20000\n            sudo ulimit -u 20000\n            ulimit -n 20000\n            cargo +$(cat rust-toolchain) test --release --verbose --workspace -- --nocapture\n          no_output_timeout: 2h\n\ncommands:\n  ensure_filecoin_parameters:\n    steps:\n      - run:\n          name: Build paramcache if it doesn't already exist\n          command: |\n            set -x; test -f /tmp/paramcache.awesome \\\n            || (cargo build --release --workspace && find . -type f -name paramcache | xargs -I {} mv {} /tmp/paramcache.awesome)\n      - run:\n          name: Obtain filecoin groth parameters\n          command: /tmp/paramcache.awesome --sector-sizes='2048,4096,16384,32768'\n          no_output_timeout: 60m\n      - run:\n          name: Make the parameters world readable\n          command: chmod -R 755 ${FIL_PROOFS_PARAMETER_CACHE}\n  save_rustup_cache:\n    steps:\n      # Copy things from the home directory to `/tmp` first, so that they can be\n      # restored on executors that have a different home directory.\n      - run: cp -R ~/.cargo ~/.rustup /tmp/\n      - save_cache:\n          name: \"Save rustup cache\"\n          key: cargo-v28-e-{{ checksum \"rust-toolchain\" }}-{{ checksum \"Cargo.toml\" }}-{{ checksum \"Cargo.lock\" }}\n          paths:\n            - /tmp/.cargo\n            - /tmp/.rustup\n  restore_rustup_cache:\n    steps:\n      - restore_cache:\n          name: \"Restore rustup cache\"\n          key: cargo-v28-e-{{ checksum \"rust-toolchain\" }}-{{ checksum \"Cargo.toml\" }}-{{ checksum \"Cargo.lock\" }}\n      # Cache might not be created yet, hence ignore if the copy fails\n      - run: cp -R /tmp/.cargo /tmp/.rustup ~/ || true\n  save_parameter_cache:\n    steps:\n      - save_cache:\n          name: \"Save parameter cache\"\n          key: proof-params-v28-e-{{ checksum \"filecoin-proofs/parameters.json\" }}\n          paths:\n            - \"/tmp/paramcache.awesome\"\n            - \"/tmp/filecoin-proof-parameters/\"\n  restore_parameter_cache:\n    steps:\n      - restore_cache:\n          name: \"Restore parameter cache\"\n          key: proof-params-v28-e-{{ checksum \"filecoin-proofs/parameters.json\" }}\n\nworkflows:\n  version: 2\n  test_all:\n    jobs:\n      - ensure_groth_parameters_and_keys_linux\n      - cargo_fetch\n      - rustfmt:\n          requires:\n            - cargo_fetch\n      - clippy:\n          requires:\n            - cargo_fetch\n\n      - test_release:\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_ignored_release:\n          name: test_ignored_release_storage_proofs_post\n          crate: \"storage-proofs-post\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_ignored_release:\n          name: test_ignored_release_storage_proofs_core\n          crate: \"storage-proofs-core\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n          
  \n      - test_ignored_release:\n          name: test_ignored_release_storage_proofs_porep\n          crate: \"storage-proofs-porep\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n            \n      - test_ignored_release:\n          name: test_ignored_release_filecoin_proofs\n          crate: \"filecoin-proofs\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_gpu_tree_building:\n          requires:\n            #- cargo_fetch_gpu\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_multicore_sdr:\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_blst:\n          name: test_blst_filecoin_proofs\n          crate: \"filecoin-proofs\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_blst_ignored:\n          name: test_blst_ignored_filecoin_proofs\n          crate: \"filecoin-proofs\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test:\n          name: test_filecoin_proofs\n          crate: \"filecoin-proofs\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_blst:\n          name: test_blst_storage_proofs_core\n          crate: \"storage-proofs-core\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_blst_ignored:\n          name: test_blst_ignored_storage_proofs_core\n          crate: \"storage-proofs-core\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test:\n          name: test_storage_proofs_core\n          crate: \"storage-proofs-core\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_blst:\n          name: test_blst_storage_proofs_post\n          crate: \"storage-proofs-post\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_blst_ignored:\n          name: test_blst_ignored_storage_proofs_post\n          crate: \"storage-proofs-post\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test:\n          name: test_storage_proofs_post\n          crate: \"storage-proofs-post\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n\n      - test_blst:\n          name: test_blst_storage_proofs_porep\n          crate: \"storage-proofs-porep\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_blst_ignored:\n          name: test_blst_ignored_storage_proofs_porep\n          crate: \"storage-proofs-porep\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test:\n          name: test_storage_proofs_porep\n          crate: \"storage-proofs-porep\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n\n      - test_blst:\n          name: test_blst_fil_proofs_tooling\n          crate: \"fil-proofs-tooling\"\n          requires:\n            - cargo_fetch\n            - 
ensure_groth_parameters_and_keys_linux\n\n      - test:\n          name: test_fil_proofs_tooling\n          crate: \"fil-proofs-tooling\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test:\n          name: test_sha2raw\n          crate: \"sha2raw\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n            \n      - test_blst:\n          name: test_blst_filecoin_hashers\n          crate: \"filecoin-hashers\"\n          features: \"blst,gpu,poseidon,sha256,blake2s\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test:\n          name: test_filecoin_hashers\n          crate: \"filecoin-hashers\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n\n      - test_blst:\n          name: test_blst_fil_proofs_param\n          crate: \"fil-proofs-param\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test:\n          name: test_fil_proofs_param\n          crate: \"fil-proofs-param\"\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_no_gpu:\n          name: test_no_gpu_pairing\n          features: 'pairing'\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_no_gpu:\n          name: test_no_gpu_blst\n          features: 'blst'\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_arm_no_gpu:\n          name: test_arm_no_gpu_pairing\n          features: 'pairing'\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - test_arm_no_gpu:\n          name: test_arm_no_gpu_blst\n          features: 'blst'\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n\n      - bench:\n          requires:\n            - cargo_fetch\n            - ensure_groth_parameters_and_keys_linux\n      - test_darwin\n\n      - test:\n          name: test_fr32\n          crate: \"fr32\"\n          requires:\n            - cargo_fetch\n\n      - test_blst:\n          name: test_blst_fr32\n          crate: \"fr32\"\n          requires:\n            - cargo_fetch\n"
  },
  {
    "path": ".clippy.toml",
    "content": "type-complexity-threshold = 400"
  },
  {
    "path": ".dockerignore",
    "content": ".git\n/target/*\n"
  },
  {
    "path": ".gitignore",
    "content": "/target\n**/*.rs.bk\nCargo.lock\n.criterion\n**/*.h\nheaptrack*\n.bencher\n*.profile\n*.heap\nrust-fil-proofs.config.toml\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to rust-fil-proofs will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),\nand this project adheres to [Semantic Versioning](https://book.async.rs/overview/stability-guarantees.html).\n\n## Unreleased\n\n## [7.0.1] - 2021-05-06\n\n- Added Apple M1 asm support via updated sha2 dependency [#1457](https://github.com/filecoin-project/rust-fil-proofs/pull/1457)\n- Remove additional build warnings and update CI nightly toolchain [#1456](https://github.com/filecoin-project/rust-fil-proofs/pull/1456)\n- Fix aarch64/Linux build regression [#1455](https://github.com/filecoin-project/rust-fil-proofs/pull/1455)\n- Fix changelog errors and typos [#1451](https://github.com/filecoin-project/rust-fil-proofs/pull/1451)\n- Fix initial value for cache_count [#1454](https://github.com/filecoin-project/rust-fil-proofs/pull/1454)\n\n## [7.0.0] - 2021-04-28\n\n- Split up non-gpu tests for improved CI [#1448](https://github.com/filecoin-project/rust-fil-proofs/pull/1448)\n- Use latest version of dialoguer [#1447](https://github.com/filecoin-project/rust-fil-proofs/pull/1447)\n- Fix circuitinfo's binary name [#1443](https://github.com/filecoin-project/rust-fil-proofs/pull/1443)\n- Remove deprecated calls and clean-up warnings; add parallelization [#1436](https://github.com/filecoin-project/rust-fil-proofs/pull/1436)\n- Migrate gpu2 to default gpu code; Update rust toolchain to 1.51.0 [#1441](https://github.com/filecoin-project/rust-fil-proofs/pull/1441)\n- Improve unsealing memory performance [#1401](https://github.com/filecoin-project/rust-fil-proofs/pull/1401)\n- Update codeowners to current [#1432](https://github.com/filecoin-project/rust-fil-proofs/pull/1432)\n- Update config.json for the benches [#1431](https://github.com/filecoin-project/rust-fil-proofs/pull/1431)\n\n## [6.1.0] - 2021-03-09\n\n- Update bellperson to the latest version [#1430](https://github.com/filecoin-project/rust-fil-proofs/pull/1430)\n- Remove unused metrics capture CI job [#1428](https://github.com/filecoin-project/rust-fil-proofs/pull/1428)\n- Split up pc1/pc2 in the Window PoSt bench [#1427](https://github.com/filecoin-project/rust-fil-proofs/pull/1427)\n- Use `compress,asm` features of sha2 for aarch64 [#1404](https://github.com/filecoin-project/rust-fil-proofs/pull/1404)\n- Add gpu2, an optional feature that uses `neptune`'s opencl backend [#1397](https://github.com/filecoin-project/rust-fil-proofs/pull/1397)\n- Clean-up imports and remove globs [#1394](https://github.com/filecoin-project/rust-fil-proofs/pull/1394)\n- Remove `storage-proofs` sub-crate [#1393](https://github.com/filecoin-project/rust-fil-proofs/pull/1393)\n- Re-factor parameter related binaries [#1392](https://github.com/filecoin-project/rust-fil-proofs/pull/1392)\n- Fix merkle bench for poseidon hashing [#1389](https://github.com/filecoin-project/rust-fil-proofs/pull/1389)\n- Move `phase2` code into its own crate [#1388](https://github.com/filecoin-project/rust-fil-proofs/pull/1388)\n- Move `fr32` into its own crate [#1387](https://github.com/filecoin-project/rust-fil-proofs/pull/1387)\n- Ensure that builds without gpu support work [#1386](https://github.com/filecoin-project/rust-fil-proofs/pull/1386)\n- Increase parallelism in fallback PoSt [#1384](https://github.com/filecoin-project/rust-fil-proofs/pull/1384)\n- Move checkout_cores test behing a single-threaded feature [#1383](https://github.com/filecoin-project/rust-fil-proofs/pull/1383)\n- Improve the 
cache preservation in Window PoSt bench [#1382](https://github.com/filecoin-project/rust-fil-proofs/pull/1382)\n- Correct some typos in the Changelog [#1381](https://github.com/filecoin-project/rust-fil-proofs/pull/1381)\n\n## [6.0.0] - 2020-12-01\n\n- Add PoR gadget that does not add a public input [#1374](https://github.com/filecoin-project/rust-fil-proofs/pull/1374)\n- Update README and fix some typos [#1377](https://github.com/filecoin-project/rust-fil-proofs/pull/1377)\n- Update bellperson using new blstrs, which in turn now uses`blst@0.3.2` [#1376](https://github.com/filecoin-project/rust-fil-proofs/pull/1376)\n- Fix tree_c and tree_r_last generation in GPU mode [#1375](https://github.com/filecoin-project/rust-fil-proofs/pull/1375)\n- Add API version enum for determining runtime behaviour [#1362](https://github.com/filecoin-project/rust-fil-proofs/pull/1362)\n- Parallelize CI test runs across packages [#1358](https://github.com/filecoin-project/rust-fil-proofs/pull/1358)\n- Update paramcache run for metrics capture CI job [#1363](https://github.com/filecoin-project/rust-fil-proofs/pull/1363)\n- Re-organize filecoin-proofs source [#1352](https://github.com/filecoin-project/rust-fil-proofs/pull/1352)\n- Move hashers into `filecoin-hashers` crate [#1356](https://github.com/filecoin-project/rust-fil-proofs/pull/1356)\n- Speed up Fr32Reader [#1341](https://github.com/filecoin-project/rust-fil-proofs/pull/1341)\n- Serialize GPU tree building with GPU lock [#1335](https://github.com/filecoin-project/rust-fil-proofs/pull/1335)\n- Disable `phase2` tests that require external files [#1342](https://github.com/filecoin-project/rust-fil-proofs/pull/1342)\n- Move `phase2` into its own crate [#1340](https://github.com/filecoin-project/rust-fil-proofs/pull/1340)\n- Raise soft fdlimit to max at runtime (OS X/Linux) [#1338](https://github.com/filecoin-project/rust-fil-proofs/pull/1338)\n- Improve clippy lints (rust 2018 idioms) [#1337](https://github.com/filecoin-project/rust-fil-proofs/pull/1337)\n\n## [5.4.0] - 2020-11-02\n\n- Fix graph generation [#1336](https://github.com/filecoin-project/rust-fil-proofs/pull/1336)\n\n## [5.3.0] - 2020-10-29\n\n- Integrate blst backend and proof verification optimizations [#1332](https://github.com/filecoin-project/rust-fil-proofs/pull/1332)\n- Remove unused pedersen hasher [#1331](https://github.com/filecoin-project/rust-fil-proofs/pull/1331)\n- Sanity check commitments [#1330](https://github.com/filecoin-project/rust-fil-proofs/pull/1330)\n- Install hwloc to fix metrics capture on CI [#1328](https://github.com/filecoin-project/rust-fil-proofs/pull/1328)\n- Remove no longer used exports [#1315](https://github.com/filecoin-project/rust-fil-proofs/pull/1315)\n- Add tests for resumable sealing [#1309](https://github.com/filecoin-project/rust-fil-proofs/pull/1309)\n- Add circuitinfo CLI tool to count circuit constraints [#1325](https://github.com/filecoin-project/rust-fil-proofs/pull/1325)\n- Remove mutex from settings access [#1321](https://github.com/filecoin-project/rust-fil-proofs/pull/1321)\n- Add SECURITY.md [#1317](https://github.com/filecoin-project/rust-fil-proofs/pull/1317)\n- Update hwloc dependency for CI [#1316](https://github.com/filecoin-project/rust-fil-proofs/pull/1316)\n\n## [5.2.3] - 2020-10-13\n\n- Update neptune dependency version\n\n## [5.2.2] - 2020-10-13\n\n- Add notes about param and cache verification [#1313](https://github.com/filecoin-project/rust-fil-proofs/pull/1313)\n- Update incorrect log message 
[#1312](https://github.com/filecoin-project/rust-fil-proofs/pull/1312)\n- Bind threads to cores in multicore SDR [#1305](https://github.com/filecoin-project/rust-fil-proofs/pull/1305)\n- Add hwloc dependency to CI [#1307](https://github.com/filecoin-project/rust-fil-proofs/pull/1307)\n\n## [5.2.1] - 2020-10-01\n\n- Pin neptune to version 1.2.x [#1302](https://github.com/filecoin-project/rust-fil-proofs/pull/1302)\n- Add correct sizes for metrics capture CI [#1301](https://github.com/filecoin-project/rust-fil-proofs/pull/1301)\n- Ensure all PoSt code paths are tested [#1299](https://github.com/filecoin-project/rust-fil-proofs/pull/1299)\n- Add byte_unit dep for handling benchy input sizes [#1297](https://github.com/filecoin-project/rust-fil-proofs/pull/1297)\n- Implement prefetch macro for aarch64 [#1294](https://github.com/filecoin-project/rust-fil-proofs/pull/1294)\n\n## [5.2.0] - 2020-09-28\n\n- Add Seal resume by skipping existing layers [#1292](https://github.com/filecoin-project/rust-fil-proofs/pull/1292)\n- Use two producers in all layers [#1296](https://github.com/filecoin-project/rust-fil-proofs/pull/1296)\n- Re-export some methods that moved for api access [#1291](https://github.com/filecoin-project/rust-fil-proofs/pull/1291)\n- Update rustc to 1.46.0 [#1290](https://github.com/filecoin-project/rust-fil-proofs/pull/1290)\n- Optimize Phase 1 (Replication) [#1289](https://github.com/filecoin-project/rust-fil-proofs/pull/1289)\n- Add Seal resume testing to the Window PoSt bench [#1288](https://github.com/filecoin-project/rust-fil-proofs/pull/1288)\n- Add labeling test vectors [#1285](https://github.com/filecoin-project/rust-fil-proofs/pull/1285)\n- Remove artificial requirement that sector count be 1 for single vanilla proof [#1283](https://github.com/filecoin-project/rust-fil-proofs/pull/1283)\n- Add Parent Cache and parameter verification and settings to enable [#1265](https://github.com/filecoin-project/rust-fil-proofs/pull/1265)\n- Improve SectorId logging [#1280](https://github.com/filecoin-project/rust-fil-proofs/pull/1280)\n- Split up Window PoSt API into separate calls [#1278](https://github.com/filecoin-project/rust-fil-proofs/pull/1278)\n- Destructure settings [#1273](https://github.com/filecoin-project/rust-fil-proofs/pull/1273)\n\n## [5.1.4] - 2020-09-08\n\n- Add FaultySectors error to Fallback PoSt [#1274](https://github.com/filecoin-project/rust-fil-proofs/pull/1274)\n\n## [5.1.3] - 2020-09-07\n\n- Make fil-blst usage in Window PoSt possible [#1272](https://github.com/filecoin-project/rust-fil-proofs/pull/1272)\n\n## [5.1.2] - 2020-09-03\n\n- Accelerate SNARK verification [#1271](https://github.com/filecoin-project/rust-fil-proofs/pull/1271)\n- Decompress proofs in parallel [#1268](https://github.com/filecoin-project/rust-fil-proofs/pull/1268)\n- Eliminate wasteful public-input conversions [#1267](https://github.com/filecoin-project/rust-fil-proofs/pull/1267)\n- Remove usage of unwrap [#1260](https://github.com/filecoin-project/rust-fil-proofs/pull/1260)\n- Pin params to the filecoin collab cluster [#1263](https://github.com/filecoin-project/rust-fil-proofs/pull/1263)\n\n## [5.1.1] - 2020-08-12\n\n- Only perform subgroup check on 'after' params [#1258](https://github.com/filecoin-project/rust-fil-proofs/pull/1258)\n\n## [5.1.0] - 2020-08-12\n\n- Add Phase2 cli verify raw g1 point command [#1256](https://github.com/filecoin-project/rust-fil-proofs/pull/1256)\n\n## [5.0.0] - 2020-08-10\n\n- Publish v28 parameters and update Changelog for release 
[#1254](https://github.com/filecoin-project/rust-fil-proofs/pull/1254)\n- Fix benchmark examples in README [#1253](https://github.com/filecoin-project/rust-fil-proofs/pull/1253)\n- Remove unused dependencies [#1124](https://github.com/filecoin-project/rust-fil-proofs/pull/1124) and [#1252](https://github.com/filecoin-project/rust-fil-proofs/pull/1252)\n- Add script to validate parameter checksums in parameters.json [#1251](https://github.com/filecoin-project/rust-fil-proofs/pull/1251)\n- phase2-cli force small-raw contributions [#1248](https://github.com/filecoin-project/rust-fil-proofs/pull/1248)\n- phase2-cli parse command [#1247](https://github.com/filecoin-project/rust-fil-proofs/pull/1247)\n- phase2-cli merge command [#1242](https://github.com/filecoin-project/rust-fil-proofs/pull/1242)\n- phase2-cli paramgen and filename parsing [#1240](https://github.com/filecoin-project/rust-fil-proofs/pull/1240)\n- Verify transitions from non-raw to raw parameters in phase2-cli [#1239](https://github.com/filecoin-project/rust-fil-proofs/pull/1239)\n- Add a check parameter command that maps parameter files [#1238](https://github.com/filecoin-project/rust-fil-proofs/pull/1238)\n- Add tool to split phase2 parameters [#1235](https://github.com/filecoin-project/rust-fil-proofs/pull/1235)\n\n## [4.0.5] - 2020-07-28\n\n- Include proofs and snark security audit documents, with updated references [#1233](https://github.com/filecoin-project/rust-fil-proofs/pull/1233)\n- Remove `stacked` benchmark from benchy (broken) [#1229](https://github.com/filecoin-project/rust-fil-proofs/pull/1229)\n- Update range for feistel tests [#1228](https://github.com/filecoin-project/rust-fil-proofs/pull/1228)\n- Allow for compilation on aarch64 [#1204](https://github.com/filecoin-project/rust-fil-proofs/pull/1204)\n- Implement `fauxrep2`: a testable fake replication API [#1218](https://github.com/filecoin-project/rust-fil-proofs/pull/1218)\n- Fix CI `metrics_capture` jobs from consistently failing [#1215](https://github.com/filecoin-project/rust-fil-proofs/pull/1215)\n- Correct `rows_to_discard` value during post [#1220](https://github.com/filecoin-project/rust-fil-proofs/pull/1220)\n\n## [4.0.4] - 2020-07-15\n\n- Default parent cache path to use FIL_PROOFS_CACHE_DIR if set [#1207](https://github.com/filecoin-project/rust-fil-proofs/pull/1207)\n- Investigate CI metrics capture [#1212](https://github.com/filecoin-project/rust-fil-proofs/pull/1212) and [#1213](https://github.com/filecoin-project/rust-fil-proofs/pull/1213)\n- Additional README updates and corrections [#1211](https://github.com/filecoin-project/rust-fil-proofs/pull/1211)\n- Update README [#1208](https://github.com/filecoin-project/rust-fil-proofs/pull/1208)\n- Swap buffers instead of memcpy in generate_labels [#1197](https://github.com/filecoin-project/rust-fil-proofs/pull/1197)\n- Apply suggested security audit fixes [#1196](https://github.com/filecoin-project/rust-fil-proofs/pull/1196)\n- Make pieces::Stack methods private [#1202](https://github.com/filecoin-project/rust-fil-proofs/pull/1202)\n- Remove dead code [#1201](https://github.com/filecoin-project/rust-fil-proofs/pull/1201)\n- Test feistel implementation is a valid permutation [#1193](https://github.com/filecoin-project/rust-fil-proofs/pull/1193)\n\n## [4.0.3] - 2020-07-01\n\n- Add fauxrep to API for fake sealing [#1194](https://github.com/filecoin-project/rust-fil-proofs/pull/1194)\n- Streaming phase2 contribution and fast I/O [#1188](https://github.com/filecoin-project/rust-fil-proofs/pull/1188)\n- Add 
omitted changelog updates [#1190](https://github.com/filecoin-project/rust-fil-proofs/pull/1190)\n\n## [4.0.2] - 2020-06-25\n\n- Allow parameters map to be accessible externally [#1186](https://github.com/filecoin-project/rust-fil-proofs/pull/1186)\n- Extend update_tree_r_cache command with new features [#1175](https://github.com/filecoin-project/rust-fil-proofs/pull/1175)\n- Add OpenCL to the build instructions [#1112](https://github.com/filecoin-project/rust-fil-proofs/pull/1112)\n- Use file locking for cache generation [#1179](https://github.com/filecoin-project/rust-fil-proofs/pull/1179)\n- Add logging to all public API functions [#1137](https://github.com/filecoin-project/rust-fil-proofs/pull/1137)\n- Upgrade some dependencies [#1126](https://github.com/filecoin-project/rust-fil-proofs/pull/1126)\n- Fix clippy warnings [#1147](https://github.com/filecoin-project/rust-fil-proofs/pull/1147)\n- Partial caching for SDR [#1163](https://github.com/filecoin-project/rust-fil-proofs/pull/1163)\n- Add tool to rebuild tree_r_last from a replica [#1170](https://github.com/filecoin-project/rust-fil-proofs/pull/1170)\n- Verify consistent use of porep_id when sealing [#1167](https://github.com/filecoin-project/rust-fil-proofs/pull/1167)\n\n## [4.0.1] - 2020-06-22\n\n- This release is a hotfix that pinned dependencies to avoid a build break [#1182](https://github.com/filecoin-project/rust-fil-proofs/pull/1182)\n\n## [4.0.0] - 2020-06-15\n\n- Change default rows_to_discard for cached oct-trees [#1165](https://github.com/filecoin-project/rust-fil-proofs/pull/1165)\n- Remove validate commit message [#1164](https://github.com/filecoin-project/rust-fil-proofs/pull/1164)\n- Modularized window-post bench [#1162](https://github.com/filecoin-project/rust-fil-proofs/pull/1162)\n- Updated reported PoSt constraints (in comments) [#1161](https://github.com/filecoin-project/rust-fil-proofs/pull/1161)\n\n## [3.0.0] - 2020-06-08\n\n- Publish v27 parameters: [#1158](https://github.com/filecoin-project/rust-fil-proofs/pull/1158)\n- Update toolchain to rust stable: [#1149](https://github.com/filecoin-project/rust-fil-proofs/pull/1149)\n- Allow tree_r_last to be built on the GPU: [#1138](https://github.com/filecoin-project/rust-fil-proofs/pull/1138)\n  - Improve performance of building tree_c on the GPU\n  - Properly remove tree_c when no longer needed\n  - Update circuit test constraints\n- Update neptune dependency version: [#1159](https://github.com/filecoin-project/rust-fil-proofs/pull/1159)\n- Update total challenge count and increase partitions: [#1153](https://github.com/filecoin-project/rust-fil-proofs/pull/1153)\n- Improve UX of paramcache: [#1152](https://github.com/filecoin-project/rust-fil-proofs/pull/1152)\n- Add porep_id to construct replica_id and graph seeds: [#1144](https://github.com/filecoin-project/rust-fil-proofs/pull/1144)\n- Include layer index before node when creating label preimage: [#1139](https://github.com/filecoin-project/rust-fil-proofs/pull/1139)\n- Circuit optimizations for oct/quad insertion: [#1125](https://github.com/filecoin-project/rust-fil-proofs/pull/1125)\n\n## [2.0.0] - 2020-05-27\n\n- Add a method 'unseal_range' to unseal a sector to a file descriptor\n- Calculate required config count based on tree shape\n- Update merkle tree cached tree usage (fixing an incorrect size usage)\n- Replace merkle_light 'height' property usage with 'row_count'\n- Update stacked bench usage of recent replica changes\n\n## [1.0.0] - 2020-05-19\n\n- Initial stable release\n\n[Unreleased]: 
https://github.com/filecoin-project/rust-fil-proofs/compare/v7.0.1...HEAD\n[7.0.1]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v7.0.1\n[7.0.0]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v7.0.0\n[6.1.0]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v6.1.0\n[6.0.0]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v6.0.0\n[5.4.0]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.4.0\n[5.3.0]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.3.0\n[5.2.3]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.2.3\n[5.2.2]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.2.2\n[5.2.1]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.2.1\n[5.2.0]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.2.0\n[5.1.4]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.1.4\n[5.1.3]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.1.3\n[5.1.2]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.1.2\n[5.1.1]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.1.1\n[5.1.0]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.1.0\n[5.0.0]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v5.0.0\n[4.0.5]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v4.0.5\n[4.0.4]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v4.0.4\n[4.0.3]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v4.0.3\n[4.0.2]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v4.0.2\n[4.0.1]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v4.0.0\n[3.0.0]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v3.0.0\n[2.0.0]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v2.0.0\n[1.0.0]: https://github.com/filecoin-project/rust-fil-proofs/tree/releases/v1.0.0\n"
  },
  {
    "path": "CODEOWNERS",
    "content": "# Global Owners\n* @dignifiedquire\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing\n\nWelcome, it is great that you found your way here. In order to make the best of all our time, we have gathered some notes\nbelow which we think can be helpful when contributing to this project.\n\n## Getting Started\n\nPlease start by reviewing this file.\n\n## Coding Standards\n\n- No compiler warnings.\n- No [clippy](https://github.com/rust-lang/rust-clippy) warnings.\n- Minimize use of `unsafe` and justify usage in comments.\n- Prefer `expect` with a good description to `unwrap`.\n- Write unit tests in the same file.\n- Format your code with `rustfmt`\n- Code should compile on `stable` and `nightly`. If adding `nightly` only features they should be behind a flag.\n- Write benchmarks for performance sensitive areas. We use [criterion.rs](https://github.com/japaric/criterion.rs).\n\n\n## General Guidelines\n- PRs require code owner approval to merge.\n- Please scope PRs to areas in which you have expertise. This code is still close to research.\n- Please follow our commit guideline described below.\n- Welcome contribution areas might include:\n  - SNARKs\n  - Proof-of-replication\n  - Rust improvements\n  - Optimizations\n  - Documentation (expertise would require careful reading of the code)\n\n\n## PR Merge Policy (Git topology)\n\n### Allowed (white list)\n - Single fast-forward merge commit, with all internal commits squashed.\n - Non-fast-forward merge commit, with all internal commits squashed -- rebased to branch from the previous commit to master.\n - Non-fast-forward merge commit, with curated (as appropriate), linear, internal commits preserved -- rebased to branch from the previous commit to master.\n\n### Disallowed (black list)\n - Non-rebased merge commits which branch from anywhere but the previous commit to master.\n - Merge commits whose internal history contains merge commits (except in rare circumstances).\n - Multiple fast-forward merge commits for a single PR.\n - Internal junk commits — (e.g. strings of WIP).\n\n### In Practice\n - In general, please rebase PRs before merging.\n - To avoid having approvals dismissed by rebasing, authors may instead choose to:\n   - First use GitHub's 'resolve conflicts' button;\n   - Then merge with GitHub's 'squash and merge' button.\n\nIf automated conflict resolution is not possible, you will need to rebase and seek re-approval. In any event, please note the guidelines and prefer either a single commit or a usefully curated set of commits.\n\n## Resources for learning Rust\n\n- Beginners\n  - [The Rust Book](https://doc.rust-lang.org/book/)\n  - [Rust Playground](https://play.rust-lang.org/)\n  - [Rust Docs](https://doc.rust-lang.org/)\n  - [Clippy](https://github.com/rust-lang/rust-clippy)\n  - [Rustfmt](https://github.com/rust-lang/rustfmt)\n- Advanced\n  - What does the Rust compiler do with my code? [Godbolt compiler explorer](https://rust.godbolt.org/)\n  - How to safely write unsafe Rust: [The Rustonomicon](https://doc.rust-lang.org/nomicon/)\n  - Did someone say macros? [The Little Book of Rust Macros](https://danielkeep.github.io/tlborm/book/index.html)\n\n\n## Commit Message Guidelines\n\nWe have very precise rules over how our git commit messages can be formatted. This leads to **more\nreadable messages** that are easy to follow when looking through the **project history**. But also,\nwe use the git commit messages to **generate the change log programmatically**.\n\n### Commit Message Format\n\nEach commit message consists of a **header**, a **body** and a **footer**.  
The header has a special\nformat that includes a **type**, a **scope** and a **subject**:\n\n```\n<type>(<scope>): <subject>\n<BLANK LINE>\n<body>\n<BLANK LINE>\n<footer>\n```\n\nThe **header** is mandatory and the **scope** of the header is optional.\n\nAny line of the commit message cannot be longer 100 characters! This allows the message to be easier\nto read on GitHub as well as in various git tools.\n\nThe footer should contain a [closing reference to an issue](https://help.github.com/articles/closing-issues-via-commit-messages/) if any.\n\nSamples: (even more [samples](https://github.com/filecoin-project/rust-fil-proofs/commits/master))\n\n```\ndocs(changelog): update changelog to beta.5\n```\n```\nfix(release): need to depend on latest rxjs and zone.js\nThe version in our package.json gets copied to the one we publish, and users need the latest of these.\n```\n\n### Revert\nIf the commit reverts a previous commit, it should begin with `revert: `, followed by the header of the reverted commit. In the body it should say: `This reverts commit <hash>.`, where the hash is the SHA of the commit being reverted.\n\n### Type\nMust be one of the following:\n\n* **build**: Changes that affect the build system or external dependencies (example scopes: cargo, benchmarks)\n* **ci**: Changes to our CI configuration files and scripts (example scopes: Circle)\n* **docs**: Documentation only changes\n* **feat**: A new feature\n* **fix**: A bug fix\n* **perf**: A code change that improves performance\n* **refactor**: A code change that neither fixes a bug nor adds a feature\n* **style**: Changes that do not affect the meaning of the code (white-space, formatting, missing semi-colons, etc)\n* **test**: Adding missing tests or correcting existing tests\n* **revert**: Used only for `git revert` commits.\n\n### Scope\nThe scope should be the name of the crate affected (as perceived by the person reading the changelog generated from commit messages.\n\nThe following is the list of supported scopes:\n\n* **fil-proofs-tooling**\n* **filecoin-proofs**\n* **storage-proofs**\n\nThere are currently a few exceptions to the \"use package name\" rule:\n\n* **cargo**: used for changes that change the cargo workspace layout, e.g.\n  public path changes, Cargo.toml changes done to all packages, etc.\n* **changelog**: used for updating the release notes in CHANGELOG.md\n* none/empty string: useful for `style`, `test` and `refactor` changes that are done across all\n  packages (e.g. `style: add missing semicolons`) and for docs changes that are not related to a\n  specific package (e.g. `docs: fix typo in tutorial`).\n\n> If you find yourself wanting to use other scopes regularly, please open an issue so we can discuss and extend this list.\n\n### Subject\nThe subject contains a succinct description of the change:\n\n* use the imperative, present tense: \"change\" not \"changed\" nor \"changes\"\n* don't capitalize the first letter\n* no dot (.) at the end\n\n### Body\nJust as in the **subject**, use the imperative, present tense: \"change\" not \"changed\" nor \"changes\".\nThe body should include the motivation for the change and contrast this with previous behavior.\n\n### Footer\nThe footer should contain any information about **Breaking Changes** and is also the place to\nreference GitHub issues that this commit **Closes**.\n\n**Breaking Changes** should start with the word `BREAKING CHANGE:` with a space or two newlines. 
The rest of the commit message is then used for this.\n\nThis guideline was adopted from the [Angular project](https://github.com/angular/angular/blob/master/CONTRIBUTING.md#commit).\n\n## Licensing\n\nAs mentioned in the [readme](README.md) all contributions are dual licensed under Apache 2 and MIT.\n"
  },
  {
    "path": "COPYRIGHT",
    "content": "This library is dual-licensed under Apache 2.0 and MIT terms.\n"
  },
  {
    "path": "Cargo.toml",
    "content": "[workspace]\n\nmembers = [\n  \"filecoin-proofs\",\n  \"storage-proofs-core\",\n  \"storage-proofs-porep\",\n  \"storage-proofs-post\",\n  \"fil-proofs-tooling\",\n  \"fil-proofs-param\",\n  \"fr32\",\n  \"sha2raw\",\n  \"filecoin-hashers\",\n]\n"
  },
  {
    "path": "Dockerfile-ci",
    "content": "# Dockerfile for CircleCI\n# build with\n# `docker build -t filecoin/rust:latest -f ./Dockerfile-ci .`\n# rebuild: `docker build --pull --no-cache -t filecoin/rust:latest -f ./Dockerfile-ci .`\n\nFROM debian:stretch\n\n# Some of the dependencies I need to build a few libraries,\n# personalize to your needs. You can use multi-stage builds\n# to produce a lightweight image.\nRUN apt-get update && \\\n    apt-get install -y curl file gcc g++ git make openssh-client \\\n    autoconf automake cmake libtool libcurl4-openssl-dev libssl-dev \\\n    libelf-dev libdw-dev binutils-dev zlib1g-dev libiberty-dev wget \\\n    xz-utils pkg-config python clang ocl-icd-opencl-dev libhwloc-dev\n\nRUN curl https://sh.rustup.rs -sSf | sh -s -- -y\n\nENV PATH \"$PATH:/root/.cargo/bin\"\nENV RUSTFLAGS \"-C link-dead-code\"\nENV CFG_RELEASE_CHANNEL \"nightly\"\n\nRUN bash -l -c 'echo $(rustc --print sysroot)/lib >> /etc/ld.so.conf'\nRUN bash -l -c 'echo /usr/local/lib >> /etc/ld.so.conf'\nRUN ldconfig\n"
  },
  {
    "path": "Dockerfile-profile",
    "content": "# How to build and run this Dockerfile:\n#\n# ```\n# RUST_FIL_PROOFS=`pwd` # path to `rust-fil-proofs`\n# docker --log-level debug build --progress tty --file Dockerfile-profile --tag rust-cpu-profile .\n# docker run -it -v $RUST_FIL_PROOFS:/code/ rust-cpu-profile\n# ```\n\nFROM rust\n\n# Get all the dependencies\n# ------------------------\n\n# Copied from: github.com/filecoin-project/rust-fil-proofs/blob/master/Dockerfile-ci\nRUN apt-get update && \\\n    apt-get install -y curl file gcc g++ git make openssh-client \\\n    autoconf automake cmake libtool libcurl4-openssl-dev libssl-dev \\\n    libelf-dev libdw-dev binutils-dev zlib1g-dev libiberty-dev wget \\\n    xz-utils pkg-config python clang\n\n# `gperftools` and dependencies (`libunwind`)\n# -------------------------------------------\n\nENV GPERFTOOLS_VERSION=\"2.7\"\nENV LIBUNWIND_VERSION=\"0.99-beta\"\n\nENV HOME=\"/root\"\nENV DOWNLOADS=${HOME}/downloads\nRUN mkdir -p ${DOWNLOADS}\nRUN echo ${DOWNLOADS}\nWORKDIR ${DOWNLOADS}\n\nRUN wget http://download.savannah.gnu.org/releases/libunwind/libunwind-${LIBUNWIND_VERSION}.tar.gz --output-document ${DOWNLOADS}/libunwind-${LIBUNWIND_VERSION}.tar.gz\nRUN tar -xvf ${DOWNLOADS}/libunwind-${LIBUNWIND_VERSION}.tar.gz\nWORKDIR ${DOWNLOADS}/libunwind-${LIBUNWIND_VERSION}\nRUN ./configure\nRUN make\nRUN make install\nWORKDIR ${DOWNLOADS}\n\nRUN wget https://github.com/gperftools/gperftools/releases/download/gperftools-${GPERFTOOLS_VERSION}/gperftools-${GPERFTOOLS_VERSION}.tar.gz  --output-document ${DOWNLOADS}/gperftools-${GPERFTOOLS_VERSION}.tar.gz\nRUN tar -xvf ${DOWNLOADS}/gperftools-${GPERFTOOLS_VERSION}.tar.gz\nWORKDIR ${DOWNLOADS}/gperftools-${GPERFTOOLS_VERSION}\nRUN ./configure\nRUN make install\nWORKDIR ${DOWNLOADS}\n\nENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib\n# FIXME: `gperftools` installs the library (`make install`) in\n# `/usr/local/lib` by default but Debian/Ubuntu don't look there\n# now, the correct `--prefix` should be added to the command.\n\n# Install latest toolchain used by `rust-fil-proofs`\n# --------------------------------------------------\n\nRUN rustup default nightly-2019-07-15\n# FIXME: The lastest version used should be dynamically obtained form the `rust-fil-proofs` repo\n# and not hard-coded here.\n\n# Ready to run\n# ------------\n\nWORKDIR /code\n\nCMD                                                                           \\\ncargo update                                                                  \\\n&&                                                                            \\\ncargo build                                                                   \\\n  -p filecoin-proofs                                                          \\\n  --release                                                                   \\\n  --example stacked                                                            \\\n  --features                                                                  \\\n    cpu-profile                                                               \\\n  -Z package-features                                                         \\\n&&                                                                            \\\nRUST_BACKTRACE=full                                                           \\\nRUST_LOG=trace                                                                \\\ntarget/release/examples/stacked                                                \\\n  --size 1024                                          
                       \\\n&&                                                                            \\\npprof target/release/examples/stacked replicate.profile || bash\n"
  },
  {
    "path": "LICENSE-APACHE",
    "content": "                         Copyright (c) 2018 Filecoin Project\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n"
  },
  {
    "path": "LICENSE-MIT",
    "content": "Permission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE."
  },
  {
    "path": "README.md",
    "content": "# worlddatabase Proving Subsystem\n"
  },
  {
    "path": "SECURITY.md",
    "content": "# Security Policy\n\n## Reporting a Vulnerability\n\nFor reporting *critical* and *security* bugs, please consult our [Security Policy and Responsible Disclosure Program information](https://github.com/filecoin-project/community/blob/master/SECURITY.md)\n\n## Reporting a non security bug\n\nFor non-critical bugs, please simply file a GitHub issue on this repo.\n"
  },
  {
    "path": "bench.config.toml",
    "content": "\n[merklepor]\nchallenges = [ 1 ] #, 10, 50, 100, 200 ]\nsize = [ \"1 KiB\" ] #, \"64 MiB\", \"128 MiB\", \"256 MiB\", \"512 MiB\", \"1024 MiB\"]\ncommand = \"groth\"\nsloth = [ 0 ]\nm = [ 6 ]\n\n[drgporep]\nchallenges = [ 1 ] #, 10, 50, 100, 200 ]\nsize = [ \"1 KiB\" ] #, \"64 MiB\", \"128 MiB\", \"256 MiB\", \"512 MiB\", \"1024 MiB\"]\ncommand = \"groth\"\nsloth = [ 0 ]\nm = [ 6 ]\n\n[drgporep-vanilla]\nchallenges = [ 1 ] #, 10, 50, 100, 200 ]\nsize = [ \"1 KiB\" ] #, \"64 MiB\", \"128 MiB\", \"256 MiB\", \"512 MiB\", \"1024 MiB\"]\nhasher = [ \"pedersen\", \"blake2s\", \"sha256\"]\nsloth = [ 0 ]\nm = [ 6 ]\n\n[zigzag]\nchallenges = [ 1 ]\nsize = [\"1 KiB\"]\nhasher = [\"pedersen\"]\ncommand = \"--groth\"\nsloth  = [ 0 ]\nm = [ 5 ]\nexpansion = [ 6 ]\n"
  },
  {
    "path": "build.rs",
    "content": "fn is_compiled_for_64_bit_arch() -> bool {\n    cfg!(target_pointer_width = \"64\")\n}\n\nfn main() {\n    assert!(\n        is_compiled_for_64_bit_arch(),\n        \"must be built for 64-bit architectures\"\n    );\n}\n"
  },
  {
    "path": "console/bridge.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage console\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/dop251/goja\"\n\t\"github.com/ethereum/go-ethereum/accounts/scwallet\"\n\t\"github.com/ethereum/go-ethereum/accounts/usbwallet\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/console/prompt\"\n\t\"github.com/ethereum/go-ethereum/internal/jsre\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\n// bridge is a collection of JavaScript utility methods to bride the .js runtime\n// environment and the Go RPC connection backing the remote method calls.\ntype bridge struct {\n\tclient   *rpc.Client         // RPC client to execute Ethereum requests through\n\tprompter prompt.UserPrompter // Input prompter to allow interactive user feedback\n\tprinter  io.Writer           // Output writer to serialize any display strings to\n}\n\n// newBridge creates a new JavaScript wrapper around an RPC client.\nfunc newBridge(client *rpc.Client, prompter prompt.UserPrompter, printer io.Writer) *bridge {\n\treturn &bridge{\n\t\tclient:   client,\n\t\tprompter: prompter,\n\t\tprinter:  printer,\n\t}\n}\n\nfunc getJeth(vm *goja.Runtime) *goja.Object {\n\tjeth := vm.Get(\"jeth\")\n\tif jeth == nil {\n\t\tpanic(vm.ToValue(\"jeth object does not exist\"))\n\t}\n\treturn jeth.ToObject(vm)\n}\n\n// NewAccount is a wrapper around the personal.newAccount RPC method that uses a\n// non-echoing password prompt to acquire the passphrase and executes the original\n// RPC method (saved in jeth.newAccount) with it to actually execute the RPC call.\nfunc (b *bridge) NewAccount(call jsre.Call) (goja.Value, error) {\n\tvar (\n\t\tpassword string\n\t\tconfirm  string\n\t\terr      error\n\t)\n\tswitch {\n\t// No password was specified, prompt the user for it\n\tcase len(call.Arguments) == 0:\n\t\tif password, err = b.prompter.PromptPassword(\"Passphrase: \"); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif confirm, err = b.prompter.PromptPassword(\"Repeat passphrase: \"); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif password != confirm {\n\t\t\treturn nil, fmt.Errorf(\"passwords don't match!\")\n\t\t}\n\t// A single string password was specified, use that\n\tcase len(call.Arguments) == 1 && call.Argument(0).ToString() != nil:\n\t\tpassword = call.Argument(0).ToString().String()\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"expected 0 or 1 string argument\")\n\t}\n\t// Password acquired, execute the call and return\n\tnewAccount, callable := goja.AssertFunction(getJeth(call.VM).Get(\"newAccount\"))\n\tif !callable {\n\t\treturn nil, fmt.Errorf(\"jeth.newAccount is not callable\")\n\t}\n\tret, err := newAccount(goja.Null(), 
call.VM.ToValue(password))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}\n\n// OpenWallet is a wrapper around personal.openWallet which can interpret and\n// react to certain error messages, such as the Trezor PIN matrix request.\nfunc (b *bridge) OpenWallet(call jsre.Call) (goja.Value, error) {\n\t// Make sure we have a wallet specified to open\n\tif call.Argument(0).ToObject(call.VM).ClassName() != \"String\" {\n\t\treturn nil, fmt.Errorf(\"first argument must be the wallet URL to open\")\n\t}\n\twallet := call.Argument(0)\n\n\tvar passwd goja.Value\n\tif goja.IsUndefined(call.Argument(1)) || goja.IsNull(call.Argument(1)) {\n\t\tpasswd = call.VM.ToValue(\"\")\n\t} else {\n\t\tpasswd = call.Argument(1)\n\t}\n\t// Open the wallet and return immediately if it succeeds\n\topenWallet, callable := goja.AssertFunction(getJeth(call.VM).Get(\"openWallet\"))\n\tif !callable {\n\t\treturn nil, fmt.Errorf(\"jeth.openWallet is not callable\")\n\t}\n\tval, err := openWallet(goja.Null(), wallet, passwd)\n\tif err == nil {\n\t\treturn val, nil\n\t}\n\n\t// Wallet open failed, report error unless it's a PIN or PUK entry\n\tswitch {\n\tcase strings.HasSuffix(err.Error(), usbwallet.ErrTrezorPINNeeded.Error()):\n\t\tval, err = b.readPinAndReopenWallet(call)\n\t\tif err == nil {\n\t\t\treturn val, nil\n\t\t}\n\t\tval, err = b.readPassphraseAndReopenWallet(call)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase strings.HasSuffix(err.Error(), scwallet.ErrPairingPasswordNeeded.Error()):\n\t\t// Pairing password input requested, fetch from the user and call open again\n\t\tinput, err := b.prompter.PromptPassword(\"Please enter the pairing password: \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpasswd = call.VM.ToValue(input)\n\t\tif val, err = openWallet(goja.Null(), wallet, passwd); err != nil {\n\t\t\tif !strings.HasSuffix(err.Error(), scwallet.ErrPINNeeded.Error()) {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t// PIN input requested, fetch from the user and call open again\n\t\t\tinput, err := b.prompter.PromptPassword(\"Please enter current PIN: \")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\tcase strings.HasSuffix(err.Error(), scwallet.ErrPINUnblockNeeded.Error()):\n\t\t// PIN unblock requested, fetch PUK and new PIN from the user\n\t\tvar pukpin string\n\t\tinput, err := b.prompter.PromptPassword(\"Please enter current PUK: \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpukpin = input\n\t\tinput, err = b.prompter.PromptPassword(\"Please enter new PIN: \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpukpin += input\n\n\t\tif val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(pukpin)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tcase strings.HasSuffix(err.Error(), scwallet.ErrPINNeeded.Error()):\n\t\t// PIN input requested, fetch from the user and call open again\n\t\tinput, err := b.prompter.PromptPassword(\"Please enter current PIN: \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\tdefault:\n\t\t// Unknown error occurred, drop to the user\n\t\treturn nil, err\n\t}\n\treturn val, nil\n}\n\nfunc (b *bridge) readPassphraseAndReopenWallet(call jsre.Call) (goja.Value, error) {\n\twallet := call.Argument(0)\n\tinput, err := b.prompter.PromptPassword(\"Please enter your 
passphrase: \")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\topenWallet, callable := goja.AssertFunction(getJeth(call.VM).Get(\"openWallet\"))\n\tif !callable {\n\t\treturn nil, fmt.Errorf(\"jeth.openWallet is not callable\")\n\t}\n\treturn openWallet(goja.Null(), wallet, call.VM.ToValue(input))\n}\n\nfunc (b *bridge) readPinAndReopenWallet(call jsre.Call) (goja.Value, error) {\n\twallet := call.Argument(0)\n\t// Trezor PIN matrix input requested, display the matrix to the user and fetch the data\n\tfmt.Fprintf(b.printer, \"Look at the device for number positions\\n\\n\")\n\tfmt.Fprintf(b.printer, \"7 | 8 | 9\\n\")\n\tfmt.Fprintf(b.printer, \"--+---+--\\n\")\n\tfmt.Fprintf(b.printer, \"4 | 5 | 6\\n\")\n\tfmt.Fprintf(b.printer, \"--+---+--\\n\")\n\tfmt.Fprintf(b.printer, \"1 | 2 | 3\\n\\n\")\n\n\tinput, err := b.prompter.PromptPassword(\"Please enter current PIN: \")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\topenWallet, callable := goja.AssertFunction(getJeth(call.VM).Get(\"openWallet\"))\n\tif !callable {\n\t\treturn nil, fmt.Errorf(\"jeth.openWallet is not callable\")\n\t}\n\treturn openWallet(goja.Null(), wallet, call.VM.ToValue(input))\n}\n\n// UnlockAccount is a wrapper around the personal.unlockAccount RPC method that\n// uses a non-echoing password prompt to acquire the passphrase and executes the\n// original RPC method (saved in jeth.unlockAccount) with it to actually execute\n// the RPC call.\nfunc (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) {\n\tif len(call.Arguments) < 1 {\n\t\treturn nil, fmt.Errorf(\"usage: unlockAccount(account, [ password, duration ])\")\n\t}\n\n\taccount := call.Argument(0)\n\t// Make sure we have an account specified to unlock.\n\tif goja.IsUndefined(account) || goja.IsNull(account) || account.ExportType().Kind() != reflect.String {\n\t\treturn nil, fmt.Errorf(\"first argument must be the account to unlock\")\n\t}\n\n\t// If password is not given or is the null value, prompt the user for it.\n\tvar passwd goja.Value\n\tif goja.IsUndefined(call.Argument(1)) || goja.IsNull(call.Argument(1)) {\n\t\tfmt.Fprintf(b.printer, \"Unlock account %s\\n\", account)\n\t\tinput, err := b.prompter.PromptPassword(\"Passphrase: \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpasswd = call.VM.ToValue(input)\n\t} else {\n\t\tif call.Argument(1).ExportType().Kind() != reflect.String {\n\t\t\treturn nil, fmt.Errorf(\"password must be a string\")\n\t\t}\n\t\tpasswd = call.Argument(1)\n\t}\n\n\t// Third argument is the duration how long the account should be unlocked.\n\tduration := goja.Null()\n\tif !goja.IsUndefined(call.Argument(2)) && !goja.IsNull(call.Argument(2)) {\n\t\tif !isNumber(call.Argument(2)) {\n\t\t\treturn nil, fmt.Errorf(\"unlock duration must be a number\")\n\t\t}\n\t\tduration = call.Argument(2)\n\t}\n\n\t// Send the request to the backend and return.\n\tunlockAccount, callable := goja.AssertFunction(getJeth(call.VM).Get(\"unlockAccount\"))\n\tif !callable {\n\t\treturn nil, fmt.Errorf(\"jeth.unlockAccount is not callable\")\n\t}\n\treturn unlockAccount(goja.Null(), account, passwd, duration)\n}\n\n// Sign is a wrapper around the personal.sign RPC method that uses a non-echoing password\n// prompt to acquire the passphrase and executes the original RPC method (saved in\n// jeth.sign) with it to actually execute the RPC call.\nfunc (b *bridge) Sign(call jsre.Call) (goja.Value, error) {\n\tif nArgs := len(call.Arguments); nArgs < 2 {\n\t\treturn nil, fmt.Errorf(\"usage: sign(message, account, [ password ])\")\n\t}\n\tvar 
(\n\t\tmessage = call.Argument(0)\n\t\taccount = call.Argument(1)\n\t\tpasswd  = call.Argument(2)\n\t)\n\n\tif goja.IsUndefined(message) || message.ExportType().Kind() != reflect.String {\n\t\treturn nil, fmt.Errorf(\"first argument must be the message to sign\")\n\t}\n\tif goja.IsUndefined(account) || account.ExportType().Kind() != reflect.String {\n\t\treturn nil, fmt.Errorf(\"second argument must be the account to sign with\")\n\t}\n\n\t// if the password is not given or null ask the user and ensure password is a string\n\tif goja.IsUndefined(passwd) || goja.IsNull(passwd) {\n\t\tfmt.Fprintf(b.printer, \"Give password for account %s\\n\", account)\n\t\tinput, err := b.prompter.PromptPassword(\"Password: \")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpasswd = call.VM.ToValue(input)\n\t} else if passwd.ExportType().Kind() != reflect.String {\n\t\treturn nil, fmt.Errorf(\"third argument must be the password to unlock the account\")\n\t}\n\n\t// Send the request to the backend and return\n\tsign, callable := goja.AssertFunction(getJeth(call.VM).Get(\"sign\"))\n\tif !callable {\n\t\treturn nil, fmt.Errorf(\"jeth.sign is not callable\")\n\t}\n\treturn sign(goja.Null(), message, account, passwd)\n}\n\n// Sleep will block the console for the specified number of seconds.\nfunc (b *bridge) Sleep(call jsre.Call) (goja.Value, error) {\n\tif nArgs := len(call.Arguments); nArgs < 1 {\n\t\treturn nil, fmt.Errorf(\"usage: sleep(<number of seconds>)\")\n\t}\n\tsleepObj := call.Argument(0)\n\tif goja.IsUndefined(sleepObj) || goja.IsNull(sleepObj) || !isNumber(sleepObj) {\n\t\treturn nil, fmt.Errorf(\"usage: sleep(<number of seconds>)\")\n\t}\n\tsleep := sleepObj.ToFloat()\n\ttime.Sleep(time.Duration(sleep * float64(time.Second)))\n\treturn call.VM.ToValue(true), nil\n}\n\n// SleepBlocks will block the console for a specified number of new blocks optionally\n// until the given timeout is reached.\nfunc (b *bridge) SleepBlocks(call jsre.Call) (goja.Value, error) {\n\t// Parse the input parameters for the sleep.\n\tvar (\n\t\tblocks = int64(0)\n\t\tsleep  = int64(9999999999999999) // indefinitely\n\t)\n\tnArgs := len(call.Arguments)\n\tif nArgs == 0 {\n\t\treturn nil, fmt.Errorf(\"usage: sleepBlocks(<n blocks>[, max sleep in seconds])\")\n\t}\n\tif nArgs >= 1 {\n\t\tif goja.IsNull(call.Argument(0)) || goja.IsUndefined(call.Argument(0)) || !isNumber(call.Argument(0)) {\n\t\t\treturn nil, fmt.Errorf(\"expected number as first argument\")\n\t\t}\n\t\tblocks = call.Argument(0).ToInteger()\n\t}\n\tif nArgs >= 2 {\n\t\tif goja.IsNull(call.Argument(1)) || goja.IsUndefined(call.Argument(1)) || !isNumber(call.Argument(1)) {\n\t\t\treturn nil, fmt.Errorf(\"expected number as second argument\")\n\t\t}\n\t\tsleep = call.Argument(1).ToInteger()\n\t}\n\n\t// Poll the current block number until either it or a timeout is reached.\n\tdeadline := time.Now().Add(time.Duration(sleep) * time.Second)\n\tvar lastNumber hexutil.Uint64\n\tif err := b.client.Call(&lastNumber, \"eth_blockNumber\"); err != nil {\n\t\treturn nil, err\n\t}\n\tfor time.Now().Before(deadline) {\n\t\tvar number hexutil.Uint64\n\t\tif err := b.client.Call(&number, \"eth_blockNumber\"); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif number != lastNumber {\n\t\t\tlastNumber = number\n\t\t\tblocks--\n\t\t}\n\t\tif blocks <= 0 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn call.VM.ToValue(true), nil\n}\n\ntype jsonrpcCall struct {\n\tID     int64\n\tMethod string\n\tParams []interface{}\n}\n\n// Send implements the web3 
provider \"send\" method.\nfunc (b *bridge) Send(call jsre.Call) (goja.Value, error) {\n\t// Remarshal the request into a Go value.\n\treqVal, err := call.Argument(0).ToObject(call.VM).MarshalJSON()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\trawReq = string(reqVal)\n\t\tdec    = json.NewDecoder(strings.NewReader(rawReq))\n\t\treqs   []jsonrpcCall\n\t\tbatch  bool\n\t)\n\tdec.UseNumber() // avoid float64s\n\tif rawReq[0] == '[' {\n\t\tbatch = true\n\t\tdec.Decode(&reqs)\n\t} else {\n\t\tbatch = false\n\t\treqs = make([]jsonrpcCall, 1)\n\t\tdec.Decode(&reqs[0])\n\t}\n\n\t// Execute the requests.\n\tvar resps []*goja.Object\n\tfor _, req := range reqs {\n\t\tresp := call.VM.NewObject()\n\t\tresp.Set(\"jsonrpc\", \"2.0\")\n\t\tresp.Set(\"id\", req.ID)\n\n\t\tvar result json.RawMessage\n\t\tif err = b.client.Call(&result, req.Method, req.Params...); err == nil {\n\t\t\tif result == nil {\n\t\t\t\t// Special case null because it is decoded as an empty\n\t\t\t\t// raw message for some reason.\n\t\t\t\tresp.Set(\"result\", goja.Null())\n\t\t\t} else {\n\t\t\t\tJSON := call.VM.Get(\"JSON\").ToObject(call.VM)\n\t\t\t\tparse, callable := goja.AssertFunction(JSON.Get(\"parse\"))\n\t\t\t\tif !callable {\n\t\t\t\t\treturn nil, fmt.Errorf(\"JSON.parse is not a function\")\n\t\t\t\t}\n\t\t\t\tresultVal, err := parse(goja.Null(), call.VM.ToValue(string(result)))\n\t\t\t\tif err != nil {\n\t\t\t\t\tsetError(resp, -32603, err.Error(), nil)\n\t\t\t\t} else {\n\t\t\t\t\tresp.Set(\"result\", resultVal)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tcode := -32603\n\t\t\tvar data interface{}\n\t\t\tif err, ok := err.(rpc.Error); ok {\n\t\t\t\tcode = err.ErrorCode()\n\t\t\t}\n\t\t\tif err, ok := err.(rpc.DataError); ok {\n\t\t\t\tdata = err.ErrorData()\n\t\t\t}\n\t\t\tsetError(resp, code, err.Error(), data)\n\t\t}\n\t\tresps = append(resps, resp)\n\t}\n\t// Return the responses either to the callback (if supplied)\n\t// or directly as the return value.\n\tvar result goja.Value\n\tif batch {\n\t\tresult = call.VM.ToValue(resps)\n\t} else {\n\t\tresult = resps[0]\n\t}\n\tif fn, isFunc := goja.AssertFunction(call.Argument(1)); isFunc {\n\t\tfn(goja.Null(), goja.Null(), result)\n\t\treturn goja.Undefined(), nil\n\t}\n\treturn result, nil\n}\n\nfunc setError(resp *goja.Object, code int, msg string, data interface{}) {\n\terr := make(map[string]interface{})\n\terr[\"code\"] = code\n\terr[\"message\"] = msg\n\tif data != nil {\n\t\terr[\"data\"] = data\n\t}\n\tresp.Set(\"error\", err)\n}\n\n// isNumber returns true if input value is a JS number.\nfunc isNumber(v goja.Value) bool {\n\tk := v.ExportType().Kind()\n\treturn k >= reflect.Int && k <= reflect.Float64\n}\n\nfunc getObject(vm *goja.Runtime, name string) *goja.Object {\n\tv := vm.Get(name)\n\tif v == nil {\n\t\treturn nil\n\t}\n\treturn v.ToObject(vm)\n}\n"
  },
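  {
    "path": "console/bridge_send_sketch.go",
    "content": "// Illustrative sketch only -- a hypothetical file, not part of go-ethereum.\n// It shows the JSON-RPC envelope that bridge.Send remarshals web3 requests\n// into (mirroring the unexported jsonrpcCall type in console/bridge.go), and\n// the error object shape attached by setError on failures. The json tags are\n// an assumption added for clarity; the real type relies on encoding/json's\n// case-insensitive field matching.\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n)\n\n// mirrors console/bridge.go's unexported jsonrpcCall\ntype jsonrpcCall struct {\n\tID     int64         `json:\"id\"`\n\tMethod string        `json:\"method\"`\n\tParams []interface{} `json:\"params\"`\n}\n\nfunc main() {\n\t// A web3 request as bridge.Send decodes it before calling rpc.Client.Call.\n\treq := jsonrpcCall{ID: 1, Method: \"eth_getBalance\", Params: []interface{}{\"0x8605cdbbdb6d264aa742e77020dcbc58fcdce182\", \"latest\"}}\n\tblob, _ := json.Marshal(req)\n\tfmt.Println(string(blob))\n\n\t// The response shape bridge.Send assembles on an RPC error via setError.\n\tresp := map[string]interface{}{\n\t\t\"jsonrpc\": \"2.0\",\n\t\t\"id\":      req.ID,\n\t\t\"error\":   map[string]interface{}{\"code\": -32603, \"message\": \"example error\"},\n\t}\n\tblob, _ = json.Marshal(resp)\n\tfmt.Println(string(blob))\n}\n"
  },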
  {
    "path": "console/bridge_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage console\n\nimport (\n\t\"testing\"\n\n\t\"github.com/dop251/goja\"\n\t\"github.com/ethereum/go-ethereum/internal/jsre\"\n)\n\n// TestUndefinedAsParam ensures that personal functions can receive\n// `undefined` as a parameter.\nfunc TestUndefinedAsParam(t *testing.T) {\n\tb := bridge{}\n\tcall := jsre.Call{}\n\tcall.Arguments = []goja.Value{goja.Undefined()}\n\n\tb.UnlockAccount(call)\n\tb.Sign(call)\n\tb.Sleep(call)\n}\n\n// TestNullAsParam ensures that personal functions can receive\n// `null` as a parameter.\nfunc TestNullAsParam(t *testing.T) {\n\tb := bridge{}\n\tcall := jsre.Call{}\n\tcall.Arguments = []goja.Value{goja.Null()}\n\n\tb.UnlockAccount(call)\n\tb.Sign(call)\n\tb.Sleep(call)\n}\n"
  },
  {
    "path": "console/console.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage console\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"os/signal\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com/dop251/goja\"\n\t\"github.com/ethereum/go-ethereum/console/prompt\"\n\t\"github.com/ethereum/go-ethereum/internal/jsre\"\n\t\"github.com/ethereum/go-ethereum/internal/jsre/deps\"\n\t\"github.com/ethereum/go-ethereum/internal/web3ext\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n\t\"github.com/mattn/go-colorable\"\n\t\"github.com/peterh/liner\"\n)\n\nvar (\n\t// u: unlock, s: signXX, sendXX, n: newAccount, i: importXX\n\tpasswordRegexp = regexp.MustCompile(`personal.[nusi]`)\n\tonlyWhitespace = regexp.MustCompile(`^\\s*$`)\n\texit           = regexp.MustCompile(`^\\s*exit\\s*;*\\s*$`)\n)\n\n// HistoryFile is the file within the data directory to store input scrollback.\nconst HistoryFile = \"history\"\n\n// DefaultPrompt is the default prompt line prefix to use for user input querying.\nconst DefaultPrompt = \"> \"\n\n// Config is the collection of configurations to fine tune the behavior of the\n// JavaScript console.\ntype Config struct {\n\tDataDir  string              // Data directory to store the console history at\n\tDocRoot  string              // Filesystem path from where to load JavaScript files from\n\tClient   *rpc.Client         // RPC client to execute Ethereum requests through\n\tPrompt   string              // Input prompt prefix string (defaults to DefaultPrompt)\n\tPrompter prompt.UserPrompter // Input prompter to allow interactive user feedback (defaults to TerminalPrompter)\n\tPrinter  io.Writer           // Output writer to serialize any display strings to (defaults to os.Stdout)\n\tPreload  []string            // Absolute paths to JavaScript files to preload\n}\n\n// Console is a JavaScript interpreted runtime environment. 
It is a fully fledged\n// JavaScript console attached to a running node via an external or in-process RPC\n// client.\ntype Console struct {\n\tclient   *rpc.Client         // RPC client to execute Ethereum requests through\n\tjsre     *jsre.JSRE          // JavaScript runtime environment running the interpreter\n\tprompt   string              // Input prompt prefix string\n\tprompter prompt.UserPrompter // Input prompter to allow interactive user feedback\n\thistPath string              // Absolute path to the console scrollback history\n\thistory  []string            // Scroll history maintained by the console\n\tprinter  io.Writer           // Output writer to serialize any display strings to\n}\n\n// New initializes a JavaScript interpreted runtime environment and sets defaults\n// with the config struct.\nfunc New(config Config) (*Console, error) {\n\t// Handle unset config values gracefully\n\tif config.Prompter == nil {\n\t\tconfig.Prompter = prompt.Stdin\n\t}\n\tif config.Prompt == \"\" {\n\t\tconfig.Prompt = DefaultPrompt\n\t}\n\tif config.Printer == nil {\n\t\tconfig.Printer = colorable.NewColorableStdout()\n\t}\n\n\t// Initialize the console and return\n\tconsole := &Console{\n\t\tclient:   config.Client,\n\t\tjsre:     jsre.New(config.DocRoot, config.Printer),\n\t\tprompt:   config.Prompt,\n\t\tprompter: config.Prompter,\n\t\tprinter:  config.Printer,\n\t\thistPath: filepath.Join(config.DataDir, HistoryFile),\n\t}\n\tif err := os.MkdirAll(config.DataDir, 0700); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := console.init(config.Preload); err != nil {\n\t\treturn nil, err\n\t}\n\treturn console, nil\n}\n\n// init retrieves the available APIs from the remote RPC provider and initializes\n// the console's JavaScript namespaces based on the exposed modules.\nfunc (c *Console) init(preload []string) error {\n\tc.initConsoleObject()\n\n\t// Initialize the JavaScript <-> Go RPC bridge.\n\tbridge := newBridge(c.client, c.prompter, c.printer)\n\tif err := c.initWeb3(bridge); err != nil {\n\t\treturn err\n\t}\n\tif err := c.initExtensions(); err != nil {\n\t\treturn err\n\t}\n\n\t// Add bridge overrides for web3.js functionality.\n\tc.jsre.Do(func(vm *goja.Runtime) {\n\t\tc.initAdmin(vm, bridge)\n\t\tc.initPersonal(vm, bridge)\n\t})\n\n\t// Preload JavaScript files.\n\tfor _, path := range preload {\n\t\tif err := c.jsre.Exec(path); err != nil {\n\t\t\tfailure := err.Error()\n\t\t\tif gojaErr, ok := err.(*goja.Exception); ok {\n\t\t\t\tfailure = gojaErr.String()\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"%s: %v\", path, failure)\n\t\t}\n\t}\n\n\t// Configure the input prompter for history and tab completion.\n\tif c.prompter != nil {\n\t\tif content, err := ioutil.ReadFile(c.histPath); err != nil {\n\t\t\tc.prompter.SetHistory(nil)\n\t\t} else {\n\t\t\tc.history = strings.Split(string(content), \"\\n\")\n\t\t\tc.prompter.SetHistory(c.history)\n\t\t}\n\t\tc.prompter.SetWordCompleter(c.AutoCompleteInput)\n\t}\n\treturn nil\n}\n\nfunc (c *Console) initConsoleObject() {\n\tc.jsre.Do(func(vm *goja.Runtime) {\n\t\tconsole := vm.NewObject()\n\t\tconsole.Set(\"log\", c.consoleOutput)\n\t\tconsole.Set(\"error\", c.consoleOutput)\n\t\tvm.Set(\"console\", console)\n\t})\n}\n\nfunc (c *Console) initWeb3(bridge *bridge) error {\n\tbnJS := string(deps.MustAsset(\"bignumber.js\"))\n\tweb3JS := string(deps.MustAsset(\"web3.js\"))\n\tif err := c.jsre.Compile(\"bignumber.js\", bnJS); err != nil {\n\t\treturn fmt.Errorf(\"bignumber.js: %v\", err)\n\t}\n\tif err := c.jsre.Compile(\"web3.js\", web3JS); err != 
nil {\n\t\treturn fmt.Errorf(\"web3.js: %v\", err)\n\t}\n\tif _, err := c.jsre.Run(\"var Web3 = require('web3');\"); err != nil {\n\t\treturn fmt.Errorf(\"web3 require: %v\", err)\n\t}\n\tvar err error\n\tc.jsre.Do(func(vm *goja.Runtime) {\n\t\ttransport := vm.NewObject()\n\t\ttransport.Set(\"send\", jsre.MakeCallback(vm, bridge.Send))\n\t\ttransport.Set(\"sendAsync\", jsre.MakeCallback(vm, bridge.Send))\n\t\tvm.Set(\"_consoleWeb3Transport\", transport)\n\t\t_, err = vm.RunString(\"var web3 = new Web3(_consoleWeb3Transport)\")\n\t})\n\treturn err\n}\n\n// initExtensions loads and registers web3.js extensions.\nfunc (c *Console) initExtensions() error {\n\t// Compute aliases from server-provided modules.\n\tapis, err := c.client.SupportedModules()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"api modules: %v\", err)\n\t}\n\taliases := map[string]struct{}{\"eth\": {}, \"personal\": {}}\n\tfor api := range apis {\n\t\tif api == \"web3\" {\n\t\t\tcontinue\n\t\t}\n\t\taliases[api] = struct{}{}\n\t\tif file, ok := web3ext.Modules[api]; ok {\n\t\t\tif err = c.jsre.Compile(api+\".js\", file); err != nil {\n\t\t\t\treturn fmt.Errorf(\"%s.js: %v\", api, err)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Apply aliases.\n\tc.jsre.Do(func(vm *goja.Runtime) {\n\t\tweb3 := getObject(vm, \"web3\")\n\t\tfor name := range aliases {\n\t\t\tif v := web3.Get(name); v != nil {\n\t\t\t\tvm.Set(name, v)\n\t\t\t}\n\t\t}\n\t})\n\treturn nil\n}\n\n// initAdmin creates additional admin APIs implemented by the bridge.\nfunc (c *Console) initAdmin(vm *goja.Runtime, bridge *bridge) {\n\tif admin := getObject(vm, \"admin\"); admin != nil {\n\t\tadmin.Set(\"sleepBlocks\", jsre.MakeCallback(vm, bridge.SleepBlocks))\n\t\tadmin.Set(\"sleep\", jsre.MakeCallback(vm, bridge.Sleep))\n\t\tadmin.Set(\"clearHistory\", c.clearHistory)\n\t}\n}\n\n// initPersonal redirects account-related API methods through the bridge.\n//\n// If the console is in interactive mode and the 'personal' API is available, override\n// the openWallet, unlockAccount, newAccount and sign methods since these require user\n// interaction. The original web3 callbacks are stored in 'jeth'. 
These will be called\n// by the bridge after the prompt and send the original web3 request to the backend.\nfunc (c *Console) initPersonal(vm *goja.Runtime, bridge *bridge) {\n\tpersonal := getObject(vm, \"personal\")\n\tif personal == nil || c.prompter == nil {\n\t\treturn\n\t}\n\tjeth := vm.NewObject()\n\tvm.Set(\"jeth\", jeth)\n\tjeth.Set(\"openWallet\", personal.Get(\"openWallet\"))\n\tjeth.Set(\"unlockAccount\", personal.Get(\"unlockAccount\"))\n\tjeth.Set(\"newAccount\", personal.Get(\"newAccount\"))\n\tjeth.Set(\"sign\", personal.Get(\"sign\"))\n\tpersonal.Set(\"openWallet\", jsre.MakeCallback(vm, bridge.OpenWallet))\n\tpersonal.Set(\"unlockAccount\", jsre.MakeCallback(vm, bridge.UnlockAccount))\n\tpersonal.Set(\"newAccount\", jsre.MakeCallback(vm, bridge.NewAccount))\n\tpersonal.Set(\"sign\", jsre.MakeCallback(vm, bridge.Sign))\n}\n\nfunc (c *Console) clearHistory() {\n\tc.history = nil\n\tc.prompter.ClearHistory()\n\tif err := os.Remove(c.histPath); err != nil {\n\t\tfmt.Fprintln(c.printer, \"can't delete history file:\", err)\n\t} else {\n\t\tfmt.Fprintln(c.printer, \"history file deleted\")\n\t}\n}\n\n// consoleOutput is an override for the console.log and console.error methods to\n// stream the output into the configured output stream instead of stdout.\nfunc (c *Console) consoleOutput(call goja.FunctionCall) goja.Value {\n\tvar output []string\n\tfor _, argument := range call.Arguments {\n\t\toutput = append(output, fmt.Sprintf(\"%v\", argument))\n\t}\n\tfmt.Fprintln(c.printer, strings.Join(output, \" \"))\n\treturn goja.Null()\n}\n\n// AutoCompleteInput is a pre-assembled word completer to be used by the user\n// input prompter to provide hints to the user about the methods available.\nfunc (c *Console) AutoCompleteInput(line string, pos int) (string, []string, string) {\n\t// No completions can be provided for empty inputs\n\tif len(line) == 0 || pos == 0 {\n\t\treturn \"\", nil, \"\"\n\t}\n\t// Chunk the input to the part relevant for autocompletion\n\t// E.g. in case of nested lines eth.getBalance(eth.coinb<tab><tab>\n\tstart := pos - 1\n\tfor ; start > 0; start-- {\n\t\t// Skip all methods and namespaces (i.e. including the dot)\n\t\tif line[start] == '.' || (line[start] >= 'a' && line[start] <= 'z') || (line[start] >= 'A' && line[start] <= 'Z') {\n\t\t\tcontinue\n\t\t}\n\t\t// Handle web3 in a special way (i.e. 
other numbers aren't auto completed)\n\t\tif start >= 3 && line[start-3:start] == \"web3\" {\n\t\t\tstart -= 3\n\t\t\tcontinue\n\t\t}\n\t\t// We've hit an unexpected character, autocomplete from here\n\t\tstart++\n\t\tbreak\n\t}\n\treturn line[:start], c.jsre.CompleteKeywords(line[start:pos]), line[pos:]\n}\n\n// Welcome shows a summary of the current Geth instance and some metadata about\n// the console's available modules.\nfunc (c *Console) Welcome() {\n\tmessage := \"Welcome to the Geth JavaScript console!\\n\\n\"\n\n\t// Print some generic Geth metadata\n\tif res, err := c.jsre.Run(`\n\t\tvar message = \"instance: \" + web3.version.node + \"\\n\";\n\t\ttry {\n\t\t\tmessage += \"coinbase: \" + eth.coinbase + \"\\n\";\n\t\t} catch (err) {}\n\t\tmessage += \"at block: \" + eth.blockNumber + \" (\" + new Date(1000 * eth.getBlock(eth.blockNumber).timestamp) + \")\\n\";\n\t\ttry {\n\t\t\tmessage += \" datadir: \" + admin.datadir + \"\\n\";\n\t\t} catch (err) {}\n\t\tmessage\n\t`); err == nil {\n\t\tmessage += res.String()\n\t}\n\t// List all the supported modules for the user to call\n\tif apis, err := c.client.SupportedModules(); err == nil {\n\t\tmodules := make([]string, 0, len(apis))\n\t\tfor api, version := range apis {\n\t\t\tmodules = append(modules, fmt.Sprintf(\"%s:%s\", api, version))\n\t\t}\n\t\tsort.Strings(modules)\n\t\tmessage += \" modules: \" + strings.Join(modules, \" \") + \"\\n\"\n\t}\n\tmessage += \"\\nTo exit, press ctrl-d\"\n\tfmt.Fprintln(c.printer, message)\n}\n\n// Evaluate executes code and pretty prints the result to the specified output\n// stream.\nfunc (c *Console) Evaluate(statement string) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Fprintf(c.printer, \"[native] error: %v\\n\", r)\n\t\t}\n\t}()\n\tc.jsre.Evaluate(statement, c.printer)\n}\n\n// Interactive starts an interactive user session, where input is prompted from\n// the configured user prompter.\nfunc (c *Console) Interactive() {\n\tvar (\n\t\tprompt      = c.prompt             // the current prompt line (used for multi-line inputs)\n\t\tindents     = 0                    // the current number of input indents (used for multi-line inputs)\n\t\tinput       = \"\"                   // the current user input\n\t\tinputLine   = make(chan string, 1) // receives user input\n\t\tinputErr    = make(chan error, 1)  // receives liner errors\n\t\trequestLine = make(chan string)    // requests a line of input\n\t\tinterrupt   = make(chan os.Signal, 1)\n\t)\n\n\t// Monitor Ctrl-C. While liner does turn on the relevant terminal mode bits to avoid\n\t// the signal, a signal can still be received for unsupported terminals. Unfortunately\n\t// there is no way to cancel the line reader when this happens. 
The readLines\n\t// goroutine will be leaked in this case.\n\tsignal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)\n\tdefer signal.Stop(interrupt)\n\n\t// The line reader runs in a separate goroutine.\n\tgo c.readLines(inputLine, inputErr, requestLine)\n\tdefer close(requestLine)\n\n\tfor {\n\t\t// Send the next prompt, triggering an input read.\n\t\trequestLine <- prompt\n\n\t\tselect {\n\t\tcase <-interrupt:\n\t\t\tfmt.Fprintln(c.printer, \"caught interrupt, exiting\")\n\t\t\treturn\n\n\t\tcase err := <-inputErr:\n\t\t\tif err == liner.ErrPromptAborted {\n\t\t\t\t// When prompting for multi-line input, the first Ctrl-C resets\n\t\t\t\t// the multi-line state.\n\t\t\t\tprompt, indents, input = c.prompt, 0, \"\"\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn\n\n\t\tcase line := <-inputLine:\n\t\t\t// User input was returned by the prompter, handle special cases.\n\t\t\tif indents <= 0 && exit.MatchString(line) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif onlyWhitespace.MatchString(line) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Append the line to the input and check for multi-line interpretation.\n\t\t\tinput += line + \"\\n\"\n\t\t\tindents = countIndents(input)\n\t\t\tif indents <= 0 {\n\t\t\t\tprompt = c.prompt\n\t\t\t} else {\n\t\t\t\tprompt = strings.Repeat(\".\", indents*3) + \" \"\n\t\t\t}\n\t\t\t// If all the needed lines are present, save the command and run it.\n\t\t\tif indents <= 0 {\n\t\t\t\tif len(input) > 0 && input[0] != ' ' && !passwordRegexp.MatchString(input) {\n\t\t\t\t\tif command := strings.TrimSpace(input); len(c.history) == 0 || command != c.history[len(c.history)-1] {\n\t\t\t\t\t\tc.history = append(c.history, command)\n\t\t\t\t\t\tif c.prompter != nil {\n\t\t\t\t\t\t\tc.prompter.AppendHistory(command)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tc.Evaluate(input)\n\t\t\t\tinput = \"\"\n\t\t\t}\n\t\t}\n\t}\n}\n\n// readLines runs in its own goroutine, prompting for input.\nfunc (c *Console) readLines(input chan<- string, errc chan<- error, prompt <-chan string) {\n\tfor p := range prompt {\n\t\tline, err := c.prompter.PromptInput(p)\n\t\tif err != nil {\n\t\t\terrc <- err\n\t\t} else {\n\t\t\tinput <- line\n\t\t}\n\t}\n}\n\n// countIndents returns the number of indentations for the given input.\n// In case of invalid input such as var a = } the result can be negative.\nfunc countIndents(input string) int {\n\tvar (\n\t\tindents     = 0\n\t\tinString    = false\n\t\tstrOpenChar = ' '   // keep track of the string open char to allow var str = \"I'm ....\";\n\t\tcharEscaped = false // keep track if the previous char was the '\\' char, allow var str = \"abc\\\"def\";\n\t)\n\n\tfor _, c := range input {\n\t\tswitch c {\n\t\tcase '\\\\':\n\t\t\t// indicate next char as escaped when in string and previous char isn't escaping this backslash\n\t\t\tif !charEscaped && inString {\n\t\t\t\tcharEscaped = true\n\t\t\t}\n\t\tcase '\\'', '\"':\n\t\t\tif inString && !charEscaped && strOpenChar == c { // end string\n\t\t\t\tinString = false\n\t\t\t} else if !inString && !charEscaped { // begin string\n\t\t\t\tinString = true\n\t\t\t\tstrOpenChar = c\n\t\t\t}\n\t\t\tcharEscaped = false\n\t\tcase '{', '(':\n\t\t\tif !inString { // ignore brackets when in string, allow var str = \"a{\"; without indenting\n\t\t\t\tindents++\n\t\t\t}\n\t\t\tcharEscaped = false\n\t\tcase '}', ')':\n\t\t\tif !inString {\n\t\t\t\tindents--\n\t\t\t}\n\t\t\tcharEscaped = false\n\t\tdefault:\n\t\t\tcharEscaped = false\n\t\t}\n\t}\n\n\treturn indents\n}\n\n// Execute runs the JavaScript file specified as the 
argument.\nfunc (c *Console) Execute(path string) error {\n\treturn c.jsre.Exec(path)\n}\n\n// Stop cleans up the console and terminates the runtime environment.\nfunc (c *Console) Stop(graceful bool) error {\n\tif err := ioutil.WriteFile(c.histPath, []byte(strings.Join(c.history, \"\\n\")), 0600); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Chmod(c.histPath, 0600); err != nil { // Force 0600, even if it was different previously\n\t\treturn err\n\t}\n\tc.jsre.Stop(graceful)\n\treturn nil\n}\n"
  },
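  {
    "path": "console/indents_sketch.go",
    "content": "// Illustrative sketch only -- a hypothetical file, not part of go-ethereum.\n// A simplified restatement of countIndents from console/console.go: the\n// interactive loop keeps showing a continuation prompt while the running\n// bracket balance is positive, ignoring brackets inside string literals.\npackage main\n\nimport \"fmt\"\n\nfunc simpleIndents(input string) int {\n\tdepth := 0        // open '{'/'(' minus close '}'/')'\n\tinString := false // inside a ' or \" literal\n\tvar quote rune    // which quote opened the literal\n\tescaped := false  // previous char was a backslash inside a string\n\tfor _, c := range input {\n\t\tswitch {\n\t\tcase escaped:\n\t\t\tescaped = false\n\t\tcase inString && c == '\\\\':\n\t\t\tescaped = true\n\t\tcase inString && c == quote:\n\t\t\tinString = false\n\t\tcase !inString && (c == '\\'' || c == '\"'):\n\t\t\tinString, quote = true, c\n\t\tcase !inString && (c == '{' || c == '('):\n\t\t\tdepth++\n\t\tcase !inString && (c == '}' || c == ')'):\n\t\t\tdepth--\n\t\t}\n\t}\n\treturn depth\n}\n\nfunc main() {\n\tfmt.Println(simpleIndents(\"function f(a) {\"))   // 1: prompt continues\n\tfmt.Println(simpleIndents(\"var s = \\\"a{(\\\";\"))  // 0: brackets in string ignored\n\tfmt.Println(simpleIndents(\"function f(a) { }\")) // 0: statement complete\n}\n"
  },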
  {
    "path": "console/console_test.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage console\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/console/prompt\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/eth\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/internal/jsre\"\n\t\"github.com/ethereum/go-ethereum/miner\"\n\t\"github.com/ethereum/go-ethereum/node\"\n)\n\nconst (\n\ttestInstance = \"console-tester\"\n\ttestAddress  = \"0x8605cdbbdb6d264aa742e77020dcbc58fcdce182\"\n)\n\n// hookedPrompter implements UserPrompter to simulate use input via channels.\ntype hookedPrompter struct {\n\tscheduler chan string\n}\n\nfunc (p *hookedPrompter) PromptInput(prompt string) (string, error) {\n\t// Send the prompt to the tester\n\tselect {\n\tcase p.scheduler <- prompt:\n\tcase <-time.After(time.Second):\n\t\treturn \"\", errors.New(\"prompt timeout\")\n\t}\n\t// Retrieve the response and feed to the console\n\tselect {\n\tcase input := <-p.scheduler:\n\t\treturn input, nil\n\tcase <-time.After(time.Second):\n\t\treturn \"\", errors.New(\"input timeout\")\n\t}\n}\n\nfunc (p *hookedPrompter) PromptPassword(prompt string) (string, error) {\n\treturn \"\", errors.New(\"not implemented\")\n}\nfunc (p *hookedPrompter) PromptConfirm(prompt string) (bool, error) {\n\treturn false, errors.New(\"not implemented\")\n}\nfunc (p *hookedPrompter) SetHistory(history []string)                     {}\nfunc (p *hookedPrompter) AppendHistory(command string)                    {}\nfunc (p *hookedPrompter) ClearHistory()                                   {}\nfunc (p *hookedPrompter) SetWordCompleter(completer prompt.WordCompleter) {}\n\n// tester is a console test environment for the console tests to operate on.\ntype tester struct {\n\tworkspace string\n\tstack     *node.Node\n\tethereum  *eth.Ethereum\n\tconsole   *Console\n\tinput     *hookedPrompter\n\toutput    *bytes.Buffer\n}\n\n// newTester creates a test environment based on which the console can operate.\n// Please ensure you call Close() on the returned tester to avoid leaks.\nfunc newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester {\n\t// Create a temporary storage for the node keys and initialize it\n\tworkspace, err := ioutil.TempDir(\"\", \"console-tester-\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temporary keystore: %v\", err)\n\t}\n\n\t// Create a networkless protocol stack and start an Ethereum service within\n\tstack, err := node.New(&node.Config{DataDir: workspace, UseLightweightKDF: true, Name: 
testInstance})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create node: %v\", err)\n\t}\n\tethConf := &ethconfig.Config{\n\t\tGenesis: core.DeveloperGenesisBlock(15, common.Address{}),\n\t\tMiner: miner.Config{\n\t\t\tEtherbase: common.HexToAddress(testAddress),\n\t\t},\n\t\tEthash: ethash.Config{\n\t\t\tPowMode: ethash.ModeTest,\n\t\t},\n\t}\n\tif confOverride != nil {\n\t\tconfOverride(ethConf)\n\t}\n\tethBackend, err := eth.New(stack, ethConf)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to register Ethereum protocol: %v\", err)\n\t}\n\t// Start the node and assemble the JavaScript console around it\n\tif err = stack.Start(); err != nil {\n\t\tt.Fatalf(\"failed to start test stack: %v\", err)\n\t}\n\tclient, err := stack.Attach()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to attach to node: %v\", err)\n\t}\n\tprompter := &hookedPrompter{scheduler: make(chan string)}\n\tprinter := new(bytes.Buffer)\n\n\tconsole, err := New(Config{\n\t\tDataDir:  stack.DataDir(),\n\t\tDocRoot:  \"testdata\",\n\t\tClient:   client,\n\t\tPrompter: prompter,\n\t\tPrinter:  printer,\n\t\tPreload:  []string{\"preload.js\"},\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create JavaScript console: %v\", err)\n\t}\n\t// Create the final tester and return\n\treturn &tester{\n\t\tworkspace: workspace,\n\t\tstack:     stack,\n\t\tethereum:  ethBackend,\n\t\tconsole:   console,\n\t\tinput:     prompter,\n\t\toutput:    printer,\n\t}\n}\n\n// Close cleans up any temporary data folders and held resources.\nfunc (env *tester) Close(t *testing.T) {\n\tif err := env.console.Stop(false); err != nil {\n\t\tt.Errorf(\"failed to stop embedded console: %v\", err)\n\t}\n\tif err := env.stack.Close(); err != nil {\n\t\tt.Errorf(\"failed to tear down embedded node: %v\", err)\n\t}\n\tos.RemoveAll(env.workspace)\n}\n\n// Tests that the node lists the correct welcome message, notably that it contains\n// the instance name, coinbase account, block number, data directory and supported\n// console modules.\nfunc TestWelcome(t *testing.T) {\n\ttester := newTester(t, nil)\n\tdefer tester.Close(t)\n\n\ttester.console.Welcome()\n\n\toutput := tester.output.String()\n\tif want := \"Welcome\"; !strings.Contains(output, want) {\n\t\tt.Fatalf(\"console output missing welcome message: have\\n%s\\nwant also %s\", output, want)\n\t}\n\tif want := fmt.Sprintf(\"instance: %s\", testInstance); !strings.Contains(output, want) {\n\t\tt.Fatalf(\"console output missing instance: have\\n%s\\nwant also %s\", output, want)\n\t}\n\tif want := fmt.Sprintf(\"coinbase: %s\", testAddress); !strings.Contains(output, want) {\n\t\tt.Fatalf(\"console output missing coinbase: have\\n%s\\nwant also %s\", output, want)\n\t}\n\tif want := \"at block: 0\"; !strings.Contains(output, want) {\n\t\tt.Fatalf(\"console output missing sync status: have\\n%s\\nwant also %s\", output, want)\n\t}\n\tif want := fmt.Sprintf(\"datadir: %s\", tester.workspace); !strings.Contains(output, want) {\n\t\tt.Fatalf(\"console output missing datadir: have\\n%s\\nwant also %s\", output, want)\n\t}\n}\n\n// Tests that JavaScript statement evaluation works as intended.\nfunc TestEvaluate(t *testing.T) {\n\ttester := newTester(t, nil)\n\tdefer tester.Close(t)\n\n\ttester.console.Evaluate(\"2 + 2\")\n\tif output := tester.output.String(); !strings.Contains(output, \"4\") {\n\t\tt.Fatalf(\"statement evaluation failed: have %s, want %s\", output, \"4\")\n\t}\n}\n\n// Tests that the console can be used in interactive mode.\nfunc TestInteractive(t *testing.T) {\n\t// Create a tester and run an 
interactive console in the background\n\ttester := newTester(t, nil)\n\tdefer tester.Close(t)\n\n\tgo tester.console.Interactive()\n\n\t// Wait for a prompt and send a statement back\n\tselect {\n\tcase <-tester.input.scheduler:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"initial prompt timeout\")\n\t}\n\tselect {\n\tcase tester.input.scheduler <- \"2+2\":\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"input feedback timeout\")\n\t}\n\t// Wait for the second prompt and ensure first statement was evaluated\n\tselect {\n\tcase <-tester.input.scheduler:\n\tcase <-time.After(time.Second):\n\t\tt.Fatalf(\"secondary prompt timeout\")\n\t}\n\tif output := tester.output.String(); !strings.Contains(output, \"4\") {\n\t\tt.Fatalf(\"statement evaluation failed: have %s, want %s\", output, \"4\")\n\t}\n}\n\n// Tests that preloaded JavaScript files have been executed before the user is\n// given input.\nfunc TestPreload(t *testing.T) {\n\ttester := newTester(t, nil)\n\tdefer tester.Close(t)\n\n\ttester.console.Evaluate(\"preloaded\")\n\tif output := tester.output.String(); !strings.Contains(output, \"some-preloaded-string\") {\n\t\tt.Fatalf(\"preloaded variable missing: have %s, want %s\", output, \"some-preloaded-string\")\n\t}\n}\n\n// Tests that JavaScript scripts can be executed from the configured asset path.\nfunc TestExecute(t *testing.T) {\n\ttester := newTester(t, nil)\n\tdefer tester.Close(t)\n\n\ttester.console.Execute(\"exec.js\")\n\n\ttester.console.Evaluate(\"execed\")\n\tif output := tester.output.String(); !strings.Contains(output, \"some-executed-string\") {\n\t\tt.Fatalf(\"execed variable missing: have %s, want %s\", output, \"some-executed-string\")\n\t}\n}\n\n// Tests that the JavaScript objects returned by statement executions are properly\n// pretty printed instead of just displaying \"[object]\".\nfunc TestPrettyPrint(t *testing.T) {\n\ttester := newTester(t, nil)\n\tdefer tester.Close(t)\n\n\ttester.console.Evaluate(\"obj = {int: 1, string: 'two', list: [3, 3, 3], obj: {null: null, func: function(){}}}\")\n\n\t// Define some specially formatted fields\n\tvar (\n\t\tone   = jsre.NumberColor(\"1\")\n\t\ttwo   = jsre.StringColor(\"\\\"two\\\"\")\n\t\tthree = jsre.NumberColor(\"3\")\n\t\tnull  = jsre.SpecialColor(\"null\")\n\t\tfun   = jsre.FunctionColor(\"function()\")\n\t)\n\t// Assemble the actual output we're after and verify\n\twant := `{\n  int: ` + one + `,\n  list: [` + three + `, ` + three + `, ` + three + `],\n  obj: {\n    null: ` + null + `,\n    func: ` + fun + `\n  },\n  string: ` + two + `\n}\n`\n\tif output := tester.output.String(); output != want {\n\t\tt.Fatalf(\"pretty print mismatch: have %s, want %s\", output, want)\n\t}\n}\n\n// Tests that the JavaScript exceptions are properly formatted and colored.\nfunc TestPrettyError(t *testing.T) {\n\ttester := newTester(t, nil)\n\tdefer tester.Close(t)\n\ttester.console.Evaluate(\"throw 'hello'\")\n\n\twant := jsre.ErrorColor(\"hello\") + \"\\n\\tat <eval>:1:7(1)\\n\\n\"\n\tif output := tester.output.String(); output != want {\n\t\tt.Fatalf(\"pretty error mismatch: have %s, want %s\", output, want)\n\t}\n}\n\n// Tests that the number of indents for JS input is calculated correctly.\nfunc TestIndenting(t *testing.T) {\n\ttestCases := []struct {\n\t\tinput               string\n\t\texpectedIndentCount int\n\t}{\n\t\t{`var a = 1;`, 0},\n\t\t{`\"some string\"`, 0},\n\t\t{`\"some string with (parenthesis`, 0},\n\t\t{`\"some string with newline\n\t\t(\"`, 0},\n\t\t{`function v(a,b) {}`, 0},\n\t\t{`function 
f(a,b) { var str = \"asd(\"; };`, 0},\n\t\t{`function f(a) {`, 1},\n\t\t{`function f(a, function(b) {`, 2},\n\t\t{`function f(a, function(b) {\n\t\t     var str = \"a)}\";\n\t\t  });`, 0},\n\t\t{`function f(a,b) {\n\t\t   var str = \"a{b(\" + a, \", \" + b;\n\t\t   }`, 0},\n\t\t{`var str = \"\\\"{\"`, 0},\n\t\t{`var str = \"'(\"`, 0},\n\t\t{`var str = \"\\\\{\"`, 0},\n\t\t{`var str = \"\\\\\\\\{\"`, 0},\n\t\t{`var str = 'a\"{`, 0},\n\t\t{`var obj = {`, 1},\n\t\t{`var obj = { {a:1`, 2},\n\t\t{`var obj = { {a:1}`, 1},\n\t\t{`var obj = { {a:1}, b:2}`, 0},\n\t\t{`var obj = {}`, 0},\n\t\t{`var obj = {\n\t\t\ta: 1, b: 2\n\t\t}`, 0},\n\t\t{`var test = }`, -1},\n\t\t{`var str = \"a\\\"\"; var obj = {`, 1},\n\t}\n\n\tfor i, tt := range testCases {\n\t\tcounted := countIndents(tt.input)\n\t\tif counted != tt.expectedIndentCount {\n\t\t\tt.Errorf(\"test %d: invalid indenting: have %d, want %d\", i, counted, tt.expectedIndentCount)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "console/prompt/prompter.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage prompt\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/peterh/liner\"\n)\n\n// Stdin holds the stdin line reader (also using stdout for printing prompts).\n// Only this reader may be used for input because it keeps an internal buffer.\nvar Stdin = newTerminalPrompter()\n\n// UserPrompter defines the methods needed by the console to prompt the user for\n// various types of inputs.\ntype UserPrompter interface {\n\t// PromptInput displays the given prompt to the user and requests some textual\n\t// data to be entered, returning the input of the user.\n\tPromptInput(prompt string) (string, error)\n\n\t// PromptPassword displays the given prompt to the user and requests some textual\n\t// data to be entered, but one which must not be echoed out into the terminal.\n\t// The method returns the input provided by the user.\n\tPromptPassword(prompt string) (string, error)\n\n\t// PromptConfirm displays the given prompt to the user and requests a boolean\n\t// choice to be made, returning that choice.\n\tPromptConfirm(prompt string) (bool, error)\n\n\t// SetHistory sets the input scrollback history that the prompter will allow\n\t// the user to scroll back to.\n\tSetHistory(history []string)\n\n\t// AppendHistory appends an entry to the scrollback history. It should be called\n\t// if and only if the prompt to append was a valid command.\n\tAppendHistory(command string)\n\n\t// ClearHistory clears the entire history\n\tClearHistory()\n\n\t// SetWordCompleter sets the completion function that the prompter will call to\n\t// fetch completion candidates when the user presses tab.\n\tSetWordCompleter(completer WordCompleter)\n}\n\n// WordCompleter takes the currently edited line with the cursor position and\n// returns the completion candidates for the partial word to be completed. If\n// the line is \"Hello, wo!!!\" and the cursor is before the first '!', (\"Hello,\n// wo!!!\", 9) is passed to the completer which may returns (\"Hello, \", {\"world\",\n// \"Word\"}, \"!!!\") to have \"Hello, world!!!\".\ntype WordCompleter func(line string, pos int) (string, []string, string)\n\n// terminalPrompter is a UserPrompter backed by the liner package. 
It supports\n// prompting the user for various input, among others for non-echoing password\n// input.\ntype terminalPrompter struct {\n\t*liner.State\n\twarned     bool\n\tsupported  bool\n\tnormalMode liner.ModeApplier\n\trawMode    liner.ModeApplier\n}\n\n// newTerminalPrompter creates a liner based user input prompter working off the\n// standard input and output streams.\nfunc newTerminalPrompter() *terminalPrompter {\n\tp := new(terminalPrompter)\n\t// Get the original mode before calling NewLiner.\n\t// This is usually regular \"cooked\" mode where characters echo.\n\tnormalMode, _ := liner.TerminalMode()\n\t// Turn on liner. It switches to raw mode.\n\tp.State = liner.NewLiner()\n\trawMode, err := liner.TerminalMode()\n\tif err != nil || !liner.TerminalSupported() {\n\t\tp.supported = false\n\t} else {\n\t\tp.supported = true\n\t\tp.normalMode = normalMode\n\t\tp.rawMode = rawMode\n\t\t// Switch back to normal mode while we're not prompting.\n\t\tnormalMode.ApplyMode()\n\t}\n\tp.SetCtrlCAborts(true)\n\tp.SetTabCompletionStyle(liner.TabPrints)\n\tp.SetMultiLineMode(true)\n\treturn p\n}\n\n// PromptInput displays the given prompt to the user and requests some textual\n// data to be entered, returning the input of the user.\nfunc (p *terminalPrompter) PromptInput(prompt string) (string, error) {\n\tif p.supported {\n\t\tp.rawMode.ApplyMode()\n\t\tdefer p.normalMode.ApplyMode()\n\t} else {\n\t\t// liner tries to be smart about printing the prompt\n\t\t// and doesn't print anything if input is redirected.\n\t\t// Un-smart it by printing the prompt always.\n\t\tfmt.Print(prompt)\n\t\tprompt = \"\"\n\t\tdefer fmt.Println()\n\t}\n\treturn p.State.Prompt(prompt)\n}\n\n// PromptPassword displays the given prompt to the user and requests some textual\n// data to be entered, but one which must not be echoed out into the terminal.\n// The method returns the input provided by the user.\nfunc (p *terminalPrompter) PromptPassword(prompt string) (passwd string, err error) {\n\tif p.supported {\n\t\tp.rawMode.ApplyMode()\n\t\tdefer p.normalMode.ApplyMode()\n\t\treturn p.State.PasswordPrompt(prompt)\n\t}\n\tif !p.warned {\n\t\tfmt.Println(\"!! 
Unsupported terminal, password will be echoed.\")\n\t\tp.warned = true\n\t}\n\t// Just as in Prompt, handle printing the prompt here instead of relying on liner.\n\tfmt.Print(prompt)\n\tpasswd, err = p.State.Prompt(\"\")\n\tfmt.Println()\n\treturn passwd, err\n}\n\n// PromptConfirm displays the given prompt to the user and requests a boolean\n// choice to be made, returning that choice.\nfunc (p *terminalPrompter) PromptConfirm(prompt string) (bool, error) {\n\tinput, err := p.Prompt(prompt + \" [y/n] \")\n\tif len(input) > 0 && strings.ToUpper(input[:1]) == \"Y\" {\n\t\treturn true, nil\n\t}\n\treturn false, err\n}\n\n// SetHistory sets the input scrollback history that the prompter will allow\n// the user to scroll back to.\nfunc (p *terminalPrompter) SetHistory(history []string) {\n\tp.State.ReadHistory(strings.NewReader(strings.Join(history, \"\\n\")))\n}\n\n// AppendHistory appends an entry to the scrollback history.\nfunc (p *terminalPrompter) AppendHistory(command string) {\n\tp.State.AppendHistory(command)\n}\n\n// ClearHistory clears the entire history\nfunc (p *terminalPrompter) ClearHistory() {\n\tp.State.ClearHistory()\n}\n\n// SetWordCompleter sets the completion function that the prompter will call to\n// fetch completion candidates when the user presses tab.\nfunc (p *terminalPrompter) SetWordCompleter(completer WordCompleter) {\n\tp.State.SetWordCompleter(liner.WordCompleter(completer))\n}\n"
  },
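  {
    "path": "console/prompt/example_sketch/main.go",
    "content": "// Illustrative sketch only -- a hypothetical file, not part of go-ethereum.\n// A canned, non-interactive UserPrompter: it answers every prompt from a\n// fixed queue, which is handy for driving the console from scripts or tests.\n// The compile-time assertion pins down the full interface contract declared\n// in console/prompt/prompter.go.\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/ethereum/go-ethereum/console/prompt\"\n)\n\ntype cannedPrompter struct {\n\tanswers []string // queued responses, consumed front to back\n}\n\nvar _ prompt.UserPrompter = (*cannedPrompter)(nil)\n\nfunc (p *cannedPrompter) next() (string, error) {\n\tif len(p.answers) == 0 {\n\t\treturn \"\", errors.New(\"canned prompter: out of answers\")\n\t}\n\tanswer := p.answers[0]\n\tp.answers = p.answers[1:]\n\treturn answer, nil\n}\n\nfunc (p *cannedPrompter) PromptInput(text string) (string, error)    { return p.next() }\nfunc (p *cannedPrompter) PromptPassword(text string) (string, error) { return p.next() }\nfunc (p *cannedPrompter) PromptConfirm(text string) (bool, error) {\n\tanswer, err := p.next()\n\treturn err == nil && answer == \"y\", err\n}\nfunc (p *cannedPrompter) SetHistory(history []string)                     {}\nfunc (p *cannedPrompter) AppendHistory(command string)                    {}\nfunc (p *cannedPrompter) ClearHistory()                                   {}\nfunc (p *cannedPrompter) SetWordCompleter(completer prompt.WordCompleter) {}\n\nfunc main() {\n\tp := &cannedPrompter{answers: []string{\"2+2\", \"y\"}}\n\tline, _ := p.PromptInput(\"> \")\n\tok, _ := p.PromptConfirm(\"continue?\")\n\tfmt.Println(line, ok) // 2+2 true\n}\n"
  },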
  {
    "path": "console/testdata/exec.js",
    "content": "var execed = \"some-executed-string\";\n"
  },
  {
    "path": "console/testdata/preload.js",
    "content": "var preloaded = \"some-preloaded-string\";\n"
  },
  {
    "path": "eth/api.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"compress/gzip\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/big\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/internal/ethapi\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\n// PublicEthereumAPI provides an API to access Ethereum full node-related\n// information.\ntype PublicEthereumAPI struct {\n\te *Ethereum\n}\n\n// NewPublicEthereumAPI creates a new Ethereum protocol API for full nodes.\nfunc NewPublicEthereumAPI(e *Ethereum) *PublicEthereumAPI {\n\treturn &PublicEthereumAPI{e}\n}\n\n// Etherbase is the address that mining rewards will be send to\nfunc (api *PublicEthereumAPI) Etherbase() (common.Address, error) {\n\treturn api.e.Etherbase()\n}\n\n// Coinbase is the address that mining rewards will be send to (alias for Etherbase)\nfunc (api *PublicEthereumAPI) Coinbase() (common.Address, error) {\n\treturn api.Etherbase()\n}\n\n// ChainId is the EIP-155 replay-protection chain id for the current ethereum chain config.\nfunc (api *PublicEthereumAPI) ChainId() (hexutil.Uint64, error) {\n\t// if current block is at or past the EIP-155 replay-protection fork block, return chainID from config\n\tif config := api.e.blockchain.Config(); config.IsEIP155(api.e.blockchain.CurrentBlock().Number()) {\n\t\treturn (hexutil.Uint64)(config.ChainID.Uint64()), nil\n\t}\n\treturn hexutil.Uint64(0), fmt.Errorf(\"chain not synced beyond EIP-155 replay-protection fork block\")\n}\n\n// PublicMinerAPI provides an API to control the miner.\n// It offers only methods that operate on data that pose no security risk when it is publicly accessible.\ntype PublicMinerAPI struct {\n\te *Ethereum\n}\n\n// NewPublicMinerAPI create a new PublicMinerAPI instance.\nfunc NewPublicMinerAPI(e *Ethereum) *PublicMinerAPI {\n\treturn &PublicMinerAPI{e}\n}\n\n// Mining returns an indication if this node is currently mining.\nfunc (api *PublicMinerAPI) Mining() bool {\n\treturn api.e.IsMining()\n}\n\n// PrivateMinerAPI provides private RPC methods to control the miner.\n// These methods can be abused by external users and must be considered insecure for use by untrusted users.\ntype PrivateMinerAPI struct {\n\te *Ethereum\n}\n\n// NewPrivateMinerAPI create a new RPC service which controls the miner of this node.\nfunc NewPrivateMinerAPI(e *Ethereum) 
*PrivateMinerAPI {\n\treturn &PrivateMinerAPI{e: e}\n}\n\n// Start starts the miner with the given number of threads. If threads is nil,\n// the number of workers started is equal to the number of logical CPUs that are\n// usable by this process. If mining is already running, this method adjusts the\n// number of threads allowed to be used and updates the minimum price required by the\n// transaction pool.\nfunc (api *PrivateMinerAPI) Start(threads *int) error {\n\tif threads == nil {\n\t\treturn api.e.StartMining(runtime.NumCPU())\n\t}\n\treturn api.e.StartMining(*threads)\n}\n\n// Stop terminates the miner, both at the consensus engine level as well as at\n// the block creation level.\nfunc (api *PrivateMinerAPI) Stop() {\n\tapi.e.StopMining()\n}\n\n// SetExtra sets the extra data string that is included when this miner mines a block.\nfunc (api *PrivateMinerAPI) SetExtra(extra string) (bool, error) {\n\tif err := api.e.Miner().SetExtra([]byte(extra)); err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\n// SetGasPrice sets the minimum accepted gas price for the miner.\nfunc (api *PrivateMinerAPI) SetGasPrice(gasPrice hexutil.Big) bool {\n\tapi.e.lock.Lock()\n\tapi.e.gasPrice = (*big.Int)(&gasPrice)\n\tapi.e.lock.Unlock()\n\n\tapi.e.txPool.SetGasPrice((*big.Int)(&gasPrice))\n\treturn true\n}\n\n// SetEtherbase sets the etherbase of the miner\nfunc (api *PrivateMinerAPI) SetEtherbase(etherbase common.Address) bool {\n\tapi.e.SetEtherbase(etherbase)\n\treturn true\n}\n\n// SetRecommitInterval updates the interval for miner sealing work recommitting.\nfunc (api *PrivateMinerAPI) SetRecommitInterval(interval int) {\n\tapi.e.Miner().SetRecommitInterval(time.Duration(interval) * time.Millisecond)\n}\n\n// PrivateAdminAPI is the collection of Ethereum full node-related APIs\n// exposed over the private admin endpoint.\ntype PrivateAdminAPI struct {\n\teth *Ethereum\n}\n\n// NewPrivateAdminAPI creates a new API definition for the full node private\n// admin methods of the Ethereum service.\nfunc NewPrivateAdminAPI(eth *Ethereum) *PrivateAdminAPI {\n\treturn &PrivateAdminAPI{eth: eth}\n}\n\n// ExportChain exports the current blockchain into a local file,\n// or a range of blocks if first and last are non-nil\nfunc (api *PrivateAdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool, error) {\n\tif first == nil && last != nil {\n\t\treturn false, errors.New(\"last cannot be specified without first\")\n\t}\n\tif first != nil && last == nil {\n\t\thead := api.eth.BlockChain().CurrentHeader().Number.Uint64()\n\t\tlast = &head\n\t}\n\tif _, err := os.Stat(file); err == nil {\n\t\t// File already exists. 
Allowing overwrite could be a DoS vector,\n\t\t// since the 'file' may point to arbitrary paths on the drive\n\t\treturn false, errors.New(\"location would overwrite an existing file\")\n\t}\n\t// Make sure we can create the file to export into\n\tout, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer out.Close()\n\n\tvar writer io.Writer = out\n\tif strings.HasSuffix(file, \".gz\") {\n\t\twriter = gzip.NewWriter(writer)\n\t\tdefer writer.(*gzip.Writer).Close()\n\t}\n\n\t// Export the blockchain\n\tif first != nil {\n\t\tif err := api.eth.BlockChain().ExportN(writer, *first, *last); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t} else if err := api.eth.BlockChain().Export(writer); err != nil {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\nfunc hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {\n\tfor _, b := range bs {\n\t\tif !chain.HasBlock(b.Hash(), b.NumberU64()) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n// ImportChain imports a blockchain from a local file.\nfunc (api *PrivateAdminAPI) ImportChain(file string) (bool, error) {\n\t// Make sure we can access the file to import\n\tin, err := os.Open(file)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer in.Close()\n\n\tvar reader io.Reader = in\n\tif strings.HasSuffix(file, \".gz\") {\n\t\tif reader, err = gzip.NewReader(reader); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\t// Run the actual import in pre-configured batches\n\tstream := rlp.NewStream(reader, 0)\n\n\tblocks, index := make([]*types.Block, 0, 2500), 0\n\tfor batch := 0; ; batch++ {\n\t\t// Load a batch of blocks from the input file\n\t\tfor len(blocks) < cap(blocks) {\n\t\t\tblock := new(types.Block)\n\t\t\tif err := stream.Decode(block); err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\treturn false, fmt.Errorf(\"block %d: failed to parse: %v\", index, err)\n\t\t\t}\n\t\t\tblocks = append(blocks, block)\n\t\t\tindex++\n\t\t}\n\t\tif len(blocks) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tif hasAllBlocks(api.eth.BlockChain(), blocks) {\n\t\t\tblocks = blocks[:0]\n\t\t\tcontinue\n\t\t}\n\t\t// Import the batch and reset the buffer\n\t\tif _, err := api.eth.BlockChain().InsertChain(blocks); err != nil {\n\t\t\treturn false, fmt.Errorf(\"batch %d: failed to insert: %v\", batch, err)\n\t\t}\n\t\tblocks = blocks[:0]\n\t}\n\treturn true, nil\n}\n\n// PublicDebugAPI is the collection of Ethereum full node APIs exposed\n// over the public debugging endpoint.\ntype PublicDebugAPI struct {\n\teth *Ethereum\n}\n\n// NewPublicDebugAPI creates a new API definition for the full node-\n// related public debug methods of the Ethereum service.\nfunc NewPublicDebugAPI(eth *Ethereum) *PublicDebugAPI {\n\treturn &PublicDebugAPI{eth: eth}\n}\n\n// DumpBlock retrieves the entire state of the database at a given block.\nfunc (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) {\n\tif blockNr == rpc.PendingBlockNumber {\n\t\t// If we're dumping the pending state, we need to request\n\t\t// both the pending block as well as the pending state from\n\t\t// the miner and operate on those\n\t\t_, stateDb := api.eth.miner.Pending()\n\t\treturn stateDb.RawDump(false, false, true), nil\n\t}\n\tvar block *types.Block\n\tif blockNr == rpc.LatestBlockNumber {\n\t\tblock = api.eth.blockchain.CurrentBlock()\n\t} else {\n\t\tblock = api.eth.blockchain.GetBlockByNumber(uint64(blockNr))\n\t}\n\tif block == nil {\n\t\treturn state.Dump{}, 
fmt.Errorf(\"block #%d not found\", blockNr)\n\t}\n\tstateDb, err := api.eth.BlockChain().StateAt(block.Root())\n\tif err != nil {\n\t\treturn state.Dump{}, err\n\t}\n\treturn stateDb.RawDump(false, false, true), nil\n}\n\n// PrivateDebugAPI is the collection of Ethereum full node APIs exposed over\n// the private debugging endpoint.\ntype PrivateDebugAPI struct {\n\teth *Ethereum\n}\n\n// NewPrivateDebugAPI creates a new API definition for the full node-related\n// private debug methods of the Ethereum service.\nfunc NewPrivateDebugAPI(eth *Ethereum) *PrivateDebugAPI {\n\treturn &PrivateDebugAPI{eth: eth}\n}\n\n// Preimage is a debug API function that returns the preimage for a sha3 hash, if known.\nfunc (api *PrivateDebugAPI) Preimage(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) {\n\tif preimage := rawdb.ReadPreimage(api.eth.ChainDb(), hash); preimage != nil {\n\t\treturn preimage, nil\n\t}\n\treturn nil, errors.New(\"unknown preimage\")\n}\n\n// BadBlockArgs represents the entries in the list returned when bad blocks are queried.\ntype BadBlockArgs struct {\n\tHash  common.Hash            `json:\"hash\"`\n\tBlock map[string]interface{} `json:\"block\"`\n\tRLP   string                 `json:\"rlp\"`\n}\n\n// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network\n// and returns them as a JSON list of block-hashes\nfunc (api *PrivateDebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) {\n\tvar (\n\t\terr     error\n\t\tblocks  = rawdb.ReadAllBadBlocks(api.eth.chainDb)\n\t\tresults = make([]*BadBlockArgs, 0, len(blocks))\n\t)\n\tfor _, block := range blocks {\n\t\tvar (\n\t\t\tblockRlp  string\n\t\t\tblockJSON map[string]interface{}\n\t\t)\n\t\tif rlpBytes, err := rlp.EncodeToBytes(block); err != nil {\n\t\t\tblockRlp = err.Error() // Hacky, but hey, it works\n\t\t} else {\n\t\t\tblockRlp = fmt.Sprintf(\"0x%x\", rlpBytes)\n\t\t}\n\t\tif blockJSON, err = ethapi.RPCMarshalBlock(block, true, true); err != nil {\n\t\t\tblockJSON = map[string]interface{}{\"error\": err.Error()}\n\t\t}\n\t\tresults = append(results, &BadBlockArgs{\n\t\t\tHash:  block.Hash(),\n\t\t\tRLP:   blockRlp,\n\t\t\tBlock: blockJSON,\n\t\t})\n\t}\n\treturn results, nil\n}\n\n// AccountRangeMaxResults is the maximum number of results to be returned per call\nconst AccountRangeMaxResults = 256\n\n// AccountRange enumerates all accounts in the given block and start point in paging request\nfunc (api *PublicDebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start []byte, maxResults int, nocode, nostorage, incompletes bool) (state.IteratorDump, error) {\n\tvar stateDb *state.StateDB\n\tvar err error\n\n\tif number, ok := blockNrOrHash.Number(); ok {\n\t\tif number == rpc.PendingBlockNumber {\n\t\t\t// If we're dumping the pending state, we need to request\n\t\t\t// both the pending block as well as the pending state from\n\t\t\t// the miner and operate on those\n\t\t\t_, stateDb = api.eth.miner.Pending()\n\t\t} else {\n\t\t\tvar block *types.Block\n\t\t\tif number == rpc.LatestBlockNumber {\n\t\t\t\tblock = api.eth.blockchain.CurrentBlock()\n\t\t\t} else {\n\t\t\t\tblock = api.eth.blockchain.GetBlockByNumber(uint64(number))\n\t\t\t}\n\t\t\tif block == nil {\n\t\t\t\treturn state.IteratorDump{}, fmt.Errorf(\"block #%d not found\", number)\n\t\t\t}\n\t\t\tstateDb, err = api.eth.BlockChain().StateAt(block.Root())\n\t\t\tif err != nil {\n\t\t\t\treturn state.IteratorDump{}, err\n\t\t\t}\n\t\t}\n\t} else if hash, ok := blockNrOrHash.Hash(); ok 
{\n\t\tblock := api.eth.blockchain.GetBlockByHash(hash)\n\t\tif block == nil {\n\t\t\treturn state.IteratorDump{}, fmt.Errorf(\"block %s not found\", hash.Hex())\n\t\t}\n\t\tstateDb, err = api.eth.BlockChain().StateAt(block.Root())\n\t\tif err != nil {\n\t\t\treturn state.IteratorDump{}, err\n\t\t}\n\t} else {\n\t\treturn state.IteratorDump{}, errors.New(\"either block number or block hash must be specified\")\n\t}\n\n\tif maxResults > AccountRangeMaxResults || maxResults <= 0 {\n\t\tmaxResults = AccountRangeMaxResults\n\t}\n\treturn stateDb.IteratorDump(nocode, nostorage, incompletes, start, maxResults), nil\n}\n\n// StorageRangeResult is the result of a debug_storageRangeAt API call.\ntype StorageRangeResult struct {\n\tStorage storageMap   `json:\"storage\"`\n\tNextKey *common.Hash `json:\"nextKey\"` // nil if Storage includes the last key in the trie.\n}\n\ntype storageMap map[common.Hash]storageEntry\n\ntype storageEntry struct {\n\tKey   *common.Hash `json:\"key\"`\n\tValue common.Hash  `json:\"value\"`\n}\n\n// StorageRangeAt returns the storage at the given block height and transaction index.\nfunc (api *PrivateDebugAPI) StorageRangeAt(blockHash common.Hash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) {\n\t// Retrieve the block\n\tblock := api.eth.blockchain.GetBlockByHash(blockHash)\n\tif block == nil {\n\t\treturn StorageRangeResult{}, fmt.Errorf(\"block %#x not found\", blockHash)\n\t}\n\t_, _, statedb, release, err := api.eth.stateAtTransaction(block, txIndex, 0)\n\tif err != nil {\n\t\treturn StorageRangeResult{}, err\n\t}\n\tdefer release()\n\tst := statedb.StorageTrie(contractAddress)\n\tif st == nil {\n\t\treturn StorageRangeResult{}, fmt.Errorf(\"account %x doesn't exist\", contractAddress)\n\t}\n\treturn storageRangeAt(st, keyStart, maxResult)\n}\n\nfunc storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeResult, error) {\n\tit := trie.NewIterator(st.NodeIterator(start))\n\tresult := StorageRangeResult{Storage: storageMap{}}\n\tfor i := 0; i < maxResult && it.Next(); i++ {\n\t\t_, content, _, err := rlp.Split(it.Value)\n\t\tif err != nil {\n\t\t\treturn StorageRangeResult{}, err\n\t\t}\n\t\te := storageEntry{Value: common.BytesToHash(content)}\n\t\tif preimage := st.GetKey(it.Key); preimage != nil {\n\t\t\tpreimage := common.BytesToHash(preimage)\n\t\t\te.Key = &preimage\n\t\t}\n\t\tresult.Storage[common.BytesToHash(it.Key)] = e\n\t}\n\t// Add the 'next key' so clients can continue downloading.\n\tif it.Next() {\n\t\tnext := common.BytesToHash(it.Key)\n\t\tresult.NextKey = &next\n\t}\n\treturn result, nil\n}\n\n// GetModifiedAccountsByNumber returns all accounts that have changed between the\n// two blocks specified. 
A change is defined as a difference in nonce, balance,\n// code hash, or storage hash.\n//\n// With one parameter, returns the list of accounts modified in the specified block.\nfunc (api *PrivateDebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) {\n\tvar startBlock, endBlock *types.Block\n\n\tstartBlock = api.eth.blockchain.GetBlockByNumber(startNum)\n\tif startBlock == nil {\n\t\treturn nil, fmt.Errorf(\"start block %x not found\", startNum)\n\t}\n\n\tif endNum == nil {\n\t\tendBlock = startBlock\n\t\tstartBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())\n\t\tif startBlock == nil {\n\t\t\treturn nil, fmt.Errorf(\"block %x has no parent\", endBlock.Number())\n\t\t}\n\t} else {\n\t\tendBlock = api.eth.blockchain.GetBlockByNumber(*endNum)\n\t\tif endBlock == nil {\n\t\t\treturn nil, fmt.Errorf(\"end block %d not found\", *endNum)\n\t\t}\n\t}\n\treturn api.getModifiedAccounts(startBlock, endBlock)\n}\n\n// GetModifiedAccountsByHash returns all accounts that have changed between the\n// two blocks specified. A change is defined as a difference in nonce, balance,\n// code hash, or storage hash.\n//\n// With one parameter, returns the list of accounts modified in the specified block.\nfunc (api *PrivateDebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) {\n\tvar startBlock, endBlock *types.Block\n\tstartBlock = api.eth.blockchain.GetBlockByHash(startHash)\n\tif startBlock == nil {\n\t\treturn nil, fmt.Errorf(\"start block %x not found\", startHash)\n\t}\n\n\tif endHash == nil {\n\t\tendBlock = startBlock\n\t\tstartBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash())\n\t\tif startBlock == nil {\n\t\t\treturn nil, fmt.Errorf(\"block %x has no parent\", endBlock.Number())\n\t\t}\n\t} else {\n\t\tendBlock = api.eth.blockchain.GetBlockByHash(*endHash)\n\t\tif endBlock == nil {\n\t\t\treturn nil, fmt.Errorf(\"end block %x not found\", *endHash)\n\t\t}\n\t}\n\treturn api.getModifiedAccounts(startBlock, endBlock)\n}\n\nfunc (api *PrivateDebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) {\n\tif startBlock.Number().Uint64() >= endBlock.Number().Uint64() {\n\t\treturn nil, fmt.Errorf(\"start block height (%d) must be less than end block height (%d)\", startBlock.Number().Uint64(), endBlock.Number().Uint64())\n\t}\n\ttriedb := api.eth.BlockChain().StateCache().TrieDB()\n\n\toldTrie, err := trie.NewSecure(startBlock.Root(), triedb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewTrie, err := trie.NewSecure(endBlock.Root(), triedb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdiff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{}))\n\titer := trie.NewIterator(diff)\n\n\tvar dirty []common.Address\n\tfor iter.Next() {\n\t\tkey := newTrie.GetKey(iter.Key)\n\t\tif key == nil {\n\t\t\treturn nil, fmt.Errorf(\"no preimage found for hash %x\", iter.Key)\n\t\t}\n\t\tdirty = append(dirty, common.BytesToAddress(key))\n\t}\n\treturn dirty, nil\n}\n"
  },
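  {
    "path": "docs/sketches/storage_range_paging.go",
    "content": "// Hypothetical sketch, NOT part of go-ethereum: shows how a client could\n// consume the debug_storageRangeAt endpoint defined in eth/api.go above,\n// following the NextKey cursor until the storage trie is exhausted. The\n// endpoint URL, block hash, contract address and page size are placeholder\n// assumptions, not values from the sources above.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\n// storageEntry and storageRange mirror the JSON shape of StorageRangeResult.\ntype storageEntry struct {\n\tKey   *common.Hash `json:\"key\"`\n\tValue common.Hash  `json:\"value\"`\n}\n\ntype storageRange struct {\n\tStorage map[common.Hash]storageEntry `json:\"storage\"`\n\tNextKey *common.Hash                 `json:\"nextKey\"` // nil once the last key was returned\n}\n\nfunc main() {\n\tclient, err := rpc.Dial(\"http://localhost:8545\") // placeholder endpoint\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer client.Close()\n\n\tvar (\n\t\tblockHash = common.HexToHash(\"0x00\")    // placeholder block hash\n\t\tcontract  = common.HexToAddress(\"0x01\") // placeholder contract address\n\t\tstart     = hexutil.Bytes{}             // empty start key: begin at the first slot\n\t)\n\tfor {\n\t\tvar page storageRange\n\t\t// Request 16 entries per page, starting at the cursor (txIndex 0 assumed).\n\t\tif err := client.Call(&page, \"debug_storageRangeAt\", blockHash, 0, contract, start, 16); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfor hashedKey, entry := range page.Storage {\n\t\t\tfmt.Printf(\"%x -> %x\\n\", hashedKey, entry.Value)\n\t\t}\n\t\tif page.NextKey == nil {\n\t\t\tbreak // last page reached\n\t\t}\n\t\tstart = hexutil.Bytes(page.NextKey.Bytes()) // resume from the cursor\n\t}\n}\n"
  },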
  {
    "path": "eth/api_backend.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"math/big\"\n\n\t\"github.com/ethereum/go-ethereum/accounts\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/bloombits\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/eth/gasprice\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/miner\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\n// EthAPIBackend implements ethapi.Backend for full nodes\ntype EthAPIBackend struct {\n\textRPCEnabled       bool\n\tallowUnprotectedTxs bool\n\teth                 *Ethereum\n\tgpo                 *gasprice.Oracle\n}\n\n// ChainConfig returns the active chain configuration.\nfunc (b *EthAPIBackend) ChainConfig() *params.ChainConfig {\n\treturn b.eth.blockchain.Config()\n}\n\nfunc (b *EthAPIBackend) CurrentBlock() *types.Block {\n\treturn b.eth.blockchain.CurrentBlock()\n}\n\nfunc (b *EthAPIBackend) SetHead(number uint64) {\n\tb.eth.handler.downloader.Cancel()\n\tb.eth.blockchain.SetHead(number)\n}\n\nfunc (b *EthAPIBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {\n\t// Pending block is only known by the miner\n\tif number == rpc.PendingBlockNumber {\n\t\tblock := b.eth.miner.PendingBlock()\n\t\treturn block.Header(), nil\n\t}\n\t// Otherwise resolve and return the block\n\tif number == rpc.LatestBlockNumber {\n\t\treturn b.eth.blockchain.CurrentBlock().Header(), nil\n\t}\n\treturn b.eth.blockchain.GetHeaderByNumber(uint64(number)), nil\n}\n\nfunc (b *EthAPIBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) {\n\tif blockNr, ok := blockNrOrHash.Number(); ok {\n\t\treturn b.HeaderByNumber(ctx, blockNr)\n\t}\n\tif hash, ok := blockNrOrHash.Hash(); ok {\n\t\theader := b.eth.blockchain.GetHeaderByHash(hash)\n\t\tif header == nil {\n\t\t\treturn nil, errors.New(\"header for hash not found\")\n\t\t}\n\t\tif blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash {\n\t\t\treturn nil, errors.New(\"hash is not currently canonical\")\n\t\t}\n\t\treturn header, nil\n\t}\n\treturn nil, errors.New(\"invalid arguments; neither block nor hash specified\")\n}\n\nfunc (b *EthAPIBackend) HeaderByHash(ctx context.Context, 
hash common.Hash) (*types.Header, error) {\n\treturn b.eth.blockchain.GetHeaderByHash(hash), nil\n}\n\nfunc (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {\n\t// Pending block is only known by the miner\n\tif number == rpc.PendingBlockNumber {\n\t\tblock := b.eth.miner.PendingBlock()\n\t\treturn block, nil\n\t}\n\t// Otherwise resolve and return the block\n\tif number == rpc.LatestBlockNumber {\n\t\treturn b.eth.blockchain.CurrentBlock(), nil\n\t}\n\treturn b.eth.blockchain.GetBlockByNumber(uint64(number)), nil\n}\n\nfunc (b *EthAPIBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {\n\treturn b.eth.blockchain.GetBlockByHash(hash), nil\n}\n\nfunc (b *EthAPIBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) {\n\tif blockNr, ok := blockNrOrHash.Number(); ok {\n\t\treturn b.BlockByNumber(ctx, blockNr)\n\t}\n\tif hash, ok := blockNrOrHash.Hash(); ok {\n\t\theader := b.eth.blockchain.GetHeaderByHash(hash)\n\t\tif header == nil {\n\t\t\treturn nil, errors.New(\"header for hash not found\")\n\t\t}\n\t\tif blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash {\n\t\t\treturn nil, errors.New(\"hash is not currently canonical\")\n\t\t}\n\t\tblock := b.eth.blockchain.GetBlock(hash, header.Number.Uint64())\n\t\tif block == nil {\n\t\t\treturn nil, errors.New(\"header found, but block body is missing\")\n\t\t}\n\t\treturn block, nil\n\t}\n\treturn nil, errors.New(\"invalid arguments; neither block nor hash specified\")\n}\n\nfunc (b *EthAPIBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) {\n\t// Pending state is only known by the miner\n\tif number == rpc.PendingBlockNumber {\n\t\tblock, state := b.eth.miner.Pending()\n\t\treturn state, block.Header(), nil\n\t}\n\t// Otherwise resolve the block number and return its state\n\theader, err := b.HeaderByNumber(ctx, number)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif header == nil {\n\t\treturn nil, nil, errors.New(\"header not found\")\n\t}\n\tstateDb, err := b.eth.BlockChain().StateAt(header.Root)\n\treturn stateDb, header, err\n}\n\nfunc (b *EthAPIBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) {\n\tif blockNr, ok := blockNrOrHash.Number(); ok {\n\t\treturn b.StateAndHeaderByNumber(ctx, blockNr)\n\t}\n\tif hash, ok := blockNrOrHash.Hash(); ok {\n\t\theader, err := b.HeaderByHash(ctx, hash)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tif header == nil {\n\t\t\treturn nil, nil, errors.New(\"header for hash not found\")\n\t\t}\n\t\tif blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash {\n\t\t\treturn nil, nil, errors.New(\"hash is not currently canonical\")\n\t\t}\n\t\tstateDb, err := b.eth.BlockChain().StateAt(header.Root)\n\t\treturn stateDb, header, err\n\t}\n\treturn nil, nil, errors.New(\"invalid arguments; neither block nor hash specified\")\n}\n\nfunc (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {\n\treturn b.eth.blockchain.GetReceiptsByHash(hash), nil\n}\n\nfunc (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {\n\treceipts := b.eth.blockchain.GetReceiptsByHash(hash)\n\tif receipts == nil {\n\t\treturn nil, nil\n\t}\n\tlogs := make([][]*types.Log, 
len(receipts))\n\tfor i, receipt := range receipts {\n\t\tlogs[i] = receipt.Logs\n\t}\n\treturn logs, nil\n}\n\nfunc (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {\n\treturn b.eth.blockchain.GetTdByHash(hash)\n}\n\nfunc (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) {\n\tvmError := func() error { return nil }\n\n\ttxContext := core.NewEVMTxContext(msg)\n\tcontext := core.NewEVMBlockContext(header, b.eth.BlockChain(), nil)\n\treturn vm.NewEVM(context, txContext, state, b.eth.blockchain.Config(), *b.eth.blockchain.GetVMConfig()), vmError, nil\n}\n\nfunc (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {\n\treturn b.eth.BlockChain().SubscribeRemovedLogsEvent(ch)\n}\n\nfunc (b *EthAPIBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {\n\treturn b.eth.miner.SubscribePendingLogs(ch)\n}\n\nfunc (b *EthAPIBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {\n\treturn b.eth.BlockChain().SubscribeChainEvent(ch)\n}\n\nfunc (b *EthAPIBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {\n\treturn b.eth.BlockChain().SubscribeChainHeadEvent(ch)\n}\n\nfunc (b *EthAPIBackend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription {\n\treturn b.eth.BlockChain().SubscribeChainSideEvent(ch)\n}\n\nfunc (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {\n\treturn b.eth.BlockChain().SubscribeLogsEvent(ch)\n}\n\nfunc (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {\n\treturn b.eth.txPool.AddLocal(signedTx)\n}\n\nfunc (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) {\n\tpending, err := b.eth.txPool.Pending()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar txs types.Transactions\n\tfor _, batch := range pending {\n\t\ttxs = append(txs, batch...)\n\t}\n\treturn txs, nil\n}\n\nfunc (b *EthAPIBackend) GetPoolTransaction(hash common.Hash) *types.Transaction {\n\treturn b.eth.txPool.Get(hash)\n}\n\nfunc (b *EthAPIBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {\n\ttx, blockHash, blockNumber, index := rawdb.ReadTransaction(b.eth.ChainDb(), txHash)\n\treturn tx, blockHash, blockNumber, index, nil\n}\n\nfunc (b *EthAPIBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) {\n\treturn b.eth.txPool.Nonce(addr), nil\n}\n\nfunc (b *EthAPIBackend) Stats() (pending int, queued int) {\n\treturn b.eth.txPool.Stats()\n}\n\nfunc (b *EthAPIBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {\n\treturn b.eth.TxPool().Content()\n}\n\nfunc (b *EthAPIBackend) TxPool() *core.TxPool {\n\treturn b.eth.TxPool()\n}\n\nfunc (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {\n\treturn b.eth.TxPool().SubscribeNewTxsEvent(ch)\n}\n\nfunc (b *EthAPIBackend) Downloader() *downloader.Downloader {\n\treturn b.eth.Downloader()\n}\n\nfunc (b *EthAPIBackend) SuggestPrice(ctx context.Context) (*big.Int, error) {\n\treturn b.gpo.SuggestPrice(ctx)\n}\n\nfunc (b *EthAPIBackend) ChainDb() ethdb.Database {\n\treturn b.eth.ChainDb()\n}\n\nfunc (b *EthAPIBackend) EventMux() *event.TypeMux {\n\treturn b.eth.EventMux()\n}\n\nfunc (b *EthAPIBackend) AccountManager() *accounts.Manager {\n\treturn 
b.eth.AccountManager()\n}\n\nfunc (b *EthAPIBackend) ExtRPCEnabled() bool {\n\treturn b.extRPCEnabled\n}\n\nfunc (b *EthAPIBackend) UnprotectedAllowed() bool {\n\treturn b.allowUnprotectedTxs\n}\n\nfunc (b *EthAPIBackend) RPCGasCap() uint64 {\n\treturn b.eth.config.RPCGasCap\n}\n\nfunc (b *EthAPIBackend) RPCTxFeeCap() float64 {\n\treturn b.eth.config.RPCTxFeeCap\n}\n\nfunc (b *EthAPIBackend) BloomStatus() (uint64, uint64) {\n\tsections, _, _ := b.eth.bloomIndexer.Sections()\n\treturn params.BloomBitsBlocks, sections\n}\n\nfunc (b *EthAPIBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {\n\tfor i := 0; i < bloomFilterThreads; i++ {\n\t\tgo session.Multiplex(bloomRetrievalBatch, bloomRetrievalWait, b.eth.bloomRequests)\n\t}\n}\n\nfunc (b *EthAPIBackend) Engine() consensus.Engine {\n\treturn b.eth.engine\n}\n\nfunc (b *EthAPIBackend) CurrentHeader() *types.Header {\n\treturn b.eth.blockchain.CurrentHeader()\n}\n\nfunc (b *EthAPIBackend) Miner() *miner.Miner {\n\treturn b.eth.Miner()\n}\n\nfunc (b *EthAPIBackend) StartMining(threads int) error {\n\treturn b.eth.StartMining(threads)\n}\n\nfunc (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) {\n\treturn b.eth.stateAtBlock(block, reexec)\n}\n\nfunc (b *EthAPIBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) {\n\treturn b.eth.statesInRange(fromBlock, toBlock, reexec)\n}\n\nfunc (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) {\n\treturn b.eth.stateAtTransaction(block, txIndex, reexec)\n}\n"
  },
  {
    "path": "eth/api_test.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n)\n\nvar dumper = spew.ConfigState{Indent: \"    \"}\n\nfunc accountRangeTest(t *testing.T, trie *state.Trie, statedb *state.StateDB, start common.Hash, requestedNum int, expectedNum int) state.IteratorDump {\n\tresult := statedb.IteratorDump(true, true, false, start.Bytes(), requestedNum)\n\n\tif len(result.Accounts) != expectedNum {\n\t\tt.Fatalf(\"expected %d results, got %d\", expectedNum, len(result.Accounts))\n\t}\n\tfor address := range result.Accounts {\n\t\tif address == (common.Address{}) {\n\t\t\tt.Fatalf(\"empty address returned\")\n\t\t}\n\t\tif !statedb.Exist(address) {\n\t\t\tt.Fatalf(\"account not found in state %s\", address.Hex())\n\t\t}\n\t}\n\treturn result\n}\n\ntype resultHash []common.Hash\n\nfunc (h resultHash) Len() int           { return len(h) }\nfunc (h resultHash) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }\nfunc (h resultHash) Less(i, j int) bool { return bytes.Compare(h[i].Bytes(), h[j].Bytes()) < 0 }\n\nfunc TestAccountRange(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tstatedb  = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), nil)\n\t\tstate, _ = state.New(common.Hash{}, statedb, nil)\n\t\taddrs    = [AccountRangeMaxResults * 2]common.Address{}\n\t\tm        = map[common.Address]bool{}\n\t)\n\n\tfor i := range addrs {\n\t\thash := common.HexToHash(fmt.Sprintf(\"%x\", i))\n\t\taddr := common.BytesToAddress(crypto.Keccak256Hash(hash.Bytes()).Bytes())\n\t\taddrs[i] = addr\n\t\tstate.SetBalance(addrs[i], big.NewInt(1))\n\t\tif _, ok := m[addr]; ok {\n\t\t\tt.Fatalf(\"bad\")\n\t\t} else {\n\t\t\tm[addr] = true\n\t\t}\n\t}\n\tstate.Commit(true)\n\troot := state.IntermediateRoot(true)\n\n\ttrie, err := statedb.OpenTrie(root)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\taccountRangeTest(t, &trie, state, common.Hash{}, AccountRangeMaxResults/2, AccountRangeMaxResults/2)\n\t// test pagination\n\tfirstResult := accountRangeTest(t, &trie, state, common.Hash{}, AccountRangeMaxResults, AccountRangeMaxResults)\n\tsecondResult := accountRangeTest(t, &trie, state, common.BytesToHash(firstResult.Next), AccountRangeMaxResults, AccountRangeMaxResults)\n\n\thList := make(resultHash, 0)\n\tfor addr1 := range firstResult.Accounts {\n\t\t// If address is empty, then it makes no sense to compare\n\t\t// them as they might be two different accounts.\n\t\tif addr1 == (common.Address{}) {\n\t\t\tcontinue\n\t\t}\n\t\tif _, 
duplicate := secondResult.Accounts[addr1]; duplicate {\n\t\t\tt.Fatalf(\"pagination test failed: results should not overlap\")\n\t\t}\n\t\thList = append(hList, crypto.Keccak256Hash(addr1.Bytes()))\n\t}\n\t// Test to see if it's possible to recover from the middle of the previous\n\t// set and get an even split between the first and second sets.\n\tsort.Sort(hList)\n\tmiddleH := hList[AccountRangeMaxResults/2]\n\tmiddleResult := accountRangeTest(t, &trie, state, middleH, AccountRangeMaxResults, AccountRangeMaxResults)\n\tmissing, infirst, insecond := 0, 0, 0\n\tfor h := range middleResult.Accounts {\n\t\tif _, ok := firstResult.Accounts[h]; ok {\n\t\t\tinfirst++\n\t\t} else if _, ok := secondResult.Accounts[h]; ok {\n\t\t\tinsecond++\n\t\t} else {\n\t\t\tmissing++\n\t\t}\n\t}\n\tif missing != 0 {\n\t\tt.Fatalf(\"%d hashes in the 'middle' set were neither in the first nor the second set\", missing)\n\t}\n\tif infirst != AccountRangeMaxResults/2 {\n\t\tt.Fatalf(\"Imbalance in the number of first-test results: %d != %d\", infirst, AccountRangeMaxResults/2)\n\t}\n\tif insecond != AccountRangeMaxResults/2 {\n\t\tt.Fatalf(\"Imbalance in the number of second-test results: %d != %d\", insecond, AccountRangeMaxResults/2)\n\t}\n}\n\nfunc TestEmptyAccountRange(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tstatedb  = state.NewDatabase(rawdb.NewMemoryDatabase())\n\t\tstate, _ = state.New(common.Hash{}, statedb, nil)\n\t)\n\tstate.Commit(true)\n\tstate.IntermediateRoot(true)\n\tresults := state.IteratorDump(true, true, true, (common.Hash{}).Bytes(), AccountRangeMaxResults)\n\tif bytes.Equal(results.Next, (common.Hash{}).Bytes()) {\n\t\tt.Fatalf(\"Empty results should not return a second page\")\n\t}\n\tif len(results.Accounts) != 0 {\n\t\tt.Fatalf(\"Empty state should not return addresses: %v\", results.Accounts)\n\t}\n}\n\nfunc TestStorageRangeAt(t *testing.T) {\n\tt.Parallel()\n\n\t// Create a state where account 0x010000... 
has a few storage entries.\n\tvar (\n\t\tstate, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)\n\t\taddr     = common.Address{0x01}\n\t\tkeys     = []common.Hash{ // hashes of Keys of storage\n\t\t\tcommon.HexToHash(\"340dd630ad21bf010b4e676dbfa9ba9a02175262d1fa356232cfde6cb5b47ef2\"),\n\t\t\tcommon.HexToHash(\"426fcb404ab2d5d8e61a3d918108006bbb0a9be65e92235bb10eefbdb6dcd053\"),\n\t\t\tcommon.HexToHash(\"48078cfed56339ea54962e72c37c7f588fc4f8e5bc173827ba75cb10a63a96a5\"),\n\t\t\tcommon.HexToHash(\"5723d2c3a83af9b735e3b7f21531e5623d183a9095a56604ead41f3582fdfb75\"),\n\t\t}\n\t\tstorage = storageMap{\n\t\t\tkeys[0]: {Key: &common.Hash{0x02}, Value: common.Hash{0x01}},\n\t\t\tkeys[1]: {Key: &common.Hash{0x04}, Value: common.Hash{0x02}},\n\t\t\tkeys[2]: {Key: &common.Hash{0x01}, Value: common.Hash{0x03}},\n\t\t\tkeys[3]: {Key: &common.Hash{0x03}, Value: common.Hash{0x04}},\n\t\t}\n\t)\n\tfor _, entry := range storage {\n\t\tstate.SetState(addr, *entry.Key, entry.Value)\n\t}\n\n\t// Check a few combinations of limit and start/end.\n\ttests := []struct {\n\t\tstart []byte\n\t\tlimit int\n\t\twant  StorageRangeResult\n\t}{\n\t\t{\n\t\t\tstart: []byte{}, limit: 0,\n\t\t\twant: StorageRangeResult{storageMap{}, &keys[0]},\n\t\t},\n\t\t{\n\t\t\tstart: []byte{}, limit: 100,\n\t\t\twant: StorageRangeResult{storage, nil},\n\t\t},\n\t\t{\n\t\t\tstart: []byte{}, limit: 2,\n\t\t\twant: StorageRangeResult{storageMap{keys[0]: storage[keys[0]], keys[1]: storage[keys[1]]}, &keys[2]},\n\t\t},\n\t\t{\n\t\t\tstart: []byte{0x00}, limit: 4,\n\t\t\twant: StorageRangeResult{storage, nil},\n\t\t},\n\t\t{\n\t\t\tstart: []byte{0x40}, limit: 2,\n\t\t\twant: StorageRangeResult{storageMap{keys[1]: storage[keys[1]], keys[2]: storage[keys[2]]}, &keys[3]},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tresult, err := storageRangeAt(state.StorageTrie(addr), test.start, test.limit)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\tif !reflect.DeepEqual(result, test.want) {\n\t\t\tt.Fatalf(\"wrong result for range 0x%x.., limit %d:\\ngot %s\\nwant %s\",\n\t\t\t\ttest.start, test.limit, dumper.Sdump(result), dumper.Sdump(&test.want))\n\t\t}\n\t}\n}\n"
  },
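  {
    "path": "docs/sketches/account_range_paging.go",
    "content": "// Hypothetical sketch, NOT part of go-ethereum: pages through the\n// debug_accountRange endpoint exercised by TestAccountRange above, resuming\n// from the returned 'next' cursor until the state is exhausted. The endpoint\n// URL and the page size of 64 are placeholder assumptions; the server side\n// clamps pages to AccountRangeMaxResults (256).\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\n// iteratorDump mirrors the fields of state.IteratorDump used by this sketch.\ntype iteratorDump struct {\n\tAccounts map[string]json.RawMessage `json:\"accounts\"`\n\tNext     []byte                     `json:\"next,omitempty\"` // empty on the last page\n}\n\nfunc main() {\n\tclient, err := rpc.Dial(\"http://localhost:8545\") // placeholder endpoint\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer client.Close()\n\n\tvar start []byte // nil start key: begin at the first account\n\tfor {\n\t\tvar page iteratorDump\n\t\t// nocode=true, nostorage=true, incompletes=false: addresses only.\n\t\tif err := client.Call(&page, \"debug_accountRange\", \"latest\", start, 64, true, true, false); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfor addr := range page.Accounts {\n\t\t\tfmt.Println(addr)\n\t\t}\n\t\tif len(page.Next) == 0 {\n\t\t\tbreak // no cursor returned: state exhausted\n\t\t}\n\t\tstart = page.Next // resume after the last returned account\n\t}\n}\n"
  },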
  {
    "path": "eth/backend.go",
    "content": "// Copyright 2014 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package eth implements the Ethereum protocol.\npackage eth\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/accounts\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/consensus\"\n\t\"github.com/ethereum/go-ethereum/consensus/clique\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/bloombits\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state/pruner\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/eth/filters\"\n\t\"github.com/ethereum/go-ethereum/eth/gasprice\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/eth\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/snap\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/internal/ethapi\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/miner\"\n\t\"github.com/ethereum/go-ethereum/node\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\n// Config contains the configuration options of the ETH protocol.\n// Deprecated: use ethconfig.Config instead.\ntype Config = ethconfig.Config\n\n// Ethereum implements the Ethereum full node service.\ntype Ethereum struct {\n\tconfig *ethconfig.Config\n\n\t// Handlers\n\ttxPool             *core.TxPool\n\tblockchain         *core.BlockChain\n\thandler            *handler\n\tethDialCandidates  enode.Iterator\n\tsnapDialCandidates enode.Iterator\n\n\t// DB interfaces\n\tchainDb ethdb.Database // Block chain database\n\n\teventMux       *event.TypeMux\n\tengine         consensus.Engine\n\taccountManager *accounts.Manager\n\n\tbloomRequests     chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests\n\tbloomIndexer      *core.ChainIndexer             // Bloom indexer operating during block imports\n\tcloseBloomHandler chan struct{}\n\n\tAPIBackend *EthAPIBackend\n\n\tminer     *miner.Miner\n\tgasPrice  *big.Int\n\tetherbase common.Address\n\n\tnetworkID     uint64\n\tnetRPCService *ethapi.PublicNetAPI\n\n\tp2pServer *p2p.Server\n\n\tlock sync.RWMutex // Protects the variadic fields (e.g. 
gas price and etherbase)\n}\n\n// New creates a new Ethereum object (including the\n// initialisation of the common Ethereum object)\nfunc New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {\n\t// Ensure configuration values are compatible and sane\n\tif config.SyncMode == downloader.LightSync {\n\t\treturn nil, errors.New(\"can't run eth.Ethereum in light sync mode, use les.LightEthereum\")\n\t}\n\tif !config.SyncMode.IsValid() {\n\t\treturn nil, fmt.Errorf(\"invalid sync mode %d\", config.SyncMode)\n\t}\n\tif config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(common.Big0) <= 0 {\n\t\tlog.Warn(\"Sanitizing invalid miner gas price\", \"provided\", config.Miner.GasPrice, \"updated\", ethconfig.Defaults.Miner.GasPrice)\n\t\tconfig.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice)\n\t}\n\tif config.NoPruning && config.TrieDirtyCache > 0 {\n\t\tif config.SnapshotCache > 0 {\n\t\t\tconfig.TrieCleanCache += config.TrieDirtyCache * 3 / 5\n\t\t\tconfig.SnapshotCache += config.TrieDirtyCache * 2 / 5\n\t\t} else {\n\t\t\tconfig.TrieCleanCache += config.TrieDirtyCache\n\t\t}\n\t\tconfig.TrieDirtyCache = 0\n\t}\n\tlog.Info(\"Allocated trie memory caches\", \"clean\", common.StorageSize(config.TrieCleanCache)*1024*1024, \"dirty\", common.StorageSize(config.TrieDirtyCache)*1024*1024)\n\n\t// Transfer mining-related config to the ethash config.\n\tethashConfig := config.Ethash\n\tethashConfig.NotifyFull = config.Miner.NotifyFull\n\n\t// Assemble the Ethereum object\n\tchainDb, err := stack.OpenDatabaseWithFreezer(\"chaindata\", config.DatabaseCache, config.DatabaseHandles, config.DatabaseFreezer, \"eth/db/chaindata/\", false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tchainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideBerlin)\n\tif _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {\n\t\treturn nil, genesisErr\n\t}\n\tlog.Info(\"Initialised chain configuration\", \"config\", chainConfig)\n\n\tif err := pruner.RecoverPruning(stack.ResolvePath(\"\"), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil {\n\t\tlog.Error(\"Failed to recover state\", \"error\", err)\n\t}\n\teth := &Ethereum{\n\t\tconfig:            config,\n\t\tchainDb:           chainDb,\n\t\teventMux:          stack.EventMux(),\n\t\taccountManager:    stack.AccountManager(),\n\t\tengine:            ethconfig.CreateConsensusEngine(stack, chainConfig, &ethashConfig, config.Miner.Notify, config.Miner.Noverify, chainDb),\n\t\tcloseBloomHandler: make(chan struct{}),\n\t\tnetworkID:         config.NetworkId,\n\t\tgasPrice:          config.Miner.GasPrice,\n\t\tetherbase:         config.Miner.Etherbase,\n\t\tbloomRequests:     make(chan chan *bloombits.Retrieval),\n\t\tbloomIndexer:      core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms),\n\t\tp2pServer:         stack.Server(),\n\t}\n\n\tbcVersion := rawdb.ReadDatabaseVersion(chainDb)\n\tvar dbVer = \"<nil>\"\n\tif bcVersion != nil {\n\t\tdbVer = fmt.Sprintf(\"%d\", *bcVersion)\n\t}\n\tlog.Info(\"Initialising Ethereum protocol\", \"network\", config.NetworkId, \"dbversion\", dbVer)\n\n\tif !config.SkipBcVersionCheck {\n\t\tif bcVersion != nil && *bcVersion > core.BlockChainVersion {\n\t\t\treturn nil, fmt.Errorf(\"database version is v%d, Geth %s only supports v%d\", *bcVersion, params.VersionWithMeta, core.BlockChainVersion)\n\t\t} else if bcVersion == nil || *bcVersion < core.BlockChainVersion {\n\t\t\tlog.Warn(\"Upgrade 
blockchain database version\", \"from\", dbVer, \"to\", core.BlockChainVersion)\n\t\t\trawdb.WriteDatabaseVersion(chainDb, core.BlockChainVersion)\n\t\t}\n\t}\n\tvar (\n\t\tvmConfig = vm.Config{\n\t\t\tEnablePreimageRecording: config.EnablePreimageRecording,\n\t\t\tEWASMInterpreter:        config.EWASMInterpreter,\n\t\t\tEVMInterpreter:          config.EVMInterpreter,\n\t\t}\n\t\tcacheConfig = &core.CacheConfig{\n\t\t\tTrieCleanLimit:      config.TrieCleanCache,\n\t\t\tTrieCleanJournal:    stack.ResolvePath(config.TrieCleanCacheJournal),\n\t\t\tTrieCleanRejournal:  config.TrieCleanCacheRejournal,\n\t\t\tTrieCleanNoPrefetch: config.NoPrefetch,\n\t\t\tTrieDirtyLimit:      config.TrieDirtyCache,\n\t\t\tTrieDirtyDisabled:   config.NoPruning,\n\t\t\tTrieTimeLimit:       config.TrieTimeout,\n\t\t\tSnapshotLimit:       config.SnapshotCache,\n\t\t\tPreimages:           config.Preimages,\n\t\t}\n\t)\n\teth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Rewind the chain in case of an incompatible config upgrade.\n\tif compat, ok := genesisErr.(*params.ConfigCompatError); ok {\n\t\tlog.Warn(\"Rewinding chain to upgrade configuration\", \"err\", compat)\n\t\teth.blockchain.SetHead(compat.RewindTo)\n\t\trawdb.WriteChainConfig(chainDb, genesisHash, chainConfig)\n\t}\n\teth.bloomIndexer.Start(eth.blockchain)\n\n\tif config.TxPool.Journal != \"\" {\n\t\tconfig.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal)\n\t}\n\teth.txPool = core.NewTxPool(config.TxPool, chainConfig, eth.blockchain)\n\n\t// Permit the downloader to use the trie cache allowance during fast sync\n\tcacheLimit := cacheConfig.TrieCleanLimit + cacheConfig.TrieDirtyLimit + cacheConfig.SnapshotLimit\n\tcheckpoint := config.Checkpoint\n\tif checkpoint == nil {\n\t\tcheckpoint = params.TrustedCheckpoints[genesisHash]\n\t}\n\tif eth.handler, err = newHandler(&handlerConfig{\n\t\tDatabase:   chainDb,\n\t\tChain:      eth.blockchain,\n\t\tTxPool:     eth.txPool,\n\t\tNetwork:    config.NetworkId,\n\t\tSync:       config.SyncMode,\n\t\tBloomCache: uint64(cacheLimit),\n\t\tEventMux:   eth.eventMux,\n\t\tCheckpoint: checkpoint,\n\t\tWhitelist:  config.Whitelist,\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\teth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock)\n\teth.miner.SetExtra(makeExtraData(config.Miner.ExtraData))\n\n\teth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil}\n\tif eth.APIBackend.allowUnprotectedTxs {\n\t\tlog.Info(\"Unprotected transactions allowed\")\n\t}\n\tgpoParams := config.GPO\n\tif gpoParams.Default == nil {\n\t\tgpoParams.Default = config.Miner.GasPrice\n\t}\n\teth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)\n\n\teth.ethDialCandidates, err = setupDiscovery(eth.config.EthDiscoveryURLs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\teth.snapDialCandidates, err = setupDiscovery(eth.config.SnapDiscoveryURLs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Start the RPC service\n\teth.netRPCService = ethapi.NewPublicNetAPI(eth.p2pServer, config.NetworkId)\n\n\t// Register the backend on the node\n\tstack.RegisterAPIs(eth.APIs())\n\tstack.RegisterProtocols(eth.Protocols())\n\tstack.RegisterLifecycle(eth)\n\t// Check for unclean shutdown\n\tif uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(chainDb); err != nil {\n\t\tlog.Error(\"Could 
not update unclean-shutdown-marker list\", \"error\", err)\n\t} else {\n\t\tif discards > 0 {\n\t\t\tlog.Warn(\"Old unclean shutdowns found\", \"count\", discards)\n\t\t}\n\t\tfor _, tstamp := range uncleanShutdowns {\n\t\t\tt := time.Unix(int64(tstamp), 0)\n\t\t\tlog.Warn(\"Unclean shutdown detected\", \"booted\", t,\n\t\t\t\t\"age\", common.PrettyAge(t))\n\t\t}\n\t}\n\treturn eth, nil\n}\n\nfunc makeExtraData(extra []byte) []byte {\n\tif len(extra) == 0 {\n\t\t// create default extradata\n\t\textra, _ = rlp.EncodeToBytes([]interface{}{\n\t\t\tuint(params.VersionMajor<<16 | params.VersionMinor<<8 | params.VersionPatch),\n\t\t\t\"geth\",\n\t\t\truntime.Version(),\n\t\t\truntime.GOOS,\n\t\t})\n\t}\n\tif uint64(len(extra)) > params.MaximumExtraDataSize {\n\t\tlog.Warn(\"Miner extra data exceeds limit\", \"extra\", hexutil.Bytes(extra), \"limit\", params.MaximumExtraDataSize)\n\t\textra = nil\n\t}\n\treturn extra\n}\n\n// APIs returns the collection of RPC services the ethereum package offers.\n// NOTE, some of these services probably need to be moved somewhere else.\nfunc (s *Ethereum) APIs() []rpc.API {\n\tapis := ethapi.GetAPIs(s.APIBackend)\n\n\t// Append any APIs exposed explicitly by the consensus engine\n\tapis = append(apis, s.engine.APIs(s.BlockChain())...)\n\n\t// Append all the local APIs and return\n\treturn append(apis, []rpc.API{\n\t\t{\n\t\t\tNamespace: \"eth\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   NewPublicEthereumAPI(s),\n\t\t\tPublic:    true,\n\t\t}, {\n\t\t\tNamespace: \"eth\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   NewPublicMinerAPI(s),\n\t\t\tPublic:    true,\n\t\t}, {\n\t\t\tNamespace: \"eth\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   downloader.NewPublicDownloaderAPI(s.handler.downloader, s.eventMux),\n\t\t\tPublic:    true,\n\t\t}, {\n\t\t\tNamespace: \"miner\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   NewPrivateMinerAPI(s),\n\t\t\tPublic:    false,\n\t\t}, {\n\t\t\tNamespace: \"eth\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   filters.NewPublicFilterAPI(s.APIBackend, false, 5*time.Minute),\n\t\t\tPublic:    true,\n\t\t}, {\n\t\t\tNamespace: \"admin\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   NewPrivateAdminAPI(s),\n\t\t}, {\n\t\t\tNamespace: \"debug\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   NewPublicDebugAPI(s),\n\t\t\tPublic:    true,\n\t\t}, {\n\t\t\tNamespace: \"debug\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   NewPrivateDebugAPI(s),\n\t\t}, {\n\t\t\tNamespace: \"net\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   s.netRPCService,\n\t\t\tPublic:    true,\n\t\t},\n\t}...)\n}\n\nfunc (s *Ethereum) ResetWithGenesisBlock(gb *types.Block) {\n\ts.blockchain.ResetWithGenesisBlock(gb)\n}\n\nfunc (s *Ethereum) Etherbase() (eb common.Address, err error) {\n\ts.lock.RLock()\n\tetherbase := s.etherbase\n\ts.lock.RUnlock()\n\n\tif etherbase != (common.Address{}) {\n\t\treturn etherbase, nil\n\t}\n\tif wallets := s.AccountManager().Wallets(); len(wallets) > 0 {\n\t\tif accounts := wallets[0].Accounts(); len(accounts) > 0 {\n\t\t\tetherbase := accounts[0].Address\n\n\t\t\ts.lock.Lock()\n\t\t\ts.etherbase = etherbase\n\t\t\ts.lock.Unlock()\n\n\t\t\tlog.Info(\"Etherbase automatically configured\", \"address\", etherbase)\n\t\t\treturn etherbase, nil\n\t\t}\n\t}\n\treturn common.Address{}, fmt.Errorf(\"etherbase must be explicitly specified\")\n}\n\n// isLocalBlock checks whether the specified block is mined\n// by local miner accounts.\n//\n// We regard two types of accounts as local miner accounts: etherbase\n// and accounts specified via 
`txpool.locals` flag.\nfunc (s *Ethereum) isLocalBlock(block *types.Block) bool {\n\tauthor, err := s.engine.Author(block.Header())\n\tif err != nil {\n\t\tlog.Warn(\"Failed to retrieve block author\", \"number\", block.NumberU64(), \"hash\", block.Hash(), \"err\", err)\n\t\treturn false\n\t}\n\t// Check whether the given address is etherbase.\n\ts.lock.RLock()\n\tetherbase := s.etherbase\n\ts.lock.RUnlock()\n\tif author == etherbase {\n\t\treturn true\n\t}\n\t// Check whether the given address is specified by the `txpool.locals`\n\t// CLI flag.\n\tfor _, account := range s.config.TxPool.Locals {\n\t\tif account == author {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// shouldPreserve checks whether we should preserve the given block\n// during the chain reorg depending on whether the author of the block\n// is a local account.\nfunc (s *Ethereum) shouldPreserve(block *types.Block) bool {\n\t// The reason we need to disable the self-reorg preserving for clique\n\t// is that it can introduce a deadlock.\n\t//\n\t// e.g. If there are 7 available signers\n\t//\n\t// r1   A\n\t// r2     B\n\t// r3       C\n\t// r4         D\n\t// r5   A      [X] F G\n\t// r6    [X]\n\t//\n\t// In round 5, the in-turn signer E is offline, so the worst case is that\n\t// A, F and G sign the block of round 5 and reject the blocks of their\n\t// opponents; in round 6, the last available signer B is offline too, and\n\t// the whole network is stuck.\n\tif _, ok := s.engine.(*clique.Clique); ok {\n\t\treturn false\n\t}\n\treturn s.isLocalBlock(block)\n}\n\n// SetEtherbase sets the mining reward address.\nfunc (s *Ethereum) SetEtherbase(etherbase common.Address) {\n\ts.lock.Lock()\n\ts.etherbase = etherbase\n\ts.lock.Unlock()\n\n\ts.miner.SetEtherbase(etherbase)\n}\n\n// StartMining starts the miner with the given number of CPU threads. 
If mining\n// is already running, this method adjusts the number of threads allowed to be used\n// and updates the minimum price required by the transaction pool.\nfunc (s *Ethereum) StartMining(threads int) error {\n\t// Update the thread count within the consensus engine\n\ttype threaded interface {\n\t\tSetThreads(threads int)\n\t}\n\tif th, ok := s.engine.(threaded); ok {\n\t\tlog.Info(\"Updated mining threads\", \"threads\", threads)\n\t\tif threads == 0 {\n\t\t\tthreads = -1 // Disable the miner from within\n\t\t}\n\t\tth.SetThreads(threads)\n\t}\n\t// If the miner was not running, initialize it\n\tif !s.IsMining() {\n\t\t// Propagate the initial price point to the transaction pool\n\t\ts.lock.RLock()\n\t\tprice := s.gasPrice\n\t\ts.lock.RUnlock()\n\t\ts.txPool.SetGasPrice(price)\n\n\t\t// Configure the local mining address\n\t\teb, err := s.Etherbase()\n\t\tif err != nil {\n\t\t\tlog.Error(\"Cannot start mining without etherbase\", \"err\", err)\n\t\t\treturn fmt.Errorf(\"etherbase missing: %v\", err)\n\t\t}\n\t\tif clique, ok := s.engine.(*clique.Clique); ok {\n\t\t\twallet, err := s.accountManager.Find(accounts.Account{Address: eb})\n\t\t\tif wallet == nil || err != nil {\n\t\t\t\tlog.Error(\"Etherbase account unavailable locally\", \"err\", err)\n\t\t\t\treturn fmt.Errorf(\"signer missing: %v\", err)\n\t\t\t}\n\t\t\tclique.Authorize(eb, wallet.SignData)\n\t\t}\n\t\t// If mining is started, we can disable the transaction rejection mechanism\n\t\t// introduced to speed sync times.\n\t\tatomic.StoreUint32(&s.handler.acceptTxs, 1)\n\n\t\tgo s.miner.Start(eb)\n\t}\n\treturn nil\n}\n\n// StopMining terminates the miner, both at the consensus engine level as well as\n// at the block creation level.\nfunc (s *Ethereum) StopMining() {\n\t// Update the thread count within the consensus engine\n\ttype threaded interface {\n\t\tSetThreads(threads int)\n\t}\n\tif th, ok := s.engine.(threaded); ok {\n\t\tth.SetThreads(-1)\n\t}\n\t// Stop the block creation itself\n\ts.miner.Stop()\n}\n\nfunc (s *Ethereum) IsMining() bool      { return s.miner.Mining() }\nfunc (s *Ethereum) Miner() *miner.Miner { return s.miner }\n\nfunc (s *Ethereum) AccountManager() *accounts.Manager  { return s.accountManager }\nfunc (s *Ethereum) BlockChain() *core.BlockChain       { return s.blockchain }\nfunc (s *Ethereum) TxPool() *core.TxPool               { return s.txPool }\nfunc (s *Ethereum) EventMux() *event.TypeMux           { return s.eventMux }\nfunc (s *Ethereum) Engine() consensus.Engine           { return s.engine }\nfunc (s *Ethereum) ChainDb() ethdb.Database            { return s.chainDb }\nfunc (s *Ethereum) IsListening() bool                  { return true } // Always listening\nfunc (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downloader }\nfunc (s *Ethereum) Synced() bool                       { return atomic.LoadUint32(&s.handler.acceptTxs) == 1 }\nfunc (s *Ethereum) ArchiveMode() bool                  { return s.config.NoPruning }\nfunc (s *Ethereum) BloomIndexer() *core.ChainIndexer   { return s.bloomIndexer }\n\n// Protocols returns all the currently configured\n// network protocols to start.\nfunc (s *Ethereum) Protocols() []p2p.Protocol {\n\tprotos := eth.MakeProtocols((*ethHandler)(s.handler), s.networkID, s.ethDialCandidates)\n\tif s.config.SnapshotCache > 0 {\n\t\tprotos = append(protos, snap.MakeProtocols((*snapHandler)(s.handler), s.snapDialCandidates)...)\n\t}\n\treturn protos\n}\n\n// Start implements node.Lifecycle, starting all internal goroutines needed by the\n// 
Ethereum protocol implementation.\nfunc (s *Ethereum) Start() error {\n\teth.StartENRUpdater(s.blockchain, s.p2pServer.LocalNode())\n\n\t// Start the bloom bits servicing goroutines\n\ts.startBloomHandlers(params.BloomBitsBlocks)\n\n\t// Figure out a max peers count based on the server limits\n\tmaxPeers := s.p2pServer.MaxPeers\n\tif s.config.LightServ > 0 {\n\t\tif s.config.LightPeers >= s.p2pServer.MaxPeers {\n\t\t\treturn fmt.Errorf(\"invalid peer config: light peer count (%d) >= total peer count (%d)\", s.config.LightPeers, s.p2pServer.MaxPeers)\n\t\t}\n\t\tmaxPeers -= s.config.LightPeers\n\t}\n\t// Start the networking layer and the light server if requested\n\ts.handler.Start(maxPeers)\n\treturn nil\n}\n\n// Stop implements node.Lifecycle, terminating all internal goroutines used by the\n// Ethereum protocol.\nfunc (s *Ethereum) Stop() error {\n\t// Stop all the peer-related stuff first.\n\ts.handler.Stop()\n\n\t// Then stop everything else.\n\ts.bloomIndexer.Close()\n\tclose(s.closeBloomHandler)\n\ts.txPool.Stop()\n\ts.miner.Stop()\n\ts.blockchain.Stop()\n\ts.engine.Close()\n\trawdb.PopUncleanShutdownMarker(s.chainDb)\n\ts.chainDb.Close()\n\ts.eventMux.Stop()\n\n\treturn nil\n}\n"
  },
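  {
    "path": "docs/sketches/embed_ethereum.go",
    "content": "// Hypothetical sketch, NOT part of go-ethereum: a minimal embedding of the\n// Ethereum service constructed by New() in eth/backend.go above. It mirrors\n// the wiring the geth command performs: create a node, register the eth\n// service on it, then start the stack so Ethereum.Start is invoked. The data\n// directory is a placeholder assumption; ethconfig.Defaults targets mainnet.\npackage main\n\nimport (\n\t\"log\"\n\n\t\"github.com/ethereum/go-ethereum/eth\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/node\"\n)\n\nfunc main() {\n\t// The node hosts registered services, their RPC APIs and the p2p server.\n\tstack, err := node.New(&node.Config{DataDir: \"/tmp/sketch-datadir\"}) // placeholder path\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer stack.Close()\n\n\t// eth.New registers the full-node APIs, protocols and lifecycle on the stack.\n\tcfg := ethconfig.Defaults\n\tif _, err := eth.New(stack, &cfg); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// Start launches the p2p networking and all registered lifecycles,\n\t// including Ethereum.Start above.\n\tif err := stack.Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tstack.Wait() // block until the node is terminated\n}\n"
  },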
  {
    "path": "eth/bloombits.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/bitutil\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n)\n\nconst (\n\t// bloomServiceThreads is the number of goroutines used globally by an Ethereum\n\t// instance to service bloombits lookups for all running filters.\n\tbloomServiceThreads = 16\n\n\t// bloomFilterThreads is the number of goroutines used locally per filter to\n\t// multiplex requests onto the global servicing goroutines.\n\tbloomFilterThreads = 3\n\n\t// bloomRetrievalBatch is the maximum number of bloom bit retrievals to service\n\t// in a single batch.\n\tbloomRetrievalBatch = 16\n\n\t// bloomRetrievalWait is the maximum time to wait for enough bloom bit requests\n\t// to accumulate request an entire batch (avoiding hysteresis).\n\tbloomRetrievalWait = time.Duration(0)\n)\n\n// startBloomHandlers starts a batch of goroutines to accept bloom bit database\n// retrievals from possibly a range of filters and serving the data to satisfy.\nfunc (eth *Ethereum) startBloomHandlers(sectionSize uint64) {\n\tfor i := 0; i < bloomServiceThreads; i++ {\n\t\tgo func() {\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-eth.closeBloomHandler:\n\t\t\t\t\treturn\n\n\t\t\t\tcase request := <-eth.bloomRequests:\n\t\t\t\t\ttask := <-request\n\t\t\t\t\ttask.Bitsets = make([][]byte, len(task.Sections))\n\t\t\t\t\tfor i, section := range task.Sections {\n\t\t\t\t\t\thead := rawdb.ReadCanonicalHash(eth.chainDb, (section+1)*sectionSize-1)\n\t\t\t\t\t\tif compVector, err := rawdb.ReadBloomBits(eth.chainDb, task.Bit, section, head); err == nil {\n\t\t\t\t\t\t\tif blob, err := bitutil.DecompressBytes(compVector, int(sectionSize/8)); err == nil {\n\t\t\t\t\t\t\t\ttask.Bitsets[i] = blob\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\ttask.Error = err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\ttask.Error = err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\trequest <- task\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n"
  },
  {
    "path": "eth/discovery.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/p2p/dnsdisc\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\n// ethEntry is the \"eth\" ENR entry which advertises eth protocol\n// on the discovery network.\ntype ethEntry struct {\n\tForkID forkid.ID // Fork identifier per EIP-2124\n\n\t// Ignore additional fields (for forward compatibility).\n\tRest []rlp.RawValue `rlp:\"tail\"`\n}\n\n// ENRKey implements enr.Entry.\nfunc (e ethEntry) ENRKey() string {\n\treturn \"eth\"\n}\n\n// startEthEntryUpdate starts the ENR updater loop.\nfunc (eth *Ethereum) startEthEntryUpdate(ln *enode.LocalNode) {\n\tvar newHead = make(chan core.ChainHeadEvent, 10)\n\tsub := eth.blockchain.SubscribeChainHeadEvent(newHead)\n\n\tgo func() {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-newHead:\n\t\t\t\tln.Set(eth.currentEthEntry())\n\t\t\tcase <-sub.Err():\n\t\t\t\t// Would be nice to sync with eth.Stop, but there is no\n\t\t\t\t// good way to do that.\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (eth *Ethereum) currentEthEntry() *ethEntry {\n\treturn &ethEntry{ForkID: forkid.NewID(eth.blockchain.Config(), eth.blockchain.Genesis().Hash(),\n\t\teth.blockchain.CurrentHeader().Number.Uint64())}\n}\n\n// setupDiscovery creates the node discovery source for the `eth` and `snap`\n// protocols.\nfunc setupDiscovery(urls []string) (enode.Iterator, error) {\n\tif len(urls) == 0 {\n\t\treturn nil, nil\n\t}\n\tclient := dnsdisc.NewClient(dnsdisc.Config{})\n\treturn client.NewIterator(urls...)\n}\n"
  },
  {
    "path": "eth/downloader/api.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage downloader\n\nimport (\n\t\"context\"\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\n// PublicDownloaderAPI provides an API which gives information about the current synchronisation status.\n// It offers only methods that operates on data that can be available to anyone without security risks.\ntype PublicDownloaderAPI struct {\n\td                         *Downloader\n\tmux                       *event.TypeMux\n\tinstallSyncSubscription   chan chan interface{}\n\tuninstallSyncSubscription chan *uninstallSyncSubscriptionRequest\n}\n\n// NewPublicDownloaderAPI create a new PublicDownloaderAPI. The API has an internal event loop that\n// listens for events from the downloader through the global event mux. In case it receives one of\n// these events it broadcasts it to all syncing subscriptions that are installed through the\n// installSyncSubscription channel.\nfunc NewPublicDownloaderAPI(d *Downloader, m *event.TypeMux) *PublicDownloaderAPI {\n\tapi := &PublicDownloaderAPI{\n\t\td:                         d,\n\t\tmux:                       m,\n\t\tinstallSyncSubscription:   make(chan chan interface{}),\n\t\tuninstallSyncSubscription: make(chan *uninstallSyncSubscriptionRequest),\n\t}\n\n\tgo api.eventLoop()\n\n\treturn api\n}\n\n// eventLoop runs a loop until the event mux closes. 
It will install and uninstall new\n// sync subscriptions and broadcasts sync status updates to the installed sync subscriptions.\nfunc (api *PublicDownloaderAPI) eventLoop() {\n\tvar (\n\t\tsub               = api.mux.Subscribe(StartEvent{}, DoneEvent{}, FailedEvent{})\n\t\tsyncSubscriptions = make(map[chan interface{}]struct{})\n\t)\n\n\tfor {\n\t\tselect {\n\t\tcase i := <-api.installSyncSubscription:\n\t\t\tsyncSubscriptions[i] = struct{}{}\n\t\tcase u := <-api.uninstallSyncSubscription:\n\t\t\tdelete(syncSubscriptions, u.c)\n\t\t\tclose(u.uninstalled)\n\t\tcase event := <-sub.Chan():\n\t\t\tif event == nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar notification interface{}\n\t\t\tswitch event.Data.(type) {\n\t\t\tcase StartEvent:\n\t\t\t\tnotification = &SyncingResult{\n\t\t\t\t\tSyncing: true,\n\t\t\t\t\tStatus:  api.d.Progress(),\n\t\t\t\t}\n\t\t\tcase DoneEvent, FailedEvent:\n\t\t\t\tnotification = false\n\t\t\t}\n\t\t\t// broadcast\n\t\t\tfor c := range syncSubscriptions {\n\t\t\t\tc <- notification\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Syncing provides information when this nodes starts synchronising with the Ethereum network and when it's finished.\nfunc (api *PublicDownloaderAPI) Syncing(ctx context.Context) (*rpc.Subscription, error) {\n\tnotifier, supported := rpc.NotifierFromContext(ctx)\n\tif !supported {\n\t\treturn &rpc.Subscription{}, rpc.ErrNotificationsUnsupported\n\t}\n\n\trpcSub := notifier.CreateSubscription()\n\n\tgo func() {\n\t\tstatuses := make(chan interface{})\n\t\tsub := api.SubscribeSyncStatus(statuses)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase status := <-statuses:\n\t\t\t\tnotifier.Notify(rpcSub.ID, status)\n\t\t\tcase <-rpcSub.Err():\n\t\t\t\tsub.Unsubscribe()\n\t\t\t\treturn\n\t\t\tcase <-notifier.Closed():\n\t\t\t\tsub.Unsubscribe()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn rpcSub, nil\n}\n\n// SyncingResult provides information about the current synchronisation status for this node.\ntype SyncingResult struct {\n\tSyncing bool                  `json:\"syncing\"`\n\tStatus  ethereum.SyncProgress `json:\"status\"`\n}\n\n// uninstallSyncSubscriptionRequest uninstalles a syncing subscription in the API event loop.\ntype uninstallSyncSubscriptionRequest struct {\n\tc           chan interface{}\n\tuninstalled chan interface{}\n}\n\n// SyncStatusSubscription represents a syncing subscription.\ntype SyncStatusSubscription struct {\n\tapi       *PublicDownloaderAPI // register subscription in event loop of this api instance\n\tc         chan interface{}     // channel where events are broadcasted to\n\tunsubOnce sync.Once            // make sure unsubscribe logic is executed once\n}\n\n// Unsubscribe uninstalls the subscription from the DownloadAPI event loop.\n// The status channel that was passed to subscribeSyncStatus isn't used anymore\n// after this method returns.\nfunc (s *SyncStatusSubscription) Unsubscribe() {\n\ts.unsubOnce.Do(func() {\n\t\treq := uninstallSyncSubscriptionRequest{s.c, make(chan interface{})}\n\t\ts.api.uninstallSyncSubscription <- &req\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-s.c:\n\t\t\t\t// drop new status events until uninstall confirmation\n\t\t\t\tcontinue\n\t\t\tcase <-req.uninstalled:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n\n// SubscribeSyncStatus creates a subscription that will broadcast new synchronisation updates.\n// The given channel must receive interface values, the result can either\nfunc (api *PublicDownloaderAPI) SubscribeSyncStatus(status chan interface{}) *SyncStatusSubscription {\n\tapi.installSyncSubscription <- 
status\n\treturn &SyncStatusSubscription{api: api, c: status}\n}\n"
  },
  {
    "path": "eth/downloader/downloader.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package downloader contains the manual full chain synchronisation.\npackage downloader\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/snap\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/metrics\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\nvar (\n\tMaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request\n\tMaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request\n\tMaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly\n\tMaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request\n\tMaxStateFetch   = 384 // Amount of node state values to allow fetching per request\n\n\trttMinEstimate   = 2 * time.Second  // Minimum round-trip time to target for download requests\n\trttMaxEstimate   = 20 * time.Second // Maximum round-trip time to target for download requests\n\trttMinConfidence = 0.1              // Worse confidence factor in our estimated RTT value\n\tttlScaling       = 3                // Constant scaling factor for RTT -> TTL conversion\n\tttlLimit         = time.Minute      // Maximum TTL allowance to prevent reaching crazy timeouts\n\n\tqosTuningPeers   = 5    // Number of peers to tune based on (best peers)\n\tqosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence\n\tqosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value\n\n\tmaxQueuedHeaders            = 32 * 1024                         // [eth/62] Maximum number of headers to queue for import (DOS protection)\n\tmaxHeadersProcess           = 2048                              // Number of header download results to import at once into the chain\n\tmaxResultsProcess           = 2048                              // Number of content download results to import at once into the chain\n\tfullMaxForkAncestry  uint64 = params.FullImmutabilityThreshold  // Maximum chain reorganisation (locally redeclared so tests can reduce it)\n\tlightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)\n\n\treorgProtThreshold   = 48 // Threshold number of recent blocks to disable mini reorg protection\n\treorgProtHeaderDelay = 2  // Number of 
headers to delay delivering to cover mini reorgs\n\n\tfsHeaderCheckFrequency = 100             // Verification frequency of the downloaded headers during fast sync\n\tfsHeaderSafetyNet      = 2048            // Number of headers to discard in case a chain violation is detected\n\tfsHeaderForceVerify    = 24              // Number of headers to verify before and after the pivot to accept it\n\tfsHeaderContCheck      = 3 * time.Second // Time interval to check for header continuations during state download\n\tfsMinFullBlocks        = 64              // Number of blocks to retrieve fully even in fast sync\n)\n\nvar (\n\terrBusy                    = errors.New(\"busy\")\n\terrUnknownPeer             = errors.New(\"peer is unknown or unhealthy\")\n\terrBadPeer                 = errors.New(\"action from bad peer ignored\")\n\terrStallingPeer            = errors.New(\"peer is stalling\")\n\terrUnsyncedPeer            = errors.New(\"unsynced peer\")\n\terrNoPeers                 = errors.New(\"no peers to keep download active\")\n\terrTimeout                 = errors.New(\"timeout\")\n\terrEmptyHeaderSet          = errors.New(\"empty header set by peer\")\n\terrPeersUnavailable        = errors.New(\"no peers available or all tried for download\")\n\terrInvalidAncestor         = errors.New(\"retrieved ancestor is invalid\")\n\terrInvalidChain            = errors.New(\"retrieved hash chain is invalid\")\n\terrInvalidBody             = errors.New(\"retrieved block body is invalid\")\n\terrInvalidReceipt          = errors.New(\"retrieved receipt is invalid\")\n\terrCancelStateFetch        = errors.New(\"state data download canceled (requested)\")\n\terrCancelContentProcessing = errors.New(\"content processing canceled (requested)\")\n\terrCanceled                = errors.New(\"syncing canceled (requested)\")\n\terrNoSyncActive            = errors.New(\"no sync active\")\n\terrTooOld                  = errors.New(\"peer's protocol version too old\")\n\terrNoAncestorFound         = errors.New(\"no common ancestor found\")\n)\n\ntype Downloader struct {\n\t// WARNING: The `rttEstimate` and `rttConfidence` fields are accessed atomically.\n\t// On 32 bit platforms, only 64-bit aligned fields can be atomic. The struct is\n\t// guaranteed to be so aligned, so take advantage of that. For more information,\n\t// see https://golang.org/pkg/sync/atomic/#pkg-note-BUG.\n\trttEstimate   uint64 // Round trip time to target for download requests\n\trttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)\n\n\tmode uint32         // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode\n\tmux  *event.TypeMux // Event multiplexer to announce sync operation events\n\n\tcheckpoint uint64   // Checkpoint block number to enforce head against (e.g. fast sync)\n\tgenesis    uint64   // Genesis block number to limit sync to (e.g. 
light client CHT)\n\tqueue      *queue   // Scheduler for selecting the hashes to download\n\tpeers      *peerSet // Set of active peers from which download can proceed\n\n\tstateDB    ethdb.Database  // Database to state sync into (and deduplicate via)\n\tstateBloom *trie.SyncBloom // Bloom filter for fast trie node and contract code existence checks\n\n\t// Statistics\n\tsyncStatsChainOrigin uint64 // Origin block number where syncing started at\n\tsyncStatsChainHeight uint64 // Highest block number known when syncing started\n\tsyncStatsState       stateSyncStats\n\tsyncStatsLock        sync.RWMutex // Lock protecting the sync stats fields\n\n\tlightchain LightChain\n\tblockchain BlockChain\n\n\t// Callbacks\n\tdropPeer peerDropFn // Drops a peer for misbehaving\n\n\t// Status\n\tsynchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing\n\tsynchronising   int32\n\tnotified        int32\n\tcommitted       int32\n\tancientLimit    uint64 // The maximum block number which can be regarded as ancient data.\n\n\t// Channels\n\theaderCh      chan dataPack        // Channel receiving inbound block headers\n\tbodyCh        chan dataPack        // Channel receiving inbound block bodies\n\treceiptCh     chan dataPack        // Channel receiving inbound receipts\n\tbodyWakeCh    chan bool            // Channel to signal the block body fetcher of new tasks\n\treceiptWakeCh chan bool            // Channel to signal the receipt fetcher of new tasks\n\theaderProcCh  chan []*types.Header // Channel to feed the header processor new tasks\n\n\t// State sync\n\tpivotHeader *types.Header // Pivot block header to dynamically push the syncing state root\n\tpivotLock   sync.RWMutex  // Lock protecting pivot header reads from updates\n\n\tsnapSync       bool         // Whether to run state sync over the snap protocol\n\tSnapSyncer     *snap.Syncer // TODO(karalabe): make private! 
hack for now\n\tstateSyncStart chan *stateSync\n\ttrackStateReq  chan *stateReq\n\tstateCh        chan dataPack // Channel receiving inbound node state data\n\n\t// Cancellation and termination\n\tcancelPeer string         // Identifier of the peer currently being used as the master (cancel on drop)\n\tcancelCh   chan struct{}  // Channel to cancel mid-flight syncs\n\tcancelLock sync.RWMutex   // Lock to protect the cancel channel and peer in delivers\n\tcancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.\n\n\tquitCh   chan struct{} // Quit channel to signal termination\n\tquitLock sync.Mutex    // Lock to prevent double closes\n\n\t// Testing hooks\n\tsyncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run\n\tbodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch\n\treceiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch\n\tchainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)\n}\n\n// LightChain encapsulates functions required to synchronise a light chain.\ntype LightChain interface {\n\t// HasHeader verifies a header's presence in the local chain.\n\tHasHeader(common.Hash, uint64) bool\n\n\t// GetHeaderByHash retrieves a header from the local chain.\n\tGetHeaderByHash(common.Hash) *types.Header\n\n\t// CurrentHeader retrieves the head header from the local chain.\n\tCurrentHeader() *types.Header\n\n\t// GetTd returns the total difficulty of a local block.\n\tGetTd(common.Hash, uint64) *big.Int\n\n\t// InsertHeaderChain inserts a batch of headers into the local chain.\n\tInsertHeaderChain([]*types.Header, int) (int, error)\n\n\t// SetHead rewinds the local chain to a new head.\n\tSetHead(uint64) error\n}\n\n// BlockChain encapsulates functions required to sync a (full or fast) blockchain.\ntype BlockChain interface {\n\tLightChain\n\n\t// HasBlock verifies a block's presence in the local chain.\n\tHasBlock(common.Hash, uint64) bool\n\n\t// HasFastBlock verifies a fast block's presence in the local chain.\n\tHasFastBlock(common.Hash, uint64) bool\n\n\t// GetBlockByHash retrieves a block from the local chain.\n\tGetBlockByHash(common.Hash) *types.Block\n\n\t// CurrentBlock retrieves the head block from the local chain.\n\tCurrentBlock() *types.Block\n\n\t// CurrentFastBlock retrieves the head fast block from the local chain.\n\tCurrentFastBlock() *types.Block\n\n\t// FastSyncCommitHead directly commits the head block to a certain entity.\n\tFastSyncCommitHead(common.Hash) error\n\n\t// InsertChain inserts a batch of blocks into the local chain.\n\tInsertChain(types.Blocks) (int, error)\n\n\t// InsertReceiptChain inserts a batch of receipts into the local chain.\n\tInsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)\n}\n\n// New creates a new downloader to fetch hashes and blocks from remote peers.\nfunc New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {\n\tif lightchain == nil {\n\t\tlightchain = chain\n\t}\n\tdl := &Downloader{\n\t\tstateDB:        stateDb,\n\t\tstateBloom:     stateBloom,\n\t\tmux:            mux,\n\t\tcheckpoint:     checkpoint,\n\t\tqueue:          newQueue(blockCacheMaxItems, blockCacheInitialItems),\n\t\tpeers:          newPeerSet(),\n\t\trttEstimate:    uint64(rttMaxEstimate),\n\t\trttConfidence:  uint64(1000000),\n\t\tblockchain:     
chain,\n\t\tlightchain:     lightchain,\n\t\tdropPeer:       dropPeer,\n\t\theaderCh:       make(chan dataPack, 1),\n\t\tbodyCh:         make(chan dataPack, 1),\n\t\treceiptCh:      make(chan dataPack, 1),\n\t\tbodyWakeCh:     make(chan bool, 1),\n\t\treceiptWakeCh:  make(chan bool, 1),\n\t\theaderProcCh:   make(chan []*types.Header, 1),\n\t\tquitCh:         make(chan struct{}),\n\t\tstateCh:        make(chan dataPack),\n\t\tSnapSyncer:     snap.NewSyncer(stateDb),\n\t\tstateSyncStart: make(chan *stateSync),\n\t\tsyncStatsState: stateSyncStats{\n\t\t\tprocessed: rawdb.ReadFastTrieProgress(stateDb),\n\t\t},\n\t\ttrackStateReq: make(chan *stateReq),\n\t}\n\tgo dl.qosTuner()\n\tgo dl.stateFetcher()\n\treturn dl\n}\n\n// Progress retrieves the synchronisation boundaries, specifically the origin\n// block where synchronisation started at (may have failed/suspended); the block\n// or header the sync is currently at; and the latest known block which the sync targets.\n//\n// In addition, during the state download phase of fast synchronisation the number\n// of processed and the total number of known states are also returned. Otherwise\n// these are zero.\nfunc (d *Downloader) Progress() ethereum.SyncProgress {\n\t// Lock the current stats and return the progress\n\td.syncStatsLock.RLock()\n\tdefer d.syncStatsLock.RUnlock()\n\n\tcurrent := uint64(0)\n\tmode := d.getMode()\n\tswitch {\n\tcase d.blockchain != nil && mode == FullSync:\n\t\tcurrent = d.blockchain.CurrentBlock().NumberU64()\n\tcase d.blockchain != nil && mode == FastSync:\n\t\tcurrent = d.blockchain.CurrentFastBlock().NumberU64()\n\tcase d.lightchain != nil:\n\t\tcurrent = d.lightchain.CurrentHeader().Number.Uint64()\n\tdefault:\n\t\tlog.Error(\"Unknown downloader chain/mode combo\", \"light\", d.lightchain != nil, \"full\", d.blockchain != nil, \"mode\", mode)\n\t}\n\treturn ethereum.SyncProgress{\n\t\tStartingBlock: d.syncStatsChainOrigin,\n\t\tCurrentBlock:  current,\n\t\tHighestBlock:  d.syncStatsChainHeight,\n\t\tPulledStates:  d.syncStatsState.processed,\n\t\tKnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,\n\t}\n}\n\n// Synchronising returns whether the downloader is currently retrieving blocks.\nfunc (d *Downloader) Synchronising() bool {\n\treturn atomic.LoadInt32(&d.synchronising) > 0\n}\n\n// RegisterPeer injects a new download peer into the set of block sources to be\n// used for fetching hashes and blocks from.\nfunc (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {\n\tvar logger log.Logger\n\tif len(id) < 16 {\n\t\t// Tests use short IDs, don't choke on them\n\t\tlogger = log.New(\"peer\", id)\n\t} else {\n\t\tlogger = log.New(\"peer\", id[:8])\n\t}\n\tlogger.Trace(\"Registering sync peer\")\n\tif err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {\n\t\tlogger.Error(\"Failed to register sync peer\", \"err\", err)\n\t\treturn err\n\t}\n\td.qosReduceConfidence()\n\n\treturn nil\n}\n\n// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.\nfunc (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error {\n\treturn d.RegisterPeer(id, version, &lightPeerWrapper{peer})\n}\n\n// UnregisterPeer removes a peer from the known list, preventing any action from\n// the specified peer. 
An effort is also made to return any pending fetches into\n// the queue.\nfunc (d *Downloader) UnregisterPeer(id string) error {\n\t// Unregister the peer from the active peer set and revoke any fetch tasks\n\tvar logger log.Logger\n\tif len(id) < 16 {\n\t\t// Tests use short IDs, don't choke on them\n\t\tlogger = log.New(\"peer\", id)\n\t} else {\n\t\tlogger = log.New(\"peer\", id[:8])\n\t}\n\tlogger.Trace(\"Unregistering sync peer\")\n\tif err := d.peers.Unregister(id); err != nil {\n\t\tlogger.Error(\"Failed to unregister sync peer\", \"err\", err)\n\t\treturn err\n\t}\n\td.queue.Revoke(id)\n\n\treturn nil\n}\n\n// Synchronise tries to sync up our local block chain with a remote peer, both\n// adding various sanity checks as well as wrapping it with various log entries.\nfunc (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {\n\terr := d.synchronise(id, head, td, mode)\n\n\tswitch err {\n\tcase nil, errBusy, errCanceled:\n\t\treturn err\n\t}\n\tif errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) ||\n\t\terrors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) ||\n\t\terrors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) {\n\t\tlog.Warn(\"Synchronisation failed, dropping peer\", \"peer\", id, \"err\", err)\n\t\tif d.dropPeer == nil {\n\t\t\t// The dropPeer method is nil when `--copydb` is used for a local copy.\n\t\t\t// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored\n\t\t\tlog.Warn(\"Downloader wants to drop peer, but peerdrop-function is not set\", \"peer\", id)\n\t\t} else {\n\t\t\td.dropPeer(id)\n\t\t}\n\t\treturn err\n\t}\n\tlog.Warn(\"Synchronisation failed, retrying\", \"err\", err)\n\treturn err\n}\n\n// synchronise will select the peer and use it for synchronising. If an empty string is given\n// it will use the best peer possible and synchronise if its TD is higher than our own. If any of the\n// checks fail, an error will be returned. This method is synchronous.\nfunc (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {\n\t// Mock out the synchronisation if testing\n\tif d.synchroniseMock != nil {\n\t\treturn d.synchroniseMock(id, hash)\n\t}\n\t// Make sure only one goroutine is ever allowed past this point at once\n\tif !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {\n\t\treturn errBusy\n\t}\n\tdefer atomic.StoreInt32(&d.synchronising, 0)\n\n\t// Post a user notification of the sync (only once per session)\n\tif atomic.CompareAndSwapInt32(&d.notified, 0, 1) {\n\t\tlog.Info(\"Block synchronisation started\")\n\t}\n\t// If we are already full syncing, but have a fast-sync bloom filter laying\n\t// around, make sure it doesn't use memory any more. This is a special case\n\t// when the user attempts to fast sync a new empty network.\n\tif mode == FullSync && d.stateBloom != nil {\n\t\td.stateBloom.Close()\n\t}\n\t// If snap sync was requested, create the snap scheduler and switch to fast\n\t// sync mode. Long term we could drop fast sync or merge the two together,\n\t// but until snap becomes prevalent, we should support both. TODO(karalabe).\n\tif mode == SnapSync {\n\t\tif !d.snapSync {\n\t\t\tlog.Warn(\"Enabling snapshot sync prototype\")\n\t\t\td.snapSync = true\n\t\t}\n\t\tmode = FastSync\n\t}\n\t// Reset the queue, peer set and wake channels to clean any internal leftover state\n\td.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)\n\td.peers.Reset()\n\n\tfor _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {\n\t\tselect {\n\t\tcase <-ch:\n\t\tdefault:\n\t\t}\n\t}\n\tfor _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {\n\t\tfor empty := false; !empty; {\n\t\t\tselect {\n\t\t\tcase <-ch:\n\t\t\tdefault:\n\t\t\t\tempty = true\n\t\t\t}\n\t\t}\n\t}\n\tfor empty := false; !empty; {\n\t\tselect {\n\t\tcase <-d.headerProcCh:\n\t\tdefault:\n\t\t\tempty = true\n\t\t}\n\t}\n\t// Create cancel channel for aborting mid-flight and mark the master peer\n\td.cancelLock.Lock()\n\td.cancelCh = make(chan struct{})\n\td.cancelPeer = id\n\td.cancelLock.Unlock()\n\n\tdefer d.Cancel() // No matter what, we can't leave the cancel channel open\n\n\t// Atomically set the requested sync mode\n\tatomic.StoreUint32(&d.mode, uint32(mode))\n\n\t// Retrieve the origin peer and initiate the downloading process\n\tp := d.peers.Peer(id)\n\tif p == nil {\n\t\treturn errUnknownPeer\n\t}\n\treturn d.syncWithPeer(p, hash, td)\n}\n\nfunc (d *Downloader) getMode() SyncMode {\n\treturn SyncMode(atomic.LoadUint32(&d.mode))\n}\n\n// syncWithPeer starts a block synchronization based on the hash chain from the\n// specified peer and head hash.\nfunc (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {\n\td.mux.Post(StartEvent{})\n\tdefer func() {\n\t\t// reset on error\n\t\tif err != nil {\n\t\t\td.mux.Post(FailedEvent{err})\n\t\t} else {\n\t\t\tlatest := d.lightchain.CurrentHeader()\n\t\t\td.mux.Post(DoneEvent{latest})\n\t\t}\n\t}()\n\tif p.version < 64 {\n\t\treturn fmt.Errorf(\"%w: advertised %d < required %d\", errTooOld, p.version, 64)\n\t}\n\tmode := d.getMode()\n\n\tlog.Debug(\"Synchronising with the network\", \"peer\", p.id, \"eth\", p.version, \"head\", hash, \"td\", td, \"mode\", mode)\n\tdefer func(start time.Time) {\n\t\tlog.Debug(\"Synchronisation terminated\", \"elapsed\", common.PrettyDuration(time.Since(start)))\n\t}(time.Now())\n\n\t// Look up the sync boundaries: the common ancestor and the target block\n\tlatest, pivot, err := d.fetchHead(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif mode == FastSync && pivot == nil {\n\t\t// If no pivot block was returned, the head is below the min full block\n\t\t// threshold (i.e. new chain). In that case we won't really fast sync\n\t\t// anyway, but still need a valid pivot block to avoid some code hitting\n\t\t// nil panics on an access.\n\t\tpivot = d.blockchain.CurrentBlock().Header()\n\t}\n\theight := latest.Number.Uint64()\n\n\torigin, err := d.findAncestor(p, latest)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.syncStatsLock.Lock()\n\tif d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {\n\t\td.syncStatsChainOrigin = origin\n\t}\n\td.syncStatsChainHeight = height\n\td.syncStatsLock.Unlock()\n\n\t// Ensure our origin point is below any fast sync pivot point\n\tif mode == FastSync {\n\t\tif height <= uint64(fsMinFullBlocks) {\n\t\t\torigin = 0\n\t\t} else {\n\t\t\tpivotNumber := pivot.Number.Uint64()\n\t\t\tif pivotNumber <= origin {\n\t\t\t\torigin = pivotNumber - 1\n\t\t\t}\n\t\t\t// Write out the pivot into the database so a rollback beyond it will\n\t\t\t// reenable fast sync\n\t\t\trawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)\n\t\t}\n\t}\n\td.committed = 1\n\tif mode == FastSync && pivot.Number.Uint64() != 0 {\n\t\td.committed = 0\n\t}\n\tif mode == FastSync {\n\t\t// Set the ancient data limitation.\n\t\t// If we are running fast sync, all block data older than ancientLimit will be\n\t\t// written to the ancient store. More recent data will be written to the active\n\t\t// database and will wait for the freezer to migrate.\n\t\t//\n\t\t// If there is a checkpoint available, then calculate the ancientLimit through\n\t\t// that. Otherwise calculate the ancient limit through the advertised height\n\t\t// of the remote peer.\n\t\t//\n\t\t// The reason for picking checkpoint first is that a malicious peer can give us\n\t\t// a fake (very high) height, forcing the ancient limit to also be very high.\n\t\t// The peer would start to feed us valid blocks until head, resulting in all of\n\t\t// the blocks being written into the ancient store. 
A following mini-reorg\n\t\t// could cause issues.\n\t\tif d.checkpoint != 0 && d.checkpoint > fullMaxForkAncestry+1 {\n\t\t\td.ancientLimit = d.checkpoint\n\t\t} else if height > fullMaxForkAncestry+1 {\n\t\t\td.ancientLimit = height - fullMaxForkAncestry - 1\n\t\t} else {\n\t\t\td.ancientLimit = 0\n\t\t}\n\t\tfrozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.\n\n\t\t// If a part of blockchain data has already been written into active store,\n\t\t// disable the ancient style insertion explicitly.\n\t\tif origin >= frozen && frozen != 0 {\n\t\t\td.ancientLimit = 0\n\t\t\tlog.Info(\"Disabling direct-ancient mode\", \"origin\", origin, \"ancient\", frozen-1)\n\t\t} else if d.ancientLimit > 0 {\n\t\t\tlog.Debug(\"Enabling direct-ancient mode\", \"ancient\", d.ancientLimit)\n\t\t}\n\t\t// Rewind the ancient store and blockchain if reorg happens.\n\t\tif origin+1 < frozen {\n\t\t\tif err := d.lightchain.SetHead(origin + 1); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\t// Initiate the sync using a concurrent header and content retrieval algorithm\n\td.queue.Prepare(origin+1, mode)\n\tif d.syncInitHook != nil {\n\t\td.syncInitHook(origin, height)\n\t}\n\tfetchers := []func() error{\n\t\tfunc() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved\n\t\tfunc() error { return d.fetchBodies(origin + 1) },   // Bodies are retrieved during normal and fast sync\n\t\tfunc() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync\n\t\tfunc() error { return d.processHeaders(origin+1, td) },\n\t}\n\tif mode == FastSync {\n\t\td.pivotLock.Lock()\n\t\td.pivotHeader = pivot\n\t\td.pivotLock.Unlock()\n\n\t\tfetchers = append(fetchers, func() error { return d.processFastSyncContent() })\n\t} else if mode == FullSync {\n\t\tfetchers = append(fetchers, d.processFullSyncContent)\n\t}\n\treturn d.spawnSync(fetchers)\n}\n\n// spawnSync runs d.process and all given fetcher functions to completion in\n// separate goroutines, returning the first error that appears.\nfunc (d *Downloader) spawnSync(fetchers []func() error) error {\n\terrc := make(chan error, len(fetchers))\n\td.cancelWg.Add(len(fetchers))\n\tfor _, fn := range fetchers {\n\t\tfn := fn\n\t\tgo func() { defer d.cancelWg.Done(); errc <- fn() }()\n\t}\n\t// Wait for the first error, then terminate the others.\n\tvar err error\n\tfor i := 0; i < len(fetchers); i++ {\n\t\tif i == len(fetchers)-1 {\n\t\t\t// Close the queue when all fetchers have exited.\n\t\t\t// This will cause the block processor to end when\n\t\t\t// it has processed the queue.\n\t\t\td.queue.Close()\n\t\t}\n\t\tif err = <-errc; err != nil && err != errCanceled {\n\t\t\tbreak\n\t\t}\n\t}\n\td.queue.Close()\n\td.Cancel()\n\treturn err\n}\n\n// cancel aborts all of the operations and resets the queue. However, cancel does\n// not wait for the running download goroutines to finish. 
This method should be\n// used when cancelling the downloads from inside the downloader.\nfunc (d *Downloader) cancel() {\n\t// Close the current cancel channel\n\td.cancelLock.Lock()\n\tdefer d.cancelLock.Unlock()\n\n\tif d.cancelCh != nil {\n\t\tselect {\n\t\tcase <-d.cancelCh:\n\t\t\t// Channel was already closed\n\t\tdefault:\n\t\t\tclose(d.cancelCh)\n\t\t}\n\t}\n}\n\n// Cancel aborts all of the operations and waits for all download goroutines to\n// finish before returning.\nfunc (d *Downloader) Cancel() {\n\td.cancel()\n\td.cancelWg.Wait()\n}\n\n// Terminate interrupts the downloader, canceling all pending operations.\n// The downloader cannot be reused after calling Terminate.\nfunc (d *Downloader) Terminate() {\n\t// Close the termination channel (make sure double close is allowed)\n\td.quitLock.Lock()\n\tselect {\n\tcase <-d.quitCh:\n\tdefault:\n\t\tclose(d.quitCh)\n\t}\n\tif d.stateBloom != nil {\n\t\td.stateBloom.Close()\n\t}\n\td.quitLock.Unlock()\n\n\t// Cancel any pending download requests\n\td.Cancel()\n}\n\n// fetchHead retrieves the head header and prior pivot block (if available) from\n// a remote peer.\nfunc (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) {\n\tp.log.Debug(\"Retrieving remote chain head\")\n\tmode := d.getMode()\n\n\t// Request the advertised remote head block and wait for the response\n\tlatest, _ := p.peer.Head()\n\tfetch := 1\n\tif mode == FastSync {\n\t\tfetch = 2 // head + pivot headers\n\t}\n\tgo p.peer.RequestHeadersByHash(latest, fetch, fsMinFullBlocks-1, true)\n\n\tttl := d.requestTTL()\n\ttimeout := time.After(ttl)\n\tfor {\n\t\tselect {\n\t\tcase <-d.cancelCh:\n\t\t\treturn nil, nil, errCanceled\n\n\t\tcase packet := <-d.headerCh:\n\t\t\t// Discard anything not from the origin peer\n\t\t\tif packet.PeerId() != p.id {\n\t\t\t\tlog.Debug(\"Received headers from incorrect peer\", \"peer\", packet.PeerId())\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Make sure the peer gave us at least one and at most the requested headers\n\t\t\theaders := packet.(*headerPack).headers\n\t\t\tif len(headers) == 0 || len(headers) > fetch {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"%w: returned headers %d != requested %d\", errBadPeer, len(headers), fetch)\n\t\t\t}\n\t\t\t// The first header needs to be the head, validate against the checkpoint\n\t\t\t// and request. If only 1 header was returned, make sure there's no pivot\n\t\t\t// or there was not one requested.\n\t\t\thead := headers[0]\n\t\t\tif (mode == FastSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"%w: remote head %d below checkpoint %d\", errUnsyncedPeer, head.Number, d.checkpoint)\n\t\t\t}\n\t\t\tif len(headers) == 1 {\n\t\t\t\tif mode == FastSync && head.Number.Uint64() > uint64(fsMinFullBlocks) {\n\t\t\t\t\treturn nil, nil, fmt.Errorf(\"%w: no pivot included along head header\", errBadPeer)\n\t\t\t\t}\n\t\t\t\tp.log.Debug(\"Remote head identified, no pivot\", \"number\", head.Number, \"hash\", head.Hash())\n\t\t\t\treturn head, nil, nil\n\t\t\t}\n\t\t\t// At this point we have 2 headers in total and the first is the\n\t\t\t// validated head of the chain. Check the pivot number and return.\n\t\t\tpivot := headers[1]\n\t\t\tif pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"%w: remote pivot %d != requested %d\", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks))\n\t\t\t}\n\t\t\treturn head, pivot, nil\n\n\t\tcase <-timeout:\n\t\t\tp.log.Debug(\"Waiting for head header timed out\", \"elapsed\", ttl)\n\t\t\treturn nil, nil, errTimeout\n\n\t\tcase <-d.bodyCh:\n\t\tcase <-d.receiptCh:\n\t\t\t// Out of bounds delivery, ignore\n\t\t}\n\t}\n}\n\n// calculateRequestSpan calculates what headers to request from a peer when trying to determine the\n// common ancestor.\n// It returns parameters to be used for peer.RequestHeadersByNumber:\n//  from - starting block number\n//  count - number of headers to request\n//  skip - number of headers to skip\n// and also returns 'max', the last block which is expected to be returned by the remote peers,\n// given the (from,count,skip).\nfunc calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {\n\tvar (\n\t\tfrom     int\n\t\tcount    int\n\t\tMaxCount = MaxHeaderFetch / 16\n\t)\n\t// requestHead is the highest block that we will ask for. If requestHead is not offset,\n\t// the highest block that we will get is 16 blocks back from head, which means we\n\t// will fetch 14 or 15 blocks unnecessarily in the case the height difference\n\t// between us and the peer is 1-2 blocks, which is most common\n\trequestHead := int(remoteHeight) - 1\n\tif requestHead < 0 {\n\t\trequestHead = 0\n\t}\n\t// requestBottom is the lowest block we want included in the query\n\t// Ideally, we want to include the one just below our own head\n\trequestBottom := int(localHeight - 1)\n\tif requestBottom < 0 {\n\t\trequestBottom = 0\n\t}\n\ttotalSpan := requestHead - requestBottom\n\tspan := 1 + totalSpan/MaxCount\n\tif span < 2 {\n\t\tspan = 2\n\t}\n\tif span > 16 {\n\t\tspan = 16\n\t}\n\n\tcount = 1 + totalSpan/span\n\tif count > MaxCount {\n\t\tcount = MaxCount\n\t}\n\tif count < 2 {\n\t\tcount = 2\n\t}\n\tfrom = requestHead - (count-1)*span\n\tif from < 0 {\n\t\tfrom = 0\n\t}\n\tmax := from + (count-1)*span\n\treturn int64(from), count, span - 1, uint64(max)\n}\n\n// findAncestor tries to locate the common ancestor link of the local chain and\n// a remote peer's blockchain. In the general case when our node was in sync and\n// on the correct chain, checking the top N links should already get us a match.\n// In the rare scenario when we ended up on a long reorganisation (i.e. 
none of\n// the head links match), we do a binary search to find the common ancestor.\nfunc (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {\n\t// Figure out the valid ancestor range to prevent rewrite attacks\n\tvar (\n\t\tfloor        = int64(-1)\n\t\tlocalHeight  uint64\n\t\tremoteHeight = remoteHeader.Number.Uint64()\n\t)\n\tmode := d.getMode()\n\tswitch mode {\n\tcase FullSync:\n\t\tlocalHeight = d.blockchain.CurrentBlock().NumberU64()\n\tcase FastSync:\n\t\tlocalHeight = d.blockchain.CurrentFastBlock().NumberU64()\n\tdefault:\n\t\tlocalHeight = d.lightchain.CurrentHeader().Number.Uint64()\n\t}\n\tp.log.Debug(\"Looking for common ancestor\", \"local\", localHeight, \"remote\", remoteHeight)\n\n\t// Recap floor value for binary search\n\tmaxForkAncestry := fullMaxForkAncestry\n\tif d.getMode() == LightSync {\n\t\tmaxForkAncestry = lightMaxForkAncestry\n\t}\n\tif localHeight >= maxForkAncestry {\n\t\t// We're above the max reorg threshold, find the earliest fork point\n\t\tfloor = int64(localHeight - maxForkAncestry)\n\t}\n\t// If we're doing a light sync, ensure the floor doesn't go below the CHT, as\n\t// all headers before that point will be missing.\n\tif mode == LightSync {\n\t\t// If we don't know the current CHT position, find it\n\t\tif d.genesis == 0 {\n\t\t\theader := d.lightchain.CurrentHeader()\n\t\t\tfor header != nil {\n\t\t\t\td.genesis = header.Number.Uint64()\n\t\t\t\tif floor >= int64(d.genesis)-1 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\theader = d.lightchain.GetHeaderByHash(header.ParentHash)\n\t\t\t}\n\t\t}\n\t\t// We already know the \"genesis\" block number, cap floor to that\n\t\tif floor < int64(d.genesis)-1 {\n\t\t\tfloor = int64(d.genesis) - 1\n\t\t}\n\t}\n\n\tancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor)\n\tif err == nil {\n\t\treturn ancestor, nil\n\t}\n\t// The returned error was not nil.\n\t// If the error returned does not reflect that a common ancestor was not found, return it.\n\t// If the error reflects that a common ancestor was not found, continue to binary search,\n\t// where the error value will be reassigned.\n\tif !errors.Is(err, errNoAncestorFound) {\n\t\treturn 0, err\n\t}\n\n\tancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn ancestor, nil\n}\n\nfunc (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (commonAncestor uint64, err error) {\n\tfrom, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)\n\n\tp.log.Trace(\"Span searching for common ancestor\", \"count\", count, \"from\", from, \"skip\", skip)\n\tgo p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)\n\n\t// Wait for the remote response to the head fetch\n\tnumber, hash := uint64(0), common.Hash{}\n\n\tttl := d.requestTTL()\n\ttimeout := time.After(ttl)\n\n\tfor finished := false; !finished; {\n\t\tselect {\n\t\tcase <-d.cancelCh:\n\t\t\treturn 0, errCanceled\n\n\t\tcase packet := <-d.headerCh:\n\t\t\t// Discard anything not from the origin peer\n\t\t\tif packet.PeerId() != p.id {\n\t\t\t\tlog.Debug(\"Received headers from incorrect peer\", \"peer\", packet.PeerId())\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Make sure the peer actually gave something valid\n\t\t\theaders := packet.(*headerPack).headers\n\t\t\tif len(headers) == 0 {\n\t\t\t\tp.log.Warn(\"Empty head header set\")\n\t\t\t\treturn 0, errEmptyHeaderSet\n\t\t\t}\n\t\t\t// Make sure 
the peer's reply conforms to the request\n\t\t\tfor i, header := range headers {\n\t\t\t\texpectNumber := from + int64(i)*int64(skip+1)\n\t\t\t\tif number := header.Number.Int64(); number != expectNumber {\n\t\t\t\t\tp.log.Warn(\"Head headers broke chain ordering\", \"index\", i, \"requested\", expectNumber, \"received\", number)\n\t\t\t\t\treturn 0, fmt.Errorf(\"%w: %v\", errInvalidChain, errors.New(\"head headers broke chain ordering\"))\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Check if a common ancestor was found\n\t\t\tfinished = true\n\t\t\tfor i := len(headers) - 1; i >= 0; i-- {\n\t\t\t\t// Skip any headers that underflow/overflow our requested set\n\t\t\t\tif headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// Otherwise check if we already know the header or not\n\t\t\t\th := headers[i].Hash()\n\t\t\t\tn := headers[i].Number.Uint64()\n\n\t\t\t\tvar known bool\n\t\t\t\tswitch mode {\n\t\t\t\tcase FullSync:\n\t\t\t\t\tknown = d.blockchain.HasBlock(h, n)\n\t\t\t\tcase FastSync:\n\t\t\t\t\tknown = d.blockchain.HasFastBlock(h, n)\n\t\t\t\tdefault:\n\t\t\t\t\tknown = d.lightchain.HasHeader(h, n)\n\t\t\t\t}\n\t\t\t\tif known {\n\t\t\t\t\tnumber, hash = n, h\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase <-timeout:\n\t\t\tp.log.Debug(\"Waiting for head header timed out\", \"elapsed\", ttl)\n\t\t\treturn 0, errTimeout\n\n\t\tcase <-d.bodyCh:\n\t\tcase <-d.receiptCh:\n\t\t\t// Out of bounds delivery, ignore\n\t\t}\n\t}\n\t// If the head fetch already found an ancestor, return\n\tif hash != (common.Hash{}) {\n\t\tif int64(number) <= floor {\n\t\t\tp.log.Warn(\"Ancestor below allowance\", \"number\", number, \"hash\", hash, \"allowance\", floor)\n\t\t\treturn 0, errInvalidAncestor\n\t\t}\n\t\tp.log.Debug(\"Found common ancestor\", \"number\", number, \"hash\", hash)\n\t\treturn number, nil\n\t}\n\treturn 0, errNoAncestorFound\n}\n\nfunc (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (commonAncestor uint64, err error) {\n\thash := common.Hash{}\n\n\t// Ancestor not found, we need to binary search over our chain\n\tstart, end := uint64(0), remoteHeight\n\tif floor > 0 {\n\t\tstart = uint64(floor)\n\t}\n\tp.log.Trace(\"Binary searching for common ancestor\", \"start\", start, \"end\", end)\n\n\tfor start+1 < end {\n\t\t// Split our chain interval in two, and request the hash to cross check\n\t\tcheck := (start + end) / 2\n\n\t\tttl := d.requestTTL()\n\t\ttimeout := time.After(ttl)\n\n\t\tgo p.peer.RequestHeadersByNumber(check, 1, 0, false)\n\n\t\t// Wait until a reply arrives to this request\n\t\tfor arrived := false; !arrived; {\n\t\t\tselect {\n\t\t\tcase <-d.cancelCh:\n\t\t\t\treturn 0, errCanceled\n\n\t\t\tcase packet := <-d.headerCh:\n\t\t\t\t// Discard anything not from the origin peer\n\t\t\t\tif packet.PeerId() != p.id {\n\t\t\t\t\tlog.Debug(\"Received headers from incorrect peer\", \"peer\", packet.PeerId())\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t// Make sure the peer actually gave something valid\n\t\t\t\theaders := packet.(*headerPack).headers\n\t\t\t\tif len(headers) != 1 {\n\t\t\t\t\tp.log.Warn(\"Multiple headers for single request\", \"headers\", len(headers))\n\t\t\t\t\treturn 0, fmt.Errorf(\"%w: multiple headers (%d) for single request\", errBadPeer, len(headers))\n\t\t\t\t}\n\t\t\t\tarrived = true\n\n\t\t\t\t// Modify the search interval based on the response\n\t\t\t\th := headers[0].Hash()\n\t\t\t\tn := headers[0].Number.Uint64()\n\n\t\t\t\tvar known 
bool\n\t\t\t\tswitch mode {\n\t\t\t\tcase FullSync:\n\t\t\t\t\tknown = d.blockchain.HasBlock(h, n)\n\t\t\t\tcase FastSync:\n\t\t\t\t\tknown = d.blockchain.HasFastBlock(h, n)\n\t\t\t\tdefault:\n\t\t\t\t\tknown = d.lightchain.HasHeader(h, n)\n\t\t\t\t}\n\t\t\t\tif !known {\n\t\t\t\t\tend = check\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\theader := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists\n\t\t\t\tif header.Number.Uint64() != check {\n\t\t\t\t\tp.log.Warn(\"Received non requested header\", \"number\", header.Number, \"hash\", header.Hash(), \"request\", check)\n\t\t\t\t\treturn 0, fmt.Errorf(\"%w: non-requested header (%d)\", errBadPeer, header.Number)\n\t\t\t\t}\n\t\t\t\tstart = check\n\t\t\t\thash = h\n\n\t\t\tcase <-timeout:\n\t\t\t\tp.log.Debug(\"Waiting for search header timed out\", \"elapsed\", ttl)\n\t\t\t\treturn 0, errTimeout\n\n\t\t\tcase <-d.bodyCh:\n\t\t\tcase <-d.receiptCh:\n\t\t\t\t// Out of bounds delivery, ignore\n\t\t\t}\n\t\t}\n\t}\n\t// Ensure valid ancestry and return\n\tif int64(start) <= floor {\n\t\tp.log.Warn(\"Ancestor below allowance\", \"number\", start, \"hash\", hash, \"allowance\", floor)\n\t\treturn 0, errInvalidAncestor\n\t}\n\tp.log.Debug(\"Found common ancestor\", \"number\", start, \"hash\", hash)\n\treturn start, nil\n}\n\n// fetchHeaders keeps retrieving headers concurrently from the number\n// requested, until no more are returned, potentially throttling on the way. To\n// facilitate concurrency but still protect against malicious nodes sending bad\n// headers, we construct a header chain skeleton using the \"origin\" peer we are\n// syncing with, and fill in the missing headers using anyone else. Headers from\n// other peers are only accepted if they map cleanly to the skeleton. 
If no one\n// can fill in the skeleton - not even the origin peer - it's assumed invalid and\n// the origin is dropped.\nfunc (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {\n\tp.log.Debug(\"Directing header downloads\", \"origin\", from)\n\tdefer p.log.Debug(\"Header download terminated\")\n\n\t// Create a timeout timer, and the associated header fetcher\n\tskeleton := true            // Skeleton assembly phase or finishing up\n\tpivoting := false           // Whether the next request is pivot verification\n\trequest := time.Now()       // time of the last skeleton fetch request\n\ttimeout := time.NewTimer(0) // timer to dump a non-responsive active peer\n\t<-timeout.C                 // timeout channel should be initially empty\n\tdefer timeout.Stop()\n\n\tvar ttl time.Duration\n\tgetHeaders := func(from uint64) {\n\t\trequest = time.Now()\n\n\t\tttl = d.requestTTL()\n\t\ttimeout.Reset(ttl)\n\n\t\tif skeleton {\n\t\t\tp.log.Trace(\"Fetching skeleton headers\", \"count\", MaxHeaderFetch, \"from\", from)\n\t\t\tgo p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)\n\t\t} else {\n\t\t\tp.log.Trace(\"Fetching full headers\", \"count\", MaxHeaderFetch, \"from\", from)\n\t\t\tgo p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)\n\t\t}\n\t}\n\tgetNextPivot := func() {\n\t\tpivoting = true\n\t\trequest = time.Now()\n\n\t\tttl = d.requestTTL()\n\t\ttimeout.Reset(ttl)\n\n\t\td.pivotLock.RLock()\n\t\tpivot := d.pivotHeader.Number.Uint64()\n\t\td.pivotLock.RUnlock()\n\n\t\tp.log.Trace(\"Fetching next pivot header\", \"number\", pivot+uint64(fsMinFullBlocks))\n\t\tgo p.peer.RequestHeadersByNumber(pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep\n\t}\n\t// Start pulling the header chain skeleton until all is done\n\tancestor := from\n\tgetHeaders(from)\n\n\tmode := d.getMode()\n\tfor {\n\t\tselect {\n\t\tcase <-d.cancelCh:\n\t\t\treturn errCanceled\n\n\t\tcase packet := <-d.headerCh:\n\t\t\t// Make sure the active peer is giving us the skeleton headers\n\t\t\tif packet.PeerId() != p.id {\n\t\t\t\tlog.Debug(\"Received skeleton from incorrect peer\", \"peer\", packet.PeerId())\n\t\t\t\tbreak\n\t\t\t}\n\t\t\theaderReqTimer.UpdateSince(request)\n\t\t\ttimeout.Stop()\n\n\t\t\t// If the pivot is being checked, move if it became stale and run the real retrieval\n\t\t\tvar pivot uint64\n\n\t\t\td.pivotLock.RLock()\n\t\t\tif d.pivotHeader != nil {\n\t\t\t\tpivot = d.pivotHeader.Number.Uint64()\n\t\t\t}\n\t\t\td.pivotLock.RUnlock()\n\n\t\t\tif pivoting {\n\t\t\t\tif packet.Items() == 2 {\n\t\t\t\t\t// Retrieve the headers and do some sanity checks, just in case\n\t\t\t\t\theaders := packet.(*headerPack).headers\n\n\t\t\t\t\tif have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want {\n\t\t\t\t\t\tlog.Warn(\"Peer sent invalid next pivot\", \"have\", have, \"want\", want)\n\t\t\t\t\t\treturn fmt.Errorf(\"%w: next pivot number %d != requested %d\", errInvalidChain, have, want)\n\t\t\t\t\t}\n\t\t\t\t\tif have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want {\n\t\t\t\t\t\tlog.Warn(\"Peer sent invalid pivot confirmer\", \"have\", have, \"want\", want)\n\t\t\t\t\t\treturn fmt.Errorf(\"%w: next pivot confirmer number %d != requested %d\", errInvalidChain, have, want)\n\t\t\t\t\t}\n\t\t\t\t\tlog.Warn(\"Pivot seemingly stale, moving\", \"old\", pivot, \"new\", headers[0].Number)\n\t\t\t\t\tpivot = 
headers[0].Number.Uint64()\n\n\t\t\t\t\td.pivotLock.Lock()\n\t\t\t\t\td.pivotHeader = headers[0]\n\t\t\t\t\td.pivotLock.Unlock()\n\n\t\t\t\t\t// Write out the pivot into the database so a rollback beyond\n\t\t\t\t\t// it will reenable fast sync and update the state root that\n\t\t\t\t\t// the state syncer will be downloading.\n\t\t\t\t\trawdb.WriteLastPivotNumber(d.stateDB, pivot)\n\t\t\t\t}\n\t\t\t\tpivoting = false\n\t\t\t\tgetHeaders(from)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If the skeleton's finished, pull any remaining head headers directly from the origin\n\t\t\tif skeleton && packet.Items() == 0 {\n\t\t\t\tskeleton = false\n\t\t\t\tgetHeaders(from)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If no more headers are inbound, notify the content fetchers and return\n\t\t\tif packet.Items() == 0 {\n\t\t\t\t// Don't abort header fetches while the pivot is downloading\n\t\t\t\tif atomic.LoadInt32(&d.committed) == 0 && pivot <= from {\n\t\t\t\t\tp.log.Debug(\"No headers, waiting for pivot commit\")\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-time.After(fsHeaderContCheck):\n\t\t\t\t\t\tgetHeaders(from)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tcase <-d.cancelCh:\n\t\t\t\t\t\treturn errCanceled\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Pivot done (or not in fast sync) and no more headers, terminate the process\n\t\t\t\tp.log.Debug(\"No more headers available\")\n\t\t\t\tselect {\n\t\t\t\tcase d.headerProcCh <- nil:\n\t\t\t\t\treturn nil\n\t\t\t\tcase <-d.cancelCh:\n\t\t\t\t\treturn errCanceled\n\t\t\t\t}\n\t\t\t}\n\t\t\theaders := packet.(*headerPack).headers\n\n\t\t\t// If we received a skeleton batch, resolve internals concurrently\n\t\t\tif skeleton {\n\t\t\t\tfilled, proced, err := d.fillHeaderSkeleton(from, headers)\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.log.Debug(\"Skeleton chain invalid\", \"err\", err)\n\t\t\t\t\treturn fmt.Errorf(\"%w: %v\", errInvalidChain, err)\n\t\t\t\t}\n\t\t\t\theaders = filled[proced:]\n\t\t\t\tfrom += uint64(proced)\n\t\t\t} else {\n\t\t\t\t// If we're closing in on the chain head, but haven't yet reached it, delay\n\t\t\t\t// the last few headers so mini reorgs on the head don't cause invalid hash\n\t\t\t\t// chain errors.\n\t\t\t\tif n := len(headers); n > 0 {\n\t\t\t\t\t// Retrieve the current head we're at\n\t\t\t\t\tvar head uint64\n\t\t\t\t\tif mode == LightSync {\n\t\t\t\t\t\thead = d.lightchain.CurrentHeader().Number.Uint64()\n\t\t\t\t\t} else {\n\t\t\t\t\t\thead = d.blockchain.CurrentFastBlock().NumberU64()\n\t\t\t\t\t\tif full := d.blockchain.CurrentBlock().NumberU64(); head < full {\n\t\t\t\t\t\t\thead = full\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// If the head is below the common ancestor, we're actually deduplicating\n\t\t\t\t\t// already existing chain segments, so use the ancestor as the fake head.\n\t\t\t\t\t// Otherwise we might end up delaying header deliveries pointlessly.\n\t\t\t\t\tif head < ancestor {\n\t\t\t\t\t\thead = ancestor\n\t\t\t\t\t}\n\t\t\t\t\t// If the head is way older than this batch, delay the last few headers\n\t\t\t\t\tif head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {\n\t\t\t\t\t\tdelay := reorgProtHeaderDelay\n\t\t\t\t\t\tif delay > n {\n\t\t\t\t\t\t\tdelay = n\n\t\t\t\t\t\t}\n\t\t\t\t\t\theaders = headers[:n-delay]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Insert all the new headers and fetch the next batch\n\t\t\tif len(headers) > 0 {\n\t\t\t\tp.log.Trace(\"Scheduling new headers\", \"count\", len(headers), \"from\", from)\n\t\t\t\tselect {\n\t\t\t\tcase d.headerProcCh <- headers:\n\t\t\t\tcase <-d.cancelCh:\n\t\t\t\t\treturn 
errCanceled\n\t\t\t\t}\n\t\t\t\tfrom += uint64(len(headers))\n\n\t\t\t\t// If we're still skeleton filling fast sync, check pivot staleness\n\t\t\t\t// before continuing to the next skeleton filling\n\t\t\t\tif skeleton && pivot > 0 {\n\t\t\t\t\tgetNextPivot()\n\t\t\t\t} else {\n\t\t\t\t\tgetHeaders(from)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// No headers delivered, or all of them being delayed, sleep a bit and retry\n\t\t\t\tp.log.Trace(\"All headers delayed, waiting\")\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(fsHeaderContCheck):\n\t\t\t\t\tgetHeaders(from)\n\t\t\t\t\tcontinue\n\t\t\t\tcase <-d.cancelCh:\n\t\t\t\t\treturn errCanceled\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase <-timeout.C:\n\t\t\tif d.dropPeer == nil {\n\t\t\t\t// The dropPeer method is nil when `--copydb` is used for a local copy.\n\t\t\t\t// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored\n\t\t\t\tp.log.Warn(\"Downloader wants to drop peer, but peerdrop-function is not set\", \"peer\", p.id)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Header retrieval timed out, consider the peer bad and drop\n\t\t\tp.log.Debug(\"Header request timed out\", \"elapsed\", ttl)\n\t\t\theaderTimeoutMeter.Mark(1)\n\t\t\td.dropPeer(p.id)\n\n\t\t\t// Finish the sync gracefully instead of dumping the gathered data though\n\t\t\tfor _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {\n\t\t\t\tselect {\n\t\t\t\tcase ch <- false:\n\t\t\t\tcase <-d.cancelCh:\n\t\t\t\t}\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase d.headerProcCh <- nil:\n\t\t\tcase <-d.cancelCh:\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"%w: header request timed out\", errBadPeer)\n\t\t}\n\t}\n}\n\n// fillHeaderSkeleton concurrently retrieves headers from all our available peers\n// and maps them to the provided skeleton header chain.\n//\n// Any partial results from the beginning of the skeleton are (if possible) forwarded\n// immediately to the header processor to keep the rest of the pipeline full even\n// in the case of header stalls.\n//\n// The method returns the entire filled skeleton and also the number of headers\n// already forwarded for processing.\nfunc (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {\n\tlog.Debug(\"Filling up skeleton\", \"from\", from)\n\td.queue.ScheduleSkeleton(from, skeleton)\n\n\tvar (\n\t\tdeliver = func(packet dataPack) (int, error) {\n\t\t\tpack := packet.(*headerPack)\n\t\t\treturn d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)\n\t\t}\n\t\texpire  = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }\n\t\treserve = func(p *peerConnection, count int) (*fetchRequest, bool, bool) {\n\t\t\treturn d.queue.ReserveHeaders(p, count), false, false\n\t\t}\n\t\tfetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }\n\t\tcapacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }\n\t\tsetIdle  = func(p *peerConnection, accepted int, deliveryTime time.Time) {\n\t\t\tp.SetHeadersIdle(accepted, deliveryTime)\n\t\t}\n\t)\n\terr := d.fetchParts(d.headerCh, deliver, d.queue.headerContCh, expire,\n\t\td.queue.PendingHeaders, d.queue.InFlightHeaders, reserve,\n\t\tnil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, \"headers\")\n\n\tlog.Debug(\"Skeleton fill terminated\", \"err\", err)\n\n\tfilled, proced := d.queue.RetrieveHeaders()\n\treturn filled, proced, err\n}\n\n// fetchBodies iteratively downloads the scheduled block bodies, taking any\n// available 
\n\terr := d.fetchParts(d.headerCh, deliver, d.queue.headerContCh, expire,\n\t\td.queue.PendingHeaders, d.queue.InFlightHeaders, reserve,\n\t\tnil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, \"headers\")\n\n\tlog.Debug(\"Skeleton fill terminated\", \"err\", err)\n\n\tfilled, proced := d.queue.RetrieveHeaders()\n\treturn filled, proced, err\n}\n\n// fetchBodies iteratively downloads the scheduled block bodies, taking any\n// available peers, reserving a chunk of blocks for each, waiting for delivery\n// and also periodically checking for timeouts.\nfunc (d *Downloader) fetchBodies(from uint64) error {\n\tlog.Debug(\"Downloading block bodies\", \"origin\", from)\n\n\tvar (\n\t\tdeliver = func(packet dataPack) (int, error) {\n\t\t\tpack := packet.(*bodyPack)\n\t\t\treturn d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles)\n\t\t}\n\t\texpire   = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }\n\t\tfetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }\n\t\tcapacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }\n\t\tsetIdle  = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetBodiesIdle(accepted, deliveryTime) }\n\t)\n\terr := d.fetchParts(d.bodyCh, deliver, d.bodyWakeCh, expire,\n\t\td.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ReserveBodies,\n\t\td.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, \"bodies\")\n\n\tlog.Debug(\"Block body download terminated\", \"err\", err)\n\treturn err\n}\n\n// fetchReceipts iteratively downloads the scheduled block receipts, taking any\n// available peers, reserving a chunk of receipts for each, waiting for delivery\n// and also periodically checking for timeouts.\nfunc (d *Downloader) fetchReceipts(from uint64) error {\n\tlog.Debug(\"Downloading transaction receipts\", \"origin\", from)\n\n\tvar (\n\t\tdeliver = func(packet dataPack) (int, error) {\n\t\t\tpack := packet.(*receiptPack)\n\t\t\treturn d.queue.DeliverReceipts(pack.peerID, pack.receipts)\n\t\t}\n\t\texpire   = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }\n\t\tfetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }\n\t\tcapacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }\n\t\tsetIdle  = func(p *peerConnection, accepted int, deliveryTime time.Time) {\n\t\t\tp.SetReceiptsIdle(accepted, deliveryTime)\n\t\t}\n\t)\n\terr := d.fetchParts(d.receiptCh, deliver, d.receiptWakeCh, expire,\n\t\td.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ReserveReceipts,\n\t\td.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, \"receipts\")\n\n\tlog.Debug(\"Transaction receipt download terminated\", \"err\", err)\n\treturn err\n}\n\n// fetchParts iteratively downloads scheduled block parts, taking any available\n// peers, reserving a chunk of fetch requests for each, waiting for delivery and\n// also periodically checking for timeouts.\n//\n// As the scheduling/timeout logic is mostly the same for all downloaded data\n// types, this method is used by each for data gathering and is instrumented with\n// various callbacks to handle the slight differences between processing them.\n//\n// The instrumentation parameters:\n//  - deliveryCh:  channel from which to retrieve downloaded data packets (merged from all concurrent peers)\n//  - deliver:     processing callback to deliver data packets into type specific download queues (usually within `queue`)\n//  - wakeCh:      notification channel for waking the fetcher when new tasks are available (or sync completed)\n//  - expire:      task callback method to abort requests that took too long and return the faulty peers (traffic shaping)\n//  - pending:     task callback for the number of 
requests still needing download (detect completion/non-completability)\n//  - inFlight:    task callback for the number of in-progress requests (wait for all active downloads to finish)\n//  - reserve:     task callback to reserve new download tasks to a particular peer (also signals partial completions)\n//  - fetchHook:   tester callback to notify of new tasks being initiated (allows testing the scheduling logic)\n//  - fetch:       network callback to actually send a particular download request to a physical remote peer\n//  - cancel:      task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)\n//  - capacity:    network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)\n//  - idle:        network callback to retrieve the currently (type specific) idle peers that can be assigned tasks\n//  - setIdle:     network callback to set a peer back to idle and update its estimated capacity (traffic shaping)\n//  - kind:        textual label of the type being downloaded to display in log messages
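\n//\n// Note that there is no separate throttling callback: throttling is signalled\n// through the third return value of reserve, which reports when the result\n// cache is full so that downloads pause and memory use stays bounded.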
\nfunc (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,\n\texpire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool),\n\tfetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,\n\tidle func() ([]*peerConnection, int), setIdle func(*peerConnection, int, time.Time), kind string) error {\n\n\t// Create a ticker to detect expired retrieval tasks\n\tticker := time.NewTicker(100 * time.Millisecond)\n\tdefer ticker.Stop()\n\n\tupdate := make(chan struct{}, 1)\n\n\t// Prepare the queue and fetch block parts until the block header fetcher's done\n\tfinished := false\n\tfor {\n\t\tselect {\n\t\tcase <-d.cancelCh:\n\t\t\treturn errCanceled\n\n\t\tcase packet := <-deliveryCh:\n\t\t\tdeliveryTime := time.Now()\n\t\t\t// If the peer was previously banned and failed to deliver its pack\n\t\t\t// in a reasonable time frame, ignore its message.\n\t\t\tif peer := d.peers.Peer(packet.PeerId()); peer != nil {\n\t\t\t\t// Deliver the received chunk of data and check chain validity\n\t\t\t\taccepted, err := deliver(packet)\n\t\t\t\tif errors.Is(err, errInvalidChain) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t// Unless a peer delivered something completely different from what was requested\n\t\t\t\t// (usually caused by a timed out request which came through in the end), set it to\n\t\t\t\t// idle. If the delivery's stale, the peer should have already been idled.\n\t\t\t\tif !errors.Is(err, errStaleDelivery) {\n\t\t\t\t\tsetIdle(peer, accepted, deliveryTime)\n\t\t\t\t}\n\t\t\t\t// Issue a log to the user to see what's going on\n\t\t\t\tswitch {\n\t\t\t\tcase err == nil && packet.Items() == 0:\n\t\t\t\t\tpeer.log.Trace(\"Requested data not delivered\", \"type\", kind)\n\t\t\t\tcase err == nil:\n\t\t\t\t\tpeer.log.Trace(\"Delivered new batch of data\", \"type\", kind, \"count\", packet.Stats())\n\t\t\t\tdefault:\n\t\t\t\t\tpeer.log.Debug(\"Failed to deliver retrieved data\", \"type\", kind, \"err\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Blocks assembled, try to update the progress\n\t\t\tselect {\n\t\t\tcase update <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\n\t\tcase cont := <-wakeCh:\n\t\t\t// The header fetcher sent a continuation flag, check if it's done\n\t\t\tif !cont {\n\t\t\t\tfinished = true\n\t\t\t}\n\t\t\t// Headers arrive, try to update the progress\n\t\t\tselect {\n\t\t\tcase update <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\n\t\tcase <-ticker.C:\n\t\t\t// Sanity check: update the progress\n\t\t\tselect {\n\t\t\tcase update <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n
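\n\t\t// A progress check is triggered by a delivery, a wake signal or the ticker.\n\t\t// The update channel has capacity one and is fed with non-blocking sends, so\n\t\t// bursts of triggers collapse into a single scheduling pass.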
\n\t\tcase <-update:\n\t\t\t// Short circuit if we lost all our peers\n\t\t\tif d.peers.Len() == 0 {\n\t\t\t\treturn errNoPeers\n\t\t\t}\n\t\t\t// Check for fetch request timeouts and demote the responsible peers\n\t\t\tfor pid, fails := range expire() {\n\t\t\t\tif peer := d.peers.Peer(pid); peer != nil {\n\t\t\t\t\t// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps\n\t\t\t\t\t// ourselves. Only reset to minimal throughput, but don't drop just yet. If even the minimal request\n\t\t\t\t\t// times out, we do need to get rid of the peer, sync-wise.\n\t\t\t\t\t//\n\t\t\t\t\t// The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth\n\t\t\t\t\t// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing\n\t\t\t\t\t// how response times react, so it always requests one more than the minimum (i.e. min 2).\n\t\t\t\t\tif fails > 2 {\n\t\t\t\t\t\tpeer.log.Trace(\"Data delivery timed out\", \"type\", kind)\n\t\t\t\t\t\tsetIdle(peer, 0, time.Now())\n\t\t\t\t\t} else {\n\t\t\t\t\t\tpeer.log.Debug(\"Stalling delivery, dropping\", \"type\", kind)\n\n\t\t\t\t\t\tif d.dropPeer == nil {\n\t\t\t\t\t\t\t// The dropPeer method is nil when `--copydb` is used for a local copy.\n\t\t\t\t\t\t\t// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored\n\t\t\t\t\t\t\tpeer.log.Warn(\"Downloader wants to drop peer, but peerdrop-function is not set\", \"peer\", pid)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\td.dropPeer(pid)\n\n\t\t\t\t\t\t\t// If this peer was the master peer, abort sync immediately\n\t\t\t\t\t\t\td.cancelLock.RLock()\n\t\t\t\t\t\t\tmaster := pid == d.cancelPeer\n\t\t\t\t\t\t\td.cancelLock.RUnlock()\n\n\t\t\t\t\t\t\tif master {\n\t\t\t\t\t\t\t\td.cancel()\n\t\t\t\t\t\t\t\treturn errTimeout\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If there's nothing more to fetch, wait or terminate\n\t\t\tif pending() == 0 {\n\t\t\t\tif !inFlight() && finished {\n\t\t\t\t\tlog.Debug(\"Data fetching completed\", \"type\", kind)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Send a download request to all idle peers, until throttled\n\t\t\tprogressed, throttled, running := false, false, inFlight()\n\t\t\tidles, total := idle()\n\t\t\tpendCount := pending()\n\t\t\tfor _, peer := range idles {\n\t\t\t\t// Short circuit if throttling activated\n\t\t\t\tif throttled {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t// Short circuit if there are no more available tasks.\n\t\t\t\tif pendCount = pending(); pendCount == 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t// Reserve a chunk of fetches for a peer. A nil can mean either that\n\t\t\t\t// no more headers are available, or that the peer is known not to\n\t\t\t\t// have them.\n\t\t\t\trequest, progress, throttle := reserve(peer, capacity(peer))\n\t\t\t\tif progress {\n\t\t\t\t\tprogressed = true\n\t\t\t\t}\n\t\t\t\tif throttle {\n\t\t\t\t\tthrottled = true\n\t\t\t\t\tthrottleCounter.Inc(1)\n\t\t\t\t}\n\t\t\t\tif request == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n
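\t\t\t\t// Header requests are identified by their starting block number (From),\n\t\t\t\t// while body and receipt requests carry the list of headers to fill in.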
\n\t\t\t\tif request.From > 0 {\n\t\t\t\t\tpeer.log.Trace(\"Requesting new batch of data\", \"type\", kind, \"from\", request.From)\n\t\t\t\t} else {\n\t\t\t\t\tpeer.log.Trace(\"Requesting new batch of data\", \"type\", kind, \"count\", len(request.Headers), \"from\", request.Headers[0].Number)\n\t\t\t\t}\n\t\t\t\t// Fetch the chunk and make sure any errors return the hashes to the queue\n\t\t\t\tif fetchHook != nil {\n\t\t\t\t\tfetchHook(request.Headers)\n\t\t\t\t}\n\t\t\t\tif err := fetch(peer, request); err != nil {\n\t\t\t\t\t// Although we could try to fix this, the error really means that we've\n\t\t\t\t\t// double allocated a fetch task to a peer. If that is the case, the\n\t\t\t\t\t// internal state of the downloader and the queue is very wrong, so better\n\t\t\t\t\t// hard crash and note the error instead of silently accumulating into\n\t\t\t\t\t// a much bigger issue.\n\t\t\t\t\tpanic(fmt.Sprintf(\"%v: %s fetch assignment failed\", peer, kind))\n\t\t\t\t}\n\t\t\t\trunning = true\n\t\t\t}\n\t\t\t// Make sure that we have peers available for fetching. If all peers have been tried\n\t\t\t// and all failed, throw an error\n\t\t\tif !progressed && !throttled && !running && len(idles) == total && pendCount > 0 {\n\t\t\t\treturn errPeersUnavailable\n\t\t\t}\n\t\t}\n\t}\n}\n\n// processHeaders takes batches of retrieved headers from an input channel and\n// keeps processing and scheduling them into the header chain and downloader's\n// queue until the stream ends or a failure occurs.\nfunc (d *Downloader) processHeaders(origin uint64, td *big.Int) error {\n\t// Keep a count of uncertain headers to roll back\n\tvar (\n\t\trollback    uint64 // Zero means no rollback (fine as you can't unroll the genesis)\n\t\trollbackErr error\n\t\tmode        = d.getMode()\n\t)
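\n\t// On the way out, if a rollback height was recorded, rewind the header chain\n\t// to just before the first uncertain header and log how far each chain\n\t// component was moved back.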
\n\tdefer func() {\n\t\tif rollback > 0 {\n\t\t\tlastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0\n\t\t\tif mode != LightSync {\n\t\t\t\tlastFastBlock = d.blockchain.CurrentFastBlock().Number()\n\t\t\t\tlastBlock = d.blockchain.CurrentBlock().Number()\n\t\t\t}\n\t\t\tif err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block\n\t\t\t\t// We're already unwinding the stack, only print the error to make it more visible\n\t\t\t\tlog.Error(\"Failed to roll back chain segment\", \"head\", rollback-1, \"err\", err)\n\t\t\t}\n\t\t\tcurFastBlock, curBlock := common.Big0, common.Big0\n\t\t\tif mode != LightSync {\n\t\t\t\tcurFastBlock = d.blockchain.CurrentFastBlock().Number()\n\t\t\t\tcurBlock = d.blockchain.CurrentBlock().Number()\n\t\t\t}\n\t\t\tlog.Warn(\"Rolled back chain segment\",\n\t\t\t\t\"header\", fmt.Sprintf(\"%d->%d\", lastHeader, d.lightchain.CurrentHeader().Number),\n\t\t\t\t\"fast\", fmt.Sprintf(\"%d->%d\", lastFastBlock, curFastBlock),\n\t\t\t\t\"block\", fmt.Sprintf(\"%d->%d\", lastBlock, curBlock), \"reason\", rollbackErr)\n\t\t}\n\t}()\n\t// Wait for batches of headers to process\n\tgotHeaders := false\n\n\tfor {\n\t\tselect {\n\t\tcase <-d.cancelCh:\n\t\t\trollbackErr = errCanceled\n\t\t\treturn errCanceled\n\n\t\tcase headers := <-d.headerProcCh:\n\t\t\t// Terminate header processing if we synced up\n\t\t\tif len(headers) == 0 {\n\t\t\t\t// Notify everyone that headers are fully processed\n\t\t\t\tfor _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase ch <- false:\n\t\t\t\t\tcase <-d.cancelCh:\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// If no headers were retrieved at all, the peer violated its TD promise that it had a\n\t\t\t\t// better chain compared to ours. The only exception is if its promised blocks were\n\t\t\t\t// already imported by other means (e.g. fetcher):\n\t\t\t\t//\n\t\t\t\t// R <remote peer>, L <local node>: Both at block 10\n\t\t\t\t// R: Mine block 11, and propagate it to L\n\t\t\t\t// L: Queue block 11 for import\n\t\t\t\t// L: Notice that R's head and TD increased compared to ours, start sync\n\t\t\t\t// L: Import of block 11 finishes\n\t\t\t\t// L: Sync begins, and finds common ancestor at 11\n\t\t\t\t// L: Request new headers up from 11 (R's TD was higher, it must have something)\n\t\t\t\t// R: Nothing to give\n\t\t\t\tif mode != LightSync {\n\t\t\t\t\thead := d.blockchain.CurrentBlock()\n\t\t\t\t\tif !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {\n\t\t\t\t\t\treturn errStallingPeer\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// If fast or light syncing, ensure promised headers are indeed delivered. This is\n\t\t\t\t// needed to detect scenarios where an attacker feeds a bad pivot and then bails out\n\t\t\t\t// of delivering the post-pivot blocks that would flag the invalid content.\n\t\t\t\t//\n\t\t\t\t// This check cannot be executed \"as is\" for full imports, since blocks may still be\n\t\t\t\t// queued for processing when the header download completes. However, as long as the\n\t\t\t\t// peer gave us something useful, we're already happy/progressed (above check).\n\t\t\t\tif mode == FastSync || mode == LightSync {\n\t\t\t\t\thead := d.lightchain.CurrentHeader()\n\t\t\t\t\tif td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {\n\t\t\t\t\t\treturn errStallingPeer\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Disable any rollback and return\n\t\t\t\trollback = 0\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// Otherwise split the chunk of headers into batches and process them\n\t\t\tgotHeaders = true\n\t\t\tfor len(headers) > 0 {\n\t\t\t\t// Terminate if something failed in between processing chunks\n\t\t\t\tselect {\n\t\t\t\tcase <-d.cancelCh:\n\t\t\t\t\trollbackErr = errCanceled\n\t\t\t\t\treturn errCanceled\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\t// Select the next chunk of headers to import\n\t\t\t\tlimit := maxHeadersProcess\n\t\t\t\tif limit > len(headers) {\n\t\t\t\t\tlimit = len(headers)\n\t\t\t\t}\n\t\t\t\tchunk := headers[:limit]\n\n\t\t\t\t// In case of header-only syncing, validate the chunk immediately\n\t\t\t\tif mode == FastSync || mode == LightSync {\n\t\t\t\t\t// If we're importing pure headers, verify based on their recentness\n\t\t\t\t\tvar pivot uint64\n\n\t\t\t\t\td.pivotLock.RLock()\n\t\t\t\t\tif d.pivotHeader != nil {\n\t\t\t\t\t\tpivot = d.pivotHeader.Number.Uint64()\n\t\t\t\t\t}\n\t\t\t\t\td.pivotLock.RUnlock()\n
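\n\t\t\t\t\t// Chunks ending within fsHeaderForceVerify of the pivot (or beyond it)\n\t\t\t\t\t// are verified header by header, while older chunks are only spot-checked\n\t\t\t\t\t// at the regular frequency.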
\n\t\t\t\t\tfrequency := fsHeaderCheckFrequency\n\t\t\t\t\tif chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {\n\t\t\t\t\t\tfrequency = 1\n\t\t\t\t\t}\n\t\t\t\t\tif n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {\n\t\t\t\t\t\trollbackErr = err\n\n\t\t\t\t\t\t// If some headers were inserted, track them as uncertain\n\t\t\t\t\t\tif (mode == FastSync || frequency > 1) && n > 0 && rollback == 0 {\n\t\t\t\t\t\t\trollback = chunk[0].Number.Uint64()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tlog.Warn(\"Invalid header encountered\", \"number\", chunk[n].Number, \"hash\", chunk[n].Hash(), \"parent\", chunk[n].ParentHash, \"err\", err)\n\t\t\t\t\t\treturn fmt.Errorf(\"%w: %v\", errInvalidChain, err)\n\t\t\t\t\t}\n\t\t\t\t\t// All verifications passed, track all headers within the allotted limits\n\t\t\t\t\tif mode == FastSync {\n\t\t\t\t\t\thead := chunk[len(chunk)-1].Number.Uint64()\n\t\t\t\t\t\tif head-rollback > uint64(fsHeaderSafetyNet) {\n\t\t\t\t\t\t\trollback = head - uint64(fsHeaderSafetyNet)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\trollback = 1\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Unless we're doing light chains, schedule the headers for associated content retrieval\n\t\t\t\tif mode == FullSync || mode == FastSync {\n\t\t\t\t\t// If we've reached the allowed number of pending headers, stall a bit\n\t\t\t\t\tfor d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-d.cancelCh:\n\t\t\t\t\t\t\trollbackErr = errCanceled\n\t\t\t\t\t\t\treturn errCanceled\n\t\t\t\t\t\tcase <-time.After(time.Second):\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// Otherwise insert the headers for content retrieval\n\t\t\t\t\tinserts := d.queue.Schedule(chunk, origin)\n\t\t\t\t\tif len(inserts) != len(chunk) {\n\t\t\t\t\t\trollbackErr = fmt.Errorf(\"stale headers: len inserts %v len(chunk) %v\", len(inserts), len(chunk))\n\t\t\t\t\t\treturn fmt.Errorf(\"%w: stale headers\", errBadPeer)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\theaders = headers[limit:]\n\t\t\t\torigin += uint64(limit)\n\t\t\t}\n\t\t\t// Update the highest block number we know if a higher one is found.\n\t\t\td.syncStatsLock.Lock()\n\t\t\tif d.syncStatsChainHeight < origin {\n\t\t\t\td.syncStatsChainHeight = origin - 1\n\t\t\t}\n\t\t\td.syncStatsLock.Unlock()\n\n\t\t\t// Signal the content downloaders of the availability of new tasks\n\t\t\tfor _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {\n\t\t\t\tselect {\n\t\t\t\tcase ch <- true:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n// processFullSyncContent takes fetch results from the queue and imports them into the chain.\nfunc (d *Downloader) processFullSyncContent() error {\n\tfor {\n
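\t\t// Results(true) blocks until a batch of complete fetch results is available\n\t\t// or the queue is closed; an empty batch means there is nothing left to import.\n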
\t\tresults := d.queue.Results(true)\n\t\tif len(results) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tif d.chainInsertHook != nil {\n\t\t\td.chainInsertHook(results)\n\t\t}\n\t\tif err := d.importBlockResults(results); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nfunc (d *Downloader) importBlockResults(results []*fetchResult) error {\n\t// Check for any early termination requests\n\tif len(results) == 0 {\n\t\treturn nil\n\t}\n\tselect {\n\tcase <-d.quitCh:\n\t\treturn errCancelContentProcessing\n\tdefault:\n\t}\n\t// Retrieve a batch of results to import\n\tfirst, last := results[0].Header, results[len(results)-1].Header\n\tlog.Debug(\"Inserting downloaded chain\", \"items\", len(results),\n\t\t\"firstnum\", first.Number, \"firsthash\", first.Hash(),\n\t\t\"lastnum\", last.Number, \"lasthash\", last.Hash(),\n\t)\n\tblocks := make([]*types.Block, len(results))\n\tfor i, result := range results {\n\t\tblocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)\n\t}\n\tif index, err := d.blockchain.InsertChain(blocks); err != nil {\n\t\tif index < len(results) {\n\t\t\tlog.Debug(\"Downloaded item processing failed\", \"number\", results[index].Header.Number, \"hash\", results[index].Header.Hash(), \"err\", err)\n\t\t} else {\n\t\t\t// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,\n\t\t\t// when it needs to preprocess blocks to import a sidechain.\n\t\t\t// The importer will put together a new list of blocks to import, which is a superset\n\t\t\t// of the blocks delivered from the downloader, and the indexing will be off.\n\t\t\tlog.Debug(\"Downloaded item processing failed on sidechain import\", \"index\", index, \"err\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"%w: %v\", errInvalidChain, err)\n\t}\n\treturn nil\n}\n\n// processFastSyncContent takes fetch results from the queue and writes them to the\n// database. It also controls the synchronisation of state nodes of the pivot block.\nfunc (d *Downloader) processFastSyncContent() error {\n\t// Start syncing state of the reported head block. This should get us most of\n\t// the state of the pivot block.\n\td.pivotLock.RLock()\n\tsync := d.syncState(d.pivotHeader.Root)\n\td.pivotLock.RUnlock()\n\n\tdefer func() {\n\t\t// The `sync` object is replaced every time the pivot moves. We need to\n\t\t// defer close the very last active one, hence the lazy evaluation vs.\n\t\t// calling defer sync.Cancel() !!!\n\t\tsync.Cancel()\n\t}()\n\n\tcloseOnErr := func(s *stateSync) {\n\t\tif err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {\n\t\t\td.queue.Close() // wake up Results\n\t\t}\n\t}\n\tgo closeOnErr(sync)\n\n\t// To cater for moving pivot points, track the pivot block and subsequently\n\t// accumulated download results separately.\n\tvar (\n\t\toldPivot *fetchResult   // Locked in pivot block, might change eventually\n\t\toldTail  []*fetchResult // Downloaded content after the pivot\n\t)\n\tfor {\n\t\t// Wait for the next batch of downloaded data to be available, and if the pivot\n\t\t// block became stale, move the goalpost\n\t\tresults := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness\n\t\tif len(results) == 0 {\n\t\t\t// If pivot sync is done, stop\n\t\t\tif oldPivot == nil {\n\t\t\t\treturn sync.Cancel()\n\t\t\t}\n\t\t\t// If sync failed, stop\n\t\t\tselect {\n\t\t\tcase <-d.cancelCh:\n\t\t\t\tsync.Cancel()\n\t\t\t\treturn errCanceled\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\tif d.chainInsertHook != nil {\n\t\t\td.chainInsertHook(results)\n\t\t}\n\t\t// If we haven't downloaded the pivot block yet, check pivot staleness\n\t\t// notifications from the header downloader\n\t\td.pivotLock.RLock()\n\t\tpivot := d.pivotHeader\n\t\td.pivotLock.RUnlock()\n\n\t\tif oldPivot == nil {\n\t\t\tif pivot.Root != sync.root {\n\t\t\t\tsync.Cancel()\n\t\t\t\tsync = d.syncState(pivot.Root)\n\n\t\t\t\tgo closeOnErr(sync)\n\t\t\t}\n\t\t} else {\n\t\t\tresults = append(append([]*fetchResult{oldPivot}, oldTail...), results...)\n\t\t}\n\t\t// Split around the pivot block and process the two sides via fast/full sync\n\t\tif atomic.LoadInt32(&d.committed) == 0 {\n\t\t\tlatest := results[len(results)-1].Header\n\t\t\t// If the height is above the pivot block by 2 sets, it means the pivot\n\t\t\t// became stale in the network and it was garbage collected, move to a\n\t\t\t// new pivot.\n\t\t\t//\n\t\t\t// Note, we have `reorgProtHeaderDelay` number of blocks withheld. Those\n\t\t\t// need to be taken into account, otherwise we're detecting the pivot move\n\t\t\t// late and will drop peers due to unavailable state!!!\n\t\t\tif height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) {\n\t\t\t\tlog.Warn(\"Pivot became stale, moving\", \"old\", pivot.Number.Uint64(), \"new\", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay))\n\t\t\t\tpivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted\n\n\t\t\t\td.pivotLock.Lock()\n\t\t\t\td.pivotHeader = pivot\n\t\t\t\td.pivotLock.Unlock()\n\n\t\t\t\t// Write out the pivot into the database so a rollback beyond it will\n\t\t\t\t// reenable fast sync\n\t\t\t\trawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())\n\t\t\t}\n\t\t}\n
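\t\t// Split the batch around the pivot: blocks before it are committed as fast\n\t\t// sync data (bodies and receipts), the pivot itself waits for its state to\n\t\t// complete, and blocks after it go through full block import.\n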
\t\tP, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)\n\t\tif err := d.commitFastSyncData(beforeP, sync); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif P != nil {\n\t\t\t// If new pivot block found, cancel old state retrieval and restart\n\t\t\tif oldPivot != P {\n\t\t\t\tsync.Cancel()\n\t\t\t\tsync = d.syncState(P.Header.Root)\n\n\t\t\t\tgo closeOnErr(sync)\n\t\t\t\toldPivot = P\n\t\t\t}\n\t\t\t// Wait for completion, occasionally checking for pivot staleness\n\t\t\tselect {\n\t\t\tcase <-sync.done:\n\t\t\t\tif sync.err != nil {\n\t\t\t\t\treturn sync.err\n\t\t\t\t}\n\t\t\t\tif err := d.commitPivotBlock(P); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\toldPivot = nil\n\n\t\t\tcase <-time.After(time.Second):\n\t\t\t\toldTail = afterP\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// Fast sync done, pivot commit done, full import\n\t\tif err := d.importBlockResults(afterP); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nfunc splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {\n\tif len(results) == 0 {\n\t\treturn nil, nil, nil\n\t}\n\tif lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {\n\t\t// the pivot is somewhere in the future\n\t\treturn nil, results, nil\n\t}\n\t// This can also be optimized, but only happens very seldom\n\tfor _, result := range results {\n\t\tnum := result.Header.Number.Uint64()\n\t\tswitch {\n\t\tcase num < pivot:\n\t\t\tbefore = append(before, result)\n\t\tcase num == pivot:\n\t\t\tp = result\n\t\tdefault:\n\t\t\tafter = append(after, result)\n\t\t}\n\t}\n\treturn p, before, after\n}\n\nfunc (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {\n\t// Check for any early termination requests\n\tif len(results) == 0 {\n\t\treturn nil\n\t}\n\tselect {\n\tcase <-d.quitCh:\n\t\treturn errCancelContentProcessing\n\tcase <-stateSync.done:\n\t\tif err := stateSync.Wait(); err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t}\n\t// Retrieve a batch of results to import\n\tfirst, last := results[0].Header, results[len(results)-1].Header\n\tlog.Debug(\"Inserting fast-sync blocks\", \"items\", len(results),\n\t\t\"firstnum\", first.Number, \"firsthash\", first.Hash(),\n\t\t\"lastnum\", last.Number, \"lasthash\", last.Hash(),\n\t)\n\tblocks := make([]*types.Block, len(results))\n\treceipts := make([]types.Receipts, len(results))\n\tfor i, result := range results {\n\t\tblocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)\n\t\treceipts[i] = result.Receipts\n\t}\n\tif index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {\n\t\tlog.Debug(\"Downloaded item processing failed\", \"number\", results[index].Header.Number, \"hash\", results[index].Header.Hash(), \"err\", err)\n\t\treturn fmt.Errorf(\"%w: %v\", errInvalidChain, err)\n\t}\n\treturn nil\n}\n\nfunc (d *Downloader) commitPivotBlock(result *fetchResult) error {\n\tblock := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)\n\tlog.Debug(\"Committing fast sync pivot as new head\", \"number\", block.Number(), \"hash\", block.Hash())\n\n\t// Commit the pivot block as the new head, will require full sync from here on\n\tif _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {\n\t\treturn err\n\t}\n\tif err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {\n\t\treturn err\n\t}\n
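\t// Mark the pivot as committed: header fetching no longer needs to wait for it,\n\t// and everything above it will from now on be imported with full block processing.\n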
\tatomic.StoreInt32(&d.committed, 1)\n\n\t// If we had a bloom filter for the state sync, deallocate it now. Note, we only\n\t// deallocate internally, but keep the empty wrapper. This ensures that if we do\n\t// a rollback after committing the pivot and restarting fast sync, we don't end\n\t// up using a nil bloom. Empty bloom is fine, it just returns that it does not\n\t// have the info we need, so reach down to the database instead.\n\tif d.stateBloom != nil {\n\t\td.stateBloom.Close()\n\t}\n\treturn nil\n}\n\n// DeliverHeaders injects a new batch of block headers received from a remote\n// node into the download schedule.\nfunc (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error {\n\treturn d.deliver(d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)\n}\n\n// DeliverBodies injects a new batch of block bodies received from a remote node.\nfunc (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) error {\n\treturn d.deliver(d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)\n}\n\n// DeliverReceipts injects a new batch of receipts received from a remote node.\nfunc (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) error {\n\treturn d.deliver(d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)\n}\n\n// DeliverNodeData injects a new batch of node state data received from a remote node.\nfunc (d *Downloader) DeliverNodeData(id string, data [][]byte) error {\n\treturn d.deliver(d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)\n}\n\n// DeliverSnapPacket is invoked from a peer's message handler when it transmits a\n// data packet for the local node to consume.\nfunc (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {\n\tswitch packet := packet.(type) {\n\tcase *snap.AccountRangePacket:\n\t\thashes, accounts, err := packet.Unpack()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)\n\n\tcase *snap.StorageRangesPacket:\n\t\thashset, slotset := packet.Unpack()\n\t\treturn d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)\n\n\tcase *snap.ByteCodesPacket:\n\t\treturn d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)\n\n\tcase *snap.TrieNodesPacket:\n\t\treturn d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)\n\n\tdefault:\n\t\treturn fmt.Errorf(\"unexpected snap packet type: %T\", packet)\n\t}\n}\n\n// deliver injects a new batch of data received from a remote node.\nfunc (d *Downloader) deliver(destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {\n\t// Update the delivery metrics for both good and failed deliveries\n\tinMeter.Mark(int64(packet.Items()))\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tdropMeter.Mark(int64(packet.Items()))\n\t\t}\n\t}()\n\t// Deliver or abort if the sync is canceled while queuing\n\td.cancelLock.RLock()\n\tcancel := d.cancelCh\n\td.cancelLock.RUnlock()\n\tif cancel == nil {\n\t\treturn errNoSyncActive\n\t}\n\tselect {\n\tcase destCh <- packet:\n\t\treturn nil\n\tcase <-cancel:\n\t\treturn errNoSyncActive\n\t}\n}\n\n// qosTuner is the quality of service tuning loop that occasionally gathers the\n// peer latency statistics and updates the estimated request round trip time.\nfunc (d *Downloader) qosTuner() {\n\tfor {\n\t\t// Retrieve the current median RTT and integrate into the previous target RTT\n
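\t\t// This is an exponential moving average: each tuning cycle only moves the\n\t\t// estimate a qosTuningImpact fraction towards the newly measured median, so\n\t\t// a single outlier sample cannot swing the timeout budget abruptly.\n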
\t\trtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))\n\t\tatomic.StoreUint64(&d.rttEstimate, uint64(rtt))\n\n\t\t// A new RTT cycle passed, increase our confidence in the estimated RTT\n\t\tconf := atomic.LoadUint64(&d.rttConfidence)\n\t\tconf = conf + (1000000-conf)/2\n\t\tatomic.StoreUint64(&d.rttConfidence, conf)\n\n\t\t// Log the new QoS values and sleep until the next RTT\n\t\tlog.Debug(\"Recalculated downloader QoS values\", \"rtt\", rtt, \"confidence\", float64(conf)/1000000.0, \"ttl\", d.requestTTL())\n\t\tselect {\n\t\tcase <-d.quitCh:\n\t\t\treturn\n\t\tcase <-time.After(rtt):\n\t\t}\n\t}\n}\n\n// qosReduceConfidence is meant to be called when a new peer joins the downloader's\n// peer set, needing to reduce the confidence we have in our QoS estimates.\nfunc (d *Downloader) qosReduceConfidence() {\n\t// If we have a single peer, confidence is always 1\n\tpeers := uint64(d.peers.Len())\n\tif peers == 0 {\n\t\t// Ensure peer connectivity races don't catch us off guard\n\t\treturn\n\t}\n\tif peers == 1 {\n\t\tatomic.StoreUint64(&d.rttConfidence, 1000000)\n\t\treturn\n\t}\n\t// If we have a ton of peers, don't drop confidence\n\tif peers >= uint64(qosConfidenceCap) {\n\t\treturn\n\t}\n\t// Otherwise drop the confidence factor\n\tconf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers\n\tif float64(conf)/1000000 < rttMinConfidence {\n\t\tconf = uint64(rttMinConfidence * 1000000)\n\t}\n\tatomic.StoreUint64(&d.rttConfidence, conf)\n\n\trtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))\n\tlog.Debug(\"Relaxed downloader QoS values\", \"rtt\", rtt, \"confidence\", float64(conf)/1000000.0, \"ttl\", d.requestTTL())\n}\n\n// requestRTT returns the current target round trip time for a download request\n// to complete in.\n//\n// Note, the returned RTT is .9 of the actually estimated RTT. The reason is that\n// the downloader tries to adapt queries to the RTT, so multiple RTT values can\n// be adapted to, but smaller ones are preferred (stabler download stream).\nfunc (d *Downloader) requestRTT() time.Duration {\n\treturn time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10\n}\n\n// requestTTL returns the current timeout allowance for a single download request\n// to finish under.\nfunc (d *Downloader) requestTTL() time.Duration {\n\tvar (\n\t\trtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))\n\t\tconf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0\n\t)\n\tttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)\n\tif ttl > ttlLimit {\n\t\tttl = ttlLimit\n\t}\n\treturn ttl\n}\n"
  },
  {
    "path": "eth/downloader/downloader_test.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage downloader\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\n// Reduce some of the parameters to make the tester faster.\nfunc init() {\n\tfullMaxForkAncestry = 10000\n\tlightMaxForkAncestry = 10000\n\tblockCacheMaxItems = 1024\n\tfsHeaderContCheck = 500 * time.Millisecond\n}\n\n// downloadTester is a test simulator for mocking out local block chain.\ntype downloadTester struct {\n\tdownloader *Downloader\n\n\tgenesis *types.Block   // Genesis blocks used by the tester and peers\n\tstateDb ethdb.Database // Database used by the tester for syncing from peers\n\tpeerDb  ethdb.Database // Database of the peers containing all data\n\tpeers   map[string]*downloadTesterPeer\n\n\townHashes   []common.Hash                  // Hash chain belonging to the tester\n\townHeaders  map[common.Hash]*types.Header  // Headers belonging to the tester\n\townBlocks   map[common.Hash]*types.Block   // Blocks belonging to the tester\n\townReceipts map[common.Hash]types.Receipts // Receipts belonging to the tester\n\townChainTd  map[common.Hash]*big.Int       // Total difficulties of the blocks in the local chain\n\n\tancientHeaders  map[common.Hash]*types.Header  // Ancient headers belonging to the tester\n\tancientBlocks   map[common.Hash]*types.Block   // Ancient blocks belonging to the tester\n\tancientReceipts map[common.Hash]types.Receipts // Ancient receipts belonging to the tester\n\tancientChainTd  map[common.Hash]*big.Int       // Ancient total difficulties of the blocks in the local chain\n\n\tlock sync.RWMutex\n}\n\n// newTester creates a new downloader test mocker.\nfunc newTester() *downloadTester {\n\ttester := &downloadTester{\n\t\tgenesis:     testGenesis,\n\t\tpeerDb:      testDB,\n\t\tpeers:       make(map[string]*downloadTesterPeer),\n\t\townHashes:   []common.Hash{testGenesis.Hash()},\n\t\townHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): testGenesis.Header()},\n\t\townBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},\n\t\townReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},\n\t\townChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},\n\n\t\t// Initialize ancient store with test genesis block\n\t\tancientHeaders:  map[common.Hash]*types.Header{testGenesis.Hash(): 
testGenesis.Header()},\n\t\tancientBlocks:   map[common.Hash]*types.Block{testGenesis.Hash(): testGenesis},\n\t\tancientReceipts: map[common.Hash]types.Receipts{testGenesis.Hash(): nil},\n\t\tancientChainTd:  map[common.Hash]*big.Int{testGenesis.Hash(): testGenesis.Difficulty()},\n\t}\n\ttester.stateDb = rawdb.NewMemoryDatabase()\n\ttester.stateDb.Put(testGenesis.Root().Bytes(), []byte{0x00})\n\n\ttester.downloader = New(0, tester.stateDb, trie.NewSyncBloom(1, tester.stateDb), new(event.TypeMux), tester, nil, tester.dropPeer)\n\treturn tester\n}\n\n// terminate aborts any operations on the embedded downloader and releases all\n// held resources.\nfunc (dl *downloadTester) terminate() {\n\tdl.downloader.Terminate()\n}\n\n// sync starts synchronizing with a remote peer, blocking until it completes.\nfunc (dl *downloadTester) sync(id string, td *big.Int, mode SyncMode) error {\n\tdl.lock.RLock()\n\thash := dl.peers[id].chain.headBlock().Hash()\n\t// If no particular TD was requested, load from the peer's blockchain\n\tif td == nil {\n\t\ttd = dl.peers[id].chain.td(hash)\n\t}\n\tdl.lock.RUnlock()\n\n\t// Synchronise with the chosen peer and ensure proper cleanup afterwards\n\terr := dl.downloader.synchronise(id, hash, td, mode)\n\tselect {\n\tcase <-dl.downloader.cancelCh:\n\t\t// Ok, downloader fully cancelled after sync cycle\n\tdefault:\n\t\t// Downloader is still accepting packets, can block a peer up\n\t\tpanic(\"downloader active post sync cycle\") // panic will be caught by tester\n\t}\n\treturn err\n}\n\n// HasHeader checks if a header is present in the tester's canonical chain.\nfunc (dl *downloadTester) HasHeader(hash common.Hash, number uint64) bool {\n\treturn dl.GetHeaderByHash(hash) != nil\n}\n\n// HasBlock checks if a block is present in the tester's canonical chain.\nfunc (dl *downloadTester) HasBlock(hash common.Hash, number uint64) bool {\n\treturn dl.GetBlockByHash(hash) != nil\n}\n\n// HasFastBlock checks if a block is present in the tester's canonical chain.\nfunc (dl *downloadTester) HasFastBlock(hash common.Hash, number uint64) bool {\n\tdl.lock.RLock()\n\tdefer dl.lock.RUnlock()\n\n\tif _, ok := dl.ancientReceipts[hash]; ok {\n\t\treturn true\n\t}\n\t_, ok := dl.ownReceipts[hash]\n\treturn ok\n}\n\n// GetHeaderByHash retrieves a header from the tester's canonical chain.\nfunc (dl *downloadTester) GetHeaderByHash(hash common.Hash) *types.Header {\n\tdl.lock.RLock()\n\tdefer dl.lock.RUnlock()\n\treturn dl.getHeaderByHash(hash)\n}\n\n// getHeaderByHash returns the header if found either within ancients or own blocks.\n// This method assumes that the caller holds at least the read-lock (dl.lock)\nfunc (dl *downloadTester) getHeaderByHash(hash common.Hash) *types.Header {\n\theader := dl.ancientHeaders[hash]\n\tif header != nil {\n\t\treturn header\n\t}\n\treturn dl.ownHeaders[hash]\n}\n\n// GetBlockByHash retrieves a block from the tester's canonical chain.\nfunc (dl *downloadTester) GetBlockByHash(hash common.Hash) *types.Block {\n\tdl.lock.RLock()\n\tdefer dl.lock.RUnlock()\n\n\tblock := dl.ancientBlocks[hash]\n\tif block != nil {\n\t\treturn block\n\t}\n\treturn dl.ownBlocks[hash]\n}\n\n// CurrentHeader retrieves the current head header from the canonical chain.\nfunc (dl *downloadTester) CurrentHeader() *types.Header {\n\tdl.lock.RLock()\n\tdefer dl.lock.RUnlock()\n\n
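\t// Walk the hash chain backwards: the head may live either in the ancient\n\t// store or among the recent own headers, whichever is found first.\n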
\tfor i := len(dl.ownHashes) - 1; i >= 0; i-- {\n\t\tif header := dl.ancientHeaders[dl.ownHashes[i]]; header != nil {\n\t\t\treturn header\n\t\t}\n\t\tif header := dl.ownHeaders[dl.ownHashes[i]]; header != nil {\n\t\t\treturn header\n\t\t}\n\t}\n\treturn dl.genesis.Header()\n}\n\n// CurrentBlock retrieves the current head block from the canonical chain.\nfunc (dl *downloadTester) CurrentBlock() *types.Block {\n\tdl.lock.RLock()\n\tdefer dl.lock.RUnlock()\n\n\tfor i := len(dl.ownHashes) - 1; i >= 0; i-- {\n\t\tif block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {\n\t\t\tif _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {\n\t\t\t\treturn block\n\t\t\t}\n\t\t\treturn block\n\t\t}\n\t\tif block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {\n\t\t\tif _, err := dl.stateDb.Get(block.Root().Bytes()); err == nil {\n\t\t\t\treturn block\n\t\t\t}\n\t\t}\n\t}\n\treturn dl.genesis\n}\n\n// CurrentFastBlock retrieves the current head fast-sync block from the canonical chain.\nfunc (dl *downloadTester) CurrentFastBlock() *types.Block {\n\tdl.lock.RLock()\n\tdefer dl.lock.RUnlock()\n\n\tfor i := len(dl.ownHashes) - 1; i >= 0; i-- {\n\t\tif block := dl.ancientBlocks[dl.ownHashes[i]]; block != nil {\n\t\t\treturn block\n\t\t}\n\t\tif block := dl.ownBlocks[dl.ownHashes[i]]; block != nil {\n\t\t\treturn block\n\t\t}\n\t}\n\treturn dl.genesis\n}\n\n// FastSyncCommitHead manually sets the head block to a given hash.\nfunc (dl *downloadTester) FastSyncCommitHead(hash common.Hash) error {\n\t// For now only check that the state trie is correct\n\tif block := dl.GetBlockByHash(hash); block != nil {\n\t\t_, err := trie.NewSecure(block.Root(), trie.NewDatabase(dl.stateDb))\n\t\treturn err\n\t}\n\treturn fmt.Errorf(\"non existent block: %x\", hash[:4])\n}\n\n// GetTd retrieves the block's total difficulty from the canonical chain.\nfunc (dl *downloadTester) GetTd(hash common.Hash, number uint64) *big.Int {\n\tdl.lock.RLock()\n\tdefer dl.lock.RUnlock()\n\n\treturn dl.getTd(hash)\n}\n\n// getTd retrieves the block's total difficulty if found either within\n// ancients or own blocks.\n// This method assumes that the caller holds at least the read-lock (dl.lock)\nfunc (dl *downloadTester) getTd(hash common.Hash) *big.Int {\n\tif td := dl.ancientChainTd[hash]; td != nil {\n\t\treturn td\n\t}\n\treturn dl.ownChainTd[hash]\n}\n\n// InsertHeaderChain injects a new batch of headers into the simulated chain.\nfunc (dl *downloadTester) InsertHeaderChain(headers []*types.Header, checkFreq int) (i int, err error) {\n\tdl.lock.Lock()\n\tdefer dl.lock.Unlock()\n\t// Do a quick check, as the blockchain.InsertHeaderChain doesn't insert anything in case of errors\n\tif dl.getHeaderByHash(headers[0].ParentHash) == nil {\n\t\treturn 0, fmt.Errorf(\"InsertHeaderChain: unknown parent at first position, parent of number %d\", headers[0].Number)\n\t}\n\tvar hashes []common.Hash\n\tfor i := 1; i < len(headers); i++ {\n\t\thash := headers[i-1].Hash()\n\t\tif headers[i].ParentHash != headers[i-1].Hash() {\n\t\t\treturn i, fmt.Errorf(\"non-contiguous import at position %d\", i)\n\t\t}\n\t\thashes = append(hashes, hash)\n\t}\n\thashes = append(hashes, headers[len(headers)-1].Hash())\n\t// Do a full insert if pre-checks passed\n\tfor i, header := range headers {\n\t\thash := hashes[i]\n\t\tif dl.getHeaderByHash(hash) != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif dl.getHeaderByHash(header.ParentHash) == nil {\n\t\t\t// This _should_ be impossible, due to precheck and induction\n\t\t\treturn i, fmt.Errorf(\"InsertHeaderChain: unknown parent at position %d\", i)\n\t\t}\n\t\tdl.ownHashes = append(dl.ownHashes, hash)\n\t\tdl.ownHeaders[hash] = header\n\n\t\ttd := dl.getTd(header.ParentHash)\n\t\tdl.ownChainTd[hash] = new(big.Int).Add(td, 
header.Difficulty)\n\t}\n\treturn len(headers), nil\n}\n\n// InsertChain injects a new batch of blocks into the simulated chain.\nfunc (dl *downloadTester) InsertChain(blocks types.Blocks) (i int, err error) {\n\tdl.lock.Lock()\n\tdefer dl.lock.Unlock()\n\tfor i, block := range blocks {\n\t\tif parent, ok := dl.ownBlocks[block.ParentHash()]; !ok {\n\t\t\treturn i, fmt.Errorf(\"InsertChain: unknown parent at position %d / %d\", i, len(blocks))\n\t\t} else if _, err := dl.stateDb.Get(parent.Root().Bytes()); err != nil {\n\t\t\treturn i, fmt.Errorf(\"InsertChain: unknown parent state %x: %v\", parent.Root(), err)\n\t\t}\n\t\tif hdr := dl.getHeaderByHash(block.Hash()); hdr == nil {\n\t\t\tdl.ownHashes = append(dl.ownHashes, block.Hash())\n\t\t\tdl.ownHeaders[block.Hash()] = block.Header()\n\t\t}\n\t\tdl.ownBlocks[block.Hash()] = block\n\t\tdl.ownReceipts[block.Hash()] = make(types.Receipts, 0)\n\t\tdl.stateDb.Put(block.Root().Bytes(), []byte{0x00})\n\t\ttd := dl.getTd(block.ParentHash())\n\t\tdl.ownChainTd[block.Hash()] = new(big.Int).Add(td, block.Difficulty())\n\t}\n\treturn len(blocks), nil\n}\n\n// InsertReceiptChain injects a new batch of receipts into the simulated chain.\nfunc (dl *downloadTester) InsertReceiptChain(blocks types.Blocks, receipts []types.Receipts, ancientLimit uint64) (i int, err error) {\n\tdl.lock.Lock()\n\tdefer dl.lock.Unlock()\n\n\tfor i := 0; i < len(blocks) && i < len(receipts); i++ {\n\t\tif _, ok := dl.ownHeaders[blocks[i].Hash()]; !ok {\n\t\t\treturn i, errors.New(\"unknown owner\")\n\t\t}\n\t\tif _, ok := dl.ancientBlocks[blocks[i].ParentHash()]; !ok {\n\t\t\tif _, ok := dl.ownBlocks[blocks[i].ParentHash()]; !ok {\n\t\t\t\treturn i, errors.New(\"InsertReceiptChain: unknown parent\")\n\t\t\t}\n\t\t}\n\t\tif blocks[i].NumberU64() <= ancientLimit {\n\t\t\tdl.ancientBlocks[blocks[i].Hash()] = blocks[i]\n\t\t\tdl.ancientReceipts[blocks[i].Hash()] = receipts[i]\n\n\t\t\t// Migrate from active db to ancient db\n\t\t\tdl.ancientHeaders[blocks[i].Hash()] = blocks[i].Header()\n\t\t\tdl.ancientChainTd[blocks[i].Hash()] = new(big.Int).Add(dl.ancientChainTd[blocks[i].ParentHash()], blocks[i].Difficulty())\n\t\t\tdelete(dl.ownHeaders, blocks[i].Hash())\n\t\t\tdelete(dl.ownChainTd, blocks[i].Hash())\n\t\t} else {\n\t\t\tdl.ownBlocks[blocks[i].Hash()] = blocks[i]\n\t\t\tdl.ownReceipts[blocks[i].Hash()] = receipts[i]\n\t\t}\n\t}\n\treturn len(blocks), nil\n}\n\n// SetHead rewinds the local chain to a new head.\nfunc (dl *downloadTester) SetHead(head uint64) error {\n\tdl.lock.Lock()\n\tdefer dl.lock.Unlock()\n\n\t// Find the hash of the head to reset to\n\tvar hash common.Hash\n\tfor h, header := range dl.ownHeaders {\n\t\tif header.Number.Uint64() == head {\n\t\t\thash = h\n\t\t}\n\t}\n\tfor h, header := range dl.ancientHeaders {\n\t\tif header.Number.Uint64() == head {\n\t\t\thash = h\n\t\t}\n\t}\n\tif hash == (common.Hash{}) {\n\t\treturn fmt.Errorf(\"unknown head to set: %d\", head)\n\t}\n\t// Find the offset in the header chain\n\tvar offset int\n\tfor o, h := range dl.ownHashes {\n\t\tif h == hash {\n\t\t\toffset = o\n\t\t\tbreak\n\t\t}\n\t}\n\t// Remove all the hashes and associated data afterwards\n\tfor i := offset + 1; i < len(dl.ownHashes); i++ {\n\t\tdelete(dl.ownChainTd, dl.ownHashes[i])\n\t\tdelete(dl.ownHeaders, dl.ownHashes[i])\n\t\tdelete(dl.ownReceipts, dl.ownHashes[i])\n\t\tdelete(dl.ownBlocks, dl.ownHashes[i])\n\n\t\tdelete(dl.ancientChainTd, dl.ownHashes[i])\n\t\tdelete(dl.ancientHeaders, dl.ownHashes[i])\n\t\tdelete(dl.ancientReceipts, 
dl.ownHashes[i])\n\t\tdelete(dl.ancientBlocks, dl.ownHashes[i])\n\t}\n\tdl.ownHashes = dl.ownHashes[:offset+1]\n\treturn nil\n}\n\n// Rollback removes some recently added elements from the chain.\nfunc (dl *downloadTester) Rollback(hashes []common.Hash) {\n}\n\n// newPeer registers a new block download source into the downloader.\nfunc (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error {\n\tdl.lock.Lock()\n\tdefer dl.lock.Unlock()\n\n\tpeer := &downloadTesterPeer{dl: dl, id: id, chain: chain}\n\tdl.peers[id] = peer\n\treturn dl.downloader.RegisterPeer(id, version, peer)\n}\n\n// dropPeer simulates a hard peer removal from the connection pool.\nfunc (dl *downloadTester) dropPeer(id string) {\n\tdl.lock.Lock()\n\tdefer dl.lock.Unlock()\n\n\tdelete(dl.peers, id)\n\tdl.downloader.UnregisterPeer(id)\n}\n\ntype downloadTesterPeer struct {\n\tdl            *downloadTester\n\tid            string\n\tchain         *testChain\n\tmissingStates map[common.Hash]bool // State entries that fast sync should not return\n}\n\n// Head constructs a function to retrieve a peer's current head hash\n// and total difficulty.\nfunc (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) {\n\tb := dlp.chain.headBlock()\n\treturn b.Hash(), dlp.chain.td(b.Hash())\n}\n\n// RequestHeadersByHash constructs a GetBlockHeaders function based on a hashed\n// origin; associated with a particular peer in the download tester. The returned\n// function can be used to retrieve batches of headers from the particular peer.\nfunc (dlp *downloadTesterPeer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {\n\tresult := dlp.chain.headersByHash(origin, amount, skip, reverse)\n\tgo dlp.dl.downloader.DeliverHeaders(dlp.id, result)\n\treturn nil\n}\n\n// RequestHeadersByNumber constructs a GetBlockHeaders function based on a numbered\n// origin; associated with a particular peer in the download tester. The returned\n// function can be used to retrieve batches of headers from the particular peer.\nfunc (dlp *downloadTesterPeer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {\n\tresult := dlp.chain.headersByNumber(origin, amount, skip, reverse)\n\tgo dlp.dl.downloader.DeliverHeaders(dlp.id, result)\n\treturn nil\n}\n\n// RequestBodies constructs a getBlockBodies method associated with a particular\n// peer in the download tester. The returned function can be used to retrieve\n// batches of block bodies from the particularly requested peer.\nfunc (dlp *downloadTesterPeer) RequestBodies(hashes []common.Hash) error {\n\ttxs, uncles := dlp.chain.bodies(hashes)\n\tgo dlp.dl.downloader.DeliverBodies(dlp.id, txs, uncles)\n\treturn nil\n}\n\n// RequestReceipts constructs a getReceipts method associated with a particular\n// peer in the download tester. The returned function can be used to retrieve\n// batches of block receipts from the particularly requested peer.\nfunc (dlp *downloadTesterPeer) RequestReceipts(hashes []common.Hash) error {\n\treceipts := dlp.chain.receipts(hashes)\n\tgo dlp.dl.downloader.DeliverReceipts(dlp.id, receipts)\n\treturn nil\n}\n\n// RequestNodeData constructs a getNodeData method associated with a particular\n// peer in the download tester. 
The returned function can be used to retrieve\n// batches of node state data from the particularly requested peer.\nfunc (dlp *downloadTesterPeer) RequestNodeData(hashes []common.Hash) error {\n\tdlp.dl.lock.RLock()\n\tdefer dlp.dl.lock.RUnlock()\n\n\tresults := make([][]byte, 0, len(hashes))\n\tfor _, hash := range hashes {\n\t\tif data, err := dlp.dl.peerDb.Get(hash.Bytes()); err == nil {\n\t\t\tif !dlp.missingStates[hash] {\n\t\t\t\tresults = append(results, data)\n\t\t\t}\n\t\t}\n\t}\n\tgo dlp.dl.downloader.DeliverNodeData(dlp.id, results)\n\treturn nil\n}\n\n// assertOwnChain checks if the local chain contains the correct number of items\n// of the various chain components.\nfunc assertOwnChain(t *testing.T, tester *downloadTester, length int) {\n\t// Mark this method as a helper to report errors at callsite, not in here\n\tt.Helper()\n\n\tassertOwnForkedChain(t, tester, 1, []int{length})\n}\n\n// assertOwnForkedChain checks if the local forked chain contains the correct\n// number of items of the various chain components.\nfunc assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, lengths []int) {\n\t// Mark this method as a helper to report errors at callsite, not in here\n\tt.Helper()\n\n\t// Initialize the counters for the first fork\n\theaders, blocks, receipts := lengths[0], lengths[0], lengths[0]\n\n\t// Update the counters for each subsequent fork\n\tfor _, length := range lengths[1:] {\n\t\theaders += length - common\n\t\tblocks += length - common\n\t\treceipts += length - common\n\t}\n\tif tester.downloader.getMode() == LightSync {\n\t\tblocks, receipts = 1, 1\n\t}\n\tif hs := len(tester.ownHeaders) + len(tester.ancientHeaders) - 1; hs != headers {\n\t\tt.Fatalf(\"synchronised headers mismatch: have %v, want %v\", hs, headers)\n\t}\n\tif bs := len(tester.ownBlocks) + len(tester.ancientBlocks) - 1; bs != blocks {\n\t\tt.Fatalf(\"synchronised blocks mismatch: have %v, want %v\", bs, blocks)\n\t}\n\tif rs := len(tester.ownReceipts) + len(tester.ancientReceipts) - 1; rs != receipts {\n\t\tt.Fatalf(\"synchronised receipts mismatch: have %v, want %v\", rs, receipts)\n\t}\n}\n\nfunc TestCanonicalSynchronisation64Full(t *testing.T) { testCanonSync(t, 64, FullSync) }\nfunc TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonSync(t, 64, FastSync) }\n\nfunc TestCanonicalSynchronisation65Full(t *testing.T)  { testCanonSync(t, 65, FullSync) }\nfunc TestCanonicalSynchronisation65Fast(t *testing.T)  { testCanonSync(t, 65, FastSync) }\nfunc TestCanonicalSynchronisation65Light(t *testing.T) { testCanonSync(t, 65, LightSync) }\n\nfunc TestCanonicalSynchronisation66Full(t *testing.T)  { testCanonSync(t, 66, FullSync) }\nfunc TestCanonicalSynchronisation66Fast(t *testing.T)  { testCanonSync(t, 66, FastSync) }\nfunc TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, 66, LightSync) }\n\nfunc testCanonSync(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\n\t// Create a small enough block chain to download\n\tchain := testChainBase.shorten(blockCacheMaxItems - 15)\n\ttester.newPeer(\"peer\", protocol, chain)\n\n\t// Synchronise with the peer and make sure all relevant data was retrieved\n\tif err := tester.sync(\"peer\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnChain(t, tester, chain.len())\n}\n\n// Tests that if a large batch of blocks are being downloaded, it is throttled\n// until the cached blocks are retrieved.\nfunc 
TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) }\nfunc TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) }\n\nfunc TestThrottling65Full(t *testing.T) { testThrottling(t, 65, FullSync) }\nfunc TestThrottling65Fast(t *testing.T) { testThrottling(t, 65, FastSync) }\n\nfunc TestThrottling66Full(t *testing.T) { testThrottling(t, 66, FullSync) }\nfunc TestThrottling66Fast(t *testing.T) { testThrottling(t, 66, FastSync) }\n\nfunc testThrottling(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\ttester := newTester()\n\n\t// Create a long block chain to download and the tester\n\ttargetBlocks := testChainBase.len() - 1\n\ttester.newPeer(\"peer\", protocol, testChainBase)\n\n\t// Wrap the importer to allow stepping\n\tblocked, proceed := uint32(0), make(chan struct{})\n\ttester.downloader.chainInsertHook = func(results []*fetchResult) {\n\t\tatomic.StoreUint32(&blocked, uint32(len(results)))\n\t\t<-proceed\n\t}\n\t// Start a synchronisation concurrently\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\terrc <- tester.sync(\"peer\", nil, mode)\n\t}()\n\t// Iteratively take some blocks, always checking the retrieval count\n\tfor {\n\t\t// Check the retrieval count synchronously (! reason for this ugly block)\n\t\ttester.lock.RLock()\n\t\tretrieved := len(tester.ownBlocks)\n\t\ttester.lock.RUnlock()\n\t\tif retrieved >= targetBlocks+1 {\n\t\t\tbreak\n\t\t}\n\t\t// Wait a bit for sync to throttle itself\n\t\tvar cached, frozen int\n\t\tfor start := time.Now(); time.Since(start) < 3*time.Second; {\n\t\t\ttime.Sleep(25 * time.Millisecond)\n\n\t\t\ttester.lock.Lock()\n\t\t\ttester.downloader.queue.lock.Lock()\n\t\t\ttester.downloader.queue.resultCache.lock.Lock()\n\t\t\t{\n\t\t\t\tcached = tester.downloader.queue.resultCache.countCompleted()\n\t\t\t\tfrozen = int(atomic.LoadUint32(&blocked))\n\t\t\t\tretrieved = len(tester.ownBlocks)\n\t\t\t}\n\t\t\ttester.downloader.queue.resultCache.lock.Unlock()\n\t\t\ttester.downloader.queue.lock.Unlock()\n\t\t\ttester.lock.Unlock()\n\n\t\t\tif cached == blockCacheMaxItems ||\n\t\t\t\tcached == blockCacheMaxItems-reorgProtHeaderDelay ||\n\t\t\t\tretrieved+cached+frozen == targetBlocks+1 ||\n\t\t\t\tretrieved+cached+frozen == targetBlocks+1-reorgProtHeaderDelay {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// Make sure we filled up the cache, then exhaust it\n\t\ttime.Sleep(25 * time.Millisecond) // give it a chance to screw up\n\t\ttester.lock.RLock()\n\t\tretrieved = len(tester.ownBlocks)\n\t\ttester.lock.RUnlock()\n\t\tif cached != blockCacheMaxItems && cached != blockCacheMaxItems-reorgProtHeaderDelay && retrieved+cached+frozen != targetBlocks+1 && retrieved+cached+frozen != targetBlocks+1-reorgProtHeaderDelay {\n\t\t\tt.Fatalf(\"block count mismatch: have %v, want %v (owned %v, blocked %v, target %v)\", cached, blockCacheMaxItems, retrieved, frozen, targetBlocks+1)\n\t\t}\n\n\t\t// Permit the blocked blocks to import\n\t\tif atomic.LoadUint32(&blocked) > 0 {\n\t\t\tatomic.StoreUint32(&blocked, uint32(0))\n\t\t\tproceed <- struct{}{}\n\t\t}\n\t}\n\t// Check that we haven't pulled more blocks than available\n\tassertOwnChain(t, tester, targetBlocks+1)\n\tif err := <-errc; err != nil {\n\t\tt.Fatalf(\"block synchronization failed: %v\", err)\n\t}\n\ttester.terminate()\n\n}\n\n// Tests that simple synchronization against a forked chain works correctly. 
In\n// this test common ancestor lookup should *not* be short circuited, and a full\n// binary search should be executed.\nfunc TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) }\nfunc TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) }\n\nfunc TestForkedSync65Full(t *testing.T)  { testForkedSync(t, 65, FullSync) }\nfunc TestForkedSync65Fast(t *testing.T)  { testForkedSync(t, 65, FastSync) }\nfunc TestForkedSync65Light(t *testing.T) { testForkedSync(t, 65, LightSync) }\n\nfunc TestForkedSync66Full(t *testing.T)  { testForkedSync(t, 66, FullSync) }\nfunc TestForkedSync66Fast(t *testing.T)  { testForkedSync(t, 66, FastSync) }\nfunc TestForkedSync66Light(t *testing.T) { testForkedSync(t, 66, LightSync) }\n\nfunc testForkedSync(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\n\tchainA := testChainForkLightA.shorten(testChainBase.len() + 80)\n\tchainB := testChainForkLightB.shorten(testChainBase.len() + 80)\n\ttester.newPeer(\"fork A\", protocol, chainA)\n\ttester.newPeer(\"fork B\", protocol, chainB)\n\t// Synchronise with the peer and make sure all blocks were retrieved\n\tif err := tester.sync(\"fork A\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnChain(t, tester, chainA.len())\n\n\t// Synchronise with the second peer and make sure that fork is pulled too\n\tif err := tester.sync(\"fork B\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})\n}\n\n// Tests that synchronising against a much shorter but much heavier fork works\n// correctly and is not dropped.\nfunc TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) }\nfunc TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) }\n\nfunc TestHeavyForkedSync65Full(t *testing.T)  { testHeavyForkedSync(t, 65, FullSync) }\nfunc TestHeavyForkedSync65Fast(t *testing.T)  { testHeavyForkedSync(t, 65, FastSync) }\nfunc TestHeavyForkedSync65Light(t *testing.T) { testHeavyForkedSync(t, 65, LightSync) }\n\nfunc TestHeavyForkedSync66Full(t *testing.T)  { testHeavyForkedSync(t, 66, FullSync) }\nfunc TestHeavyForkedSync66Fast(t *testing.T)  { testHeavyForkedSync(t, 66, FastSync) }\nfunc TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, 66, LightSync) }\n\nfunc testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\n\tchainA := testChainForkLightA.shorten(testChainBase.len() + 80)\n\tchainB := testChainForkHeavy.shorten(testChainBase.len() + 80)\n\ttester.newPeer(\"light\", protocol, chainA)\n\ttester.newPeer(\"heavy\", protocol, chainB)\n\n\t// Synchronise with the peer and make sure all blocks were retrieved\n\tif err := tester.sync(\"light\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnChain(t, tester, chainA.len())\n\n\t// Synchronise with the second peer and make sure that fork is pulled too\n\tif err := tester.sync(\"heavy\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnForkedChain(t, tester, testChainBase.len(), []int{chainA.len(), chainB.len()})\n}\n\n// Tests that chain forks are contained within a certain interval of the current\n// chain head, ensuring that malicious peers cannot waste resources by 
feeding\n// long dead chains.\nfunc TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) }\nfunc TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) }\n\nfunc TestBoundedForkedSync65Full(t *testing.T)  { testBoundedForkedSync(t, 65, FullSync) }\nfunc TestBoundedForkedSync65Fast(t *testing.T)  { testBoundedForkedSync(t, 65, FastSync) }\nfunc TestBoundedForkedSync65Light(t *testing.T) { testBoundedForkedSync(t, 65, LightSync) }\n\nfunc TestBoundedForkedSync66Full(t *testing.T)  { testBoundedForkedSync(t, 66, FullSync) }\nfunc TestBoundedForkedSync66Fast(t *testing.T)  { testBoundedForkedSync(t, 66, FastSync) }\nfunc TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, 66, LightSync) }\n\nfunc testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\n\tchainA := testChainForkLightA\n\tchainB := testChainForkLightB\n\ttester.newPeer(\"original\", protocol, chainA)\n\ttester.newPeer(\"rewriter\", protocol, chainB)\n\n\t// Synchronise with the peer and make sure all blocks were retrieved\n\tif err := tester.sync(\"original\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnChain(t, tester, chainA.len())\n\n\t// Synchronise with the second peer and ensure that the fork is rejected for being too old\n\tif err := tester.sync(\"rewriter\", nil, mode); err != errInvalidAncestor {\n\t\tt.Fatalf(\"sync failure mismatch: have %v, want %v\", err, errInvalidAncestor)\n\t}\n}\n\n// Tests that chain forks are contained within a certain interval of the current\n// chain head for short but heavy forks too. These are a bit special because they\n// take different ancestor lookup paths.\nfunc TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) }\nfunc TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) }\n\nfunc TestBoundedHeavyForkedSync65Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 65, FullSync) }\nfunc TestBoundedHeavyForkedSync65Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 65, FastSync) }\nfunc TestBoundedHeavyForkedSync65Light(t *testing.T) { testBoundedHeavyForkedSync(t, 65, LightSync) }\n\nfunc TestBoundedHeavyForkedSync66Full(t *testing.T)  { testBoundedHeavyForkedSync(t, 66, FullSync) }\nfunc TestBoundedHeavyForkedSync66Fast(t *testing.T)  { testBoundedHeavyForkedSync(t, 66, FastSync) }\nfunc TestBoundedHeavyForkedSync66Light(t *testing.T) { testBoundedHeavyForkedSync(t, 66, LightSync) }\n\nfunc testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\ttester := newTester()\n\n\t// Create a long enough forked chain\n\tchainA := testChainForkLightA\n\tchainB := testChainForkHeavy\n\ttester.newPeer(\"original\", protocol, chainA)\n\n\t// Synchronise with the peer and make sure all blocks were retrieved\n\tif err := tester.sync(\"original\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnChain(t, tester, chainA.len())\n\n\ttester.newPeer(\"heavy-rewriter\", protocol, chainB)\n\t// Synchronise with the second peer and ensure that the fork is rejected for being too old\n\tif err := tester.sync(\"heavy-rewriter\", nil, mode); err != errInvalidAncestor {\n\t\tt.Fatalf(\"sync failure mismatch: have %v, want %v\", err, errInvalidAncestor)\n\t}\n\ttester.terminate()\n}\n\n// Tests that an inactive downloader will not accept 
incoming block headers,\n// bodies and receipts.\nfunc TestInactiveDownloader63(t *testing.T) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\n\t// Check that neither block headers nor bodies are accepted\n\tif err := tester.downloader.DeliverHeaders(\"bad peer\", []*types.Header{}); err != errNoSyncActive {\n\t\tt.Errorf(\"error mismatch: have %v, want %v\", err, errNoSyncActive)\n\t}\n\tif err := tester.downloader.DeliverBodies(\"bad peer\", [][]*types.Transaction{}, [][]*types.Header{}); err != errNoSyncActive {\n\t\tt.Errorf(\"error mismatch: have %v, want %v\", err, errNoSyncActive)\n\t}\n\tif err := tester.downloader.DeliverReceipts(\"bad peer\", [][]*types.Receipt{}); err != errNoSyncActive {\n\t\tt.Errorf(\"error mismatch: have %v, want %v\", err, errNoSyncActive)\n\t}\n}\n\n// Tests that a canceled download wipes all previously accumulated state.\nfunc TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) }\nfunc TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) }\n\nfunc TestCancel65Full(t *testing.T)  { testCancel(t, 65, FullSync) }\nfunc TestCancel65Fast(t *testing.T)  { testCancel(t, 65, FastSync) }\nfunc TestCancel65Light(t *testing.T) { testCancel(t, 65, LightSync) }\n\nfunc TestCancel66Full(t *testing.T)  { testCancel(t, 66, FullSync) }\nfunc TestCancel66Fast(t *testing.T)  { testCancel(t, 66, FastSync) }\nfunc TestCancel66Light(t *testing.T) { testCancel(t, 66, LightSync) }\n\nfunc testCancel(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\n\tchain := testChainBase.shorten(MaxHeaderFetch)\n\ttester.newPeer(\"peer\", protocol, chain)\n\n\t// Make sure canceling works with a pristine downloader\n\ttester.downloader.Cancel()\n\tif !tester.downloader.queue.Idle() {\n\t\tt.Errorf(\"download queue not idle\")\n\t}\n\t// Synchronise with the peer, but cancel afterwards\n\tif err := tester.sync(\"peer\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\ttester.downloader.Cancel()\n\tif !tester.downloader.queue.Idle() {\n\t\tt.Errorf(\"download queue not idle\")\n\t}\n}\n\n// Tests that synchronisation from multiple peers works as intended (multi thread sanity test).\nfunc TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) }\nfunc TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) }\n\nfunc TestMultiSynchronisation65Full(t *testing.T)  { testMultiSynchronisation(t, 65, FullSync) }\nfunc TestMultiSynchronisation65Fast(t *testing.T)  { testMultiSynchronisation(t, 65, FastSync) }\nfunc TestMultiSynchronisation65Light(t *testing.T) { testMultiSynchronisation(t, 65, LightSync) }\n\nfunc TestMultiSynchronisation66Full(t *testing.T)  { testMultiSynchronisation(t, 66, FullSync) }\nfunc TestMultiSynchronisation66Fast(t *testing.T)  { testMultiSynchronisation(t, 66, FastSync) }\nfunc TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, 66, LightSync) }\n\nfunc testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\n\t// Create various peers with various parts of the chain\n\ttargetPeers := 8\n\tchain := testChainBase.shorten(targetPeers * 100)\n\n\tfor i := 0; i < targetPeers; i++ {\n\t\tid := fmt.Sprintf(\"peer #%d\", i)\n\t\ttester.newPeer(id, protocol, chain.shorten(chain.len()/(i+1)))\n\t}\n\tif err := tester.sync(\"peer #0\", nil, mode); err != nil 
{\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnChain(t, tester, chain.len())\n}\n\n// Tests that synchronisations behave well in multi-version protocol environments\n// and not wreak havoc on other nodes in the network.\nfunc TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) }\nfunc TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) }\n\nfunc TestMultiProtoSynchronisation65Full(t *testing.T)  { testMultiProtoSync(t, 65, FullSync) }\nfunc TestMultiProtoSynchronisation65Fast(t *testing.T)  { testMultiProtoSync(t, 65, FastSync) }\nfunc TestMultiProtoSynchronisation65Light(t *testing.T) { testMultiProtoSync(t, 65, LightSync) }\n\nfunc TestMultiProtoSynchronisation66Full(t *testing.T)  { testMultiProtoSync(t, 66, FullSync) }\nfunc TestMultiProtoSynchronisation66Fast(t *testing.T)  { testMultiProtoSync(t, 66, FastSync) }\nfunc TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, 66, LightSync) }\n\nfunc testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\n\t// Create a small enough block chain to download\n\tchain := testChainBase.shorten(blockCacheMaxItems - 15)\n\n\t// Create peers of every type\n\ttester.newPeer(\"peer 64\", 64, chain)\n\ttester.newPeer(\"peer 65\", 65, chain)\n\ttester.newPeer(\"peer 66\", 66, chain)\n\n\t// Synchronise with the requested peer and make sure all blocks were retrieved\n\tif err := tester.sync(fmt.Sprintf(\"peer %d\", protocol), nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnChain(t, tester, chain.len())\n\n\t// Check that no peers have been dropped off\n\tfor _, version := range []int{64, 65, 66} {\n\t\tpeer := fmt.Sprintf(\"peer %d\", version)\n\t\tif _, ok := tester.peers[peer]; !ok {\n\t\t\tt.Errorf(\"%s dropped\", peer)\n\t\t}\n\t}\n}\n\n// Tests that if a block is empty (e.g. 
header only), no body request should be\n// made, and instead the header should be assembled into a whole block by itself.\nfunc TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) }\nfunc TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) }\n\nfunc TestEmptyShortCircuit65Full(t *testing.T)  { testEmptyShortCircuit(t, 65, FullSync) }\nfunc TestEmptyShortCircuit65Fast(t *testing.T)  { testEmptyShortCircuit(t, 65, FastSync) }\nfunc TestEmptyShortCircuit65Light(t *testing.T) { testEmptyShortCircuit(t, 65, LightSync) }\n\nfunc TestEmptyShortCircuit66Full(t *testing.T)  { testEmptyShortCircuit(t, 66, FullSync) }\nfunc TestEmptyShortCircuit66Fast(t *testing.T)  { testEmptyShortCircuit(t, 66, FastSync) }\nfunc TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, 66, LightSync) }\n\nfunc testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\n\t// Create a block chain to download\n\tchain := testChainBase\n\ttester.newPeer(\"peer\", protocol, chain)\n\n\t// Instrument the downloader to signal body requests\n\tbodiesHave, receiptsHave := int32(0), int32(0)\n\ttester.downloader.bodyFetchHook = func(headers []*types.Header) {\n\t\tatomic.AddInt32(&bodiesHave, int32(len(headers)))\n\t}\n\ttester.downloader.receiptFetchHook = func(headers []*types.Header) {\n\t\tatomic.AddInt32(&receiptsHave, int32(len(headers)))\n\t}\n\t// Synchronise with the peer and make sure all blocks were retrieved\n\tif err := tester.sync(\"peer\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnChain(t, tester, chain.len())\n\n\t// Validate the number of block bodies that should have been requested\n\tbodiesNeeded, receiptsNeeded := 0, 0\n\tfor _, block := range chain.blockm {\n\t\tif mode != LightSync && block != tester.genesis && (len(block.Transactions()) > 0 || len(block.Uncles()) > 0) {\n\t\t\tbodiesNeeded++\n\t\t}\n\t}\n\tfor _, receipt := range chain.receiptm {\n\t\tif mode == FastSync && len(receipt) > 0 {\n\t\t\treceiptsNeeded++\n\t\t}\n\t}\n\tif int(bodiesHave) != bodiesNeeded {\n\t\tt.Errorf(\"body retrieval count mismatch: have %v, want %v\", bodiesHave, bodiesNeeded)\n\t}\n\tif int(receiptsHave) != receiptsNeeded {\n\t\tt.Errorf(\"receipt retrieval count mismatch: have %v, want %v\", receiptsHave, receiptsNeeded)\n\t}\n}\n\n// Tests that headers are enqueued continuously, preventing malicious nodes from\n// stalling the downloader by feeding gapped header chains.\nfunc TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) }\nfunc TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) }\n\nfunc TestMissingHeaderAttack65Full(t *testing.T)  { testMissingHeaderAttack(t, 65, FullSync) }\nfunc TestMissingHeaderAttack65Fast(t *testing.T)  { testMissingHeaderAttack(t, 65, FastSync) }\nfunc TestMissingHeaderAttack65Light(t *testing.T) { testMissingHeaderAttack(t, 65, LightSync) }\n\nfunc TestMissingHeaderAttack66Full(t *testing.T)  { testMissingHeaderAttack(t, 66, FullSync) }\nfunc TestMissingHeaderAttack66Fast(t *testing.T)  { testMissingHeaderAttack(t, 66, FastSync) }\nfunc TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, 66, LightSync) }\n\nfunc testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\n
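\t// Build a chain, then knock a header out of its middle so the attacker serves\n\t// gapped header batches; synchronising against it must fail.\n\tchain := 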
testChainBase.shorten(blockCacheMaxItems - 15)\n\tbrokenChain := chain.shorten(chain.len())\n\tdelete(brokenChain.headerm, brokenChain.chain[brokenChain.len()/2])\n\ttester.newPeer(\"attack\", protocol, brokenChain)\n\n\tif err := tester.sync(\"attack\", nil, mode); err == nil {\n\t\tt.Fatalf(\"succeeded attacker synchronisation\")\n\t}\n\t// Synchronise with the valid peer and make sure sync succeeds\n\ttester.newPeer(\"valid\", protocol, chain)\n\tif err := tester.sync(\"valid\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnChain(t, tester, chain.len())\n}\n\n// Tests that if requested headers are shifted (i.e. first is missing), the queue\n// detects the invalid numbering.\nfunc TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) }\nfunc TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) }\n\nfunc TestShiftedHeaderAttack65Full(t *testing.T)  { testShiftedHeaderAttack(t, 65, FullSync) }\nfunc TestShiftedHeaderAttack65Fast(t *testing.T)  { testShiftedHeaderAttack(t, 65, FastSync) }\nfunc TestShiftedHeaderAttack65Light(t *testing.T) { testShiftedHeaderAttack(t, 65, LightSync) }\n\nfunc TestShiftedHeaderAttack66Full(t *testing.T)  { testShiftedHeaderAttack(t, 66, FullSync) }\nfunc TestShiftedHeaderAttack66Fast(t *testing.T)  { testShiftedHeaderAttack(t, 66, FastSync) }\nfunc TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, 66, LightSync) }\n\nfunc testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\n\tchain := testChainBase.shorten(blockCacheMaxItems - 15)\n\n\t// Attempt a full sync with an attacker feeding shifted headers\n\tbrokenChain := chain.shorten(chain.len())\n\tdelete(brokenChain.headerm, brokenChain.chain[1])\n\tdelete(brokenChain.blockm, brokenChain.chain[1])\n\tdelete(brokenChain.receiptm, brokenChain.chain[1])\n\ttester.newPeer(\"attack\", protocol, brokenChain)\n\tif err := tester.sync(\"attack\", nil, mode); err == nil {\n\t\tt.Fatalf(\"succeeded attacker synchronisation\")\n\t}\n\n\t// Synchronise with the valid peer and make sure sync succeeds\n\ttester.newPeer(\"valid\", protocol, chain)\n\tif err := tester.sync(\"valid\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tassertOwnChain(t, tester, chain.len())\n}\n\n// Tests that upon detecting an invalid header, the recent ones are rolled back\n// for various failure scenarios. 
Afterwards a full sync is attempted to make\n// sure no state was corrupted.\nfunc TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) }\nfunc TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, 65, FastSync) }\nfunc TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, 66, FastSync) }\n\nfunc testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\n\t// Create a small enough block chain to download\n\ttargetBlocks := 3*fsHeaderSafetyNet + 256 + fsMinFullBlocks\n\tchain := testChainBase.shorten(targetBlocks)\n\n\t// Attempt to sync with an attacker that feeds junk during the fast sync phase.\n\t// This should result in the last fsHeaderSafetyNet headers being rolled back.\n\tmissing := fsHeaderSafetyNet + MaxHeaderFetch + 1\n\tfastAttackChain := chain.shorten(chain.len())\n\tdelete(fastAttackChain.headerm, fastAttackChain.chain[missing])\n\ttester.newPeer(\"fast-attack\", protocol, fastAttackChain)\n\n\tif err := tester.sync(\"fast-attack\", nil, mode); err == nil {\n\t\tt.Fatalf(\"succeeded fast attacker synchronisation\")\n\t}\n\tif head := tester.CurrentHeader().Number.Int64(); int(head) > MaxHeaderFetch {\n\t\tt.Errorf(\"rollback head mismatch: have %v, want at most %v\", head, MaxHeaderFetch)\n\t}\n\n\t// Attempt to sync with an attacker that feeds junk during the block import phase.\n\t// This should result in both the last fsHeaderSafetyNet number of headers being\n\t// rolled back, and also the pivot point being reverted to a non-block status.\n\tmissing = 3*fsHeaderSafetyNet + MaxHeaderFetch + 1\n\tblockAttackChain := chain.shorten(chain.len())\n\tdelete(fastAttackChain.headerm, fastAttackChain.chain[missing]) // Make sure the fast-attacker doesn't fill in\n\tdelete(blockAttackChain.headerm, blockAttackChain.chain[missing])\n\ttester.newPeer(\"block-attack\", protocol, blockAttackChain)\n\n\tif err := tester.sync(\"block-attack\", nil, mode); err == nil {\n\t\tt.Fatalf(\"succeeded block attacker synchronisation\")\n\t}\n\tif head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {\n\t\tt.Errorf(\"rollback head mismatch: have %v, want at most %v\", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)\n\t}\n\tif mode == FastSync {\n\t\tif head := tester.CurrentBlock().NumberU64(); head != 0 {\n\t\t\tt.Errorf(\"fast sync pivot block #%d not rolled back\", head)\n\t\t}\n\t}\n\n\t// Attempt to sync with an attacker that withholds promised blocks after the\n\t// fast sync pivot point. 
This could be an attempt to leave the node with a bad\n\t// but already imported pivot block.\n\twithholdAttackChain := chain.shorten(chain.len())\n\ttester.newPeer(\"withhold-attack\", protocol, withholdAttackChain)\n\ttester.downloader.syncInitHook = func(uint64, uint64) {\n\t\tfor i := missing; i < withholdAttackChain.len(); i++ {\n\t\t\tdelete(withholdAttackChain.headerm, withholdAttackChain.chain[i])\n\t\t}\n\t\ttester.downloader.syncInitHook = nil\n\t}\n\tif err := tester.sync(\"withhold-attack\", nil, mode); err == nil {\n\t\tt.Fatalf(\"succeeded withholding attacker synchronisation\")\n\t}\n\tif head := tester.CurrentHeader().Number.Int64(); int(head) > 2*fsHeaderSafetyNet+MaxHeaderFetch {\n\t\tt.Errorf(\"rollback head mismatch: have %v, want at most %v\", head, 2*fsHeaderSafetyNet+MaxHeaderFetch)\n\t}\n\tif mode == FastSync {\n\t\tif head := tester.CurrentBlock().NumberU64(); head != 0 {\n\t\t\tt.Errorf(\"fast sync pivot block #%d not rolled back\", head)\n\t\t}\n\t}\n\n\t// Synchronise with the valid peer and make sure sync succeeds. Since the last rollback\n\t// should also disable fast syncing for this process, verify that we did a fresh full\n\t// sync. Note, we can't assert anything about the receipts since we won't purge the\n\t// database of them, hence we can't use assertOwnChain.\n\ttester.newPeer(\"valid\", protocol, chain)\n\tif err := tester.sync(\"valid\", nil, mode); err != nil {\n\t\tt.Fatalf(\"failed to synchronise blocks: %v\", err)\n\t}\n\tif hs := len(tester.ownHeaders); hs != chain.len() {\n\t\tt.Fatalf(\"synchronised headers mismatch: have %v, want %v\", hs, chain.len())\n\t}\n\tif mode != LightSync {\n\t\tif bs := len(tester.ownBlocks); bs != chain.len() {\n\t\t\tt.Fatalf(\"synchronised blocks mismatch: have %v, want %v\", bs, chain.len())\n\t\t}\n\t}\n\ttester.terminate()\n}\n\n// Tests that a peer advertising a high TD doesn't get to stall the downloader\n// afterwards by not sending any useful hashes.\nfunc TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) }\nfunc TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) }\n\nfunc TestHighTDStarvationAttack65Full(t *testing.T)  { testHighTDStarvationAttack(t, 65, FullSync) }\nfunc TestHighTDStarvationAttack65Fast(t *testing.T)  { testHighTDStarvationAttack(t, 65, FastSync) }\nfunc TestHighTDStarvationAttack65Light(t *testing.T) { testHighTDStarvationAttack(t, 65, LightSync) }\n\nfunc TestHighTDStarvationAttack66Full(t *testing.T)  { testHighTDStarvationAttack(t, 66, FullSync) }\nfunc TestHighTDStarvationAttack66Fast(t *testing.T)  { testHighTDStarvationAttack(t, 66, FastSync) }\nfunc TestHighTDStarvationAttack66Light(t *testing.T) { testHighTDStarvationAttack(t, 66, LightSync) }\n\nfunc testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\n\tchain := testChainBase.shorten(1)\n\ttester.newPeer(\"attack\", protocol, chain)\n\tif err := tester.sync(\"attack\", big.NewInt(1000000), mode); err != errStallingPeer {\n\t\tt.Fatalf(\"synchronisation error mismatch: have %v, want %v\", err, errStallingPeer)\n\t}\n\ttester.terminate()\n}\n\n// Tests that misbehaving peers are disconnected, whilst behaving ones are not.\nfunc TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) }\nfunc TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, 65) }\nfunc TestBlockHeaderAttackerDropping66(t *testing.T) { 
testBlockHeaderAttackerDropping(t, 66) }\n\nfunc testBlockHeaderAttackerDropping(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\t// Define the disconnection requirement for individual hash fetch errors\n\ttests := []struct {\n\t\tresult error\n\t\tdrop   bool\n\t}{\n\t\t{nil, false},                        // Sync succeeded, all is well\n\t\t{errBusy, false},                    // Sync is already in progress, no problem\n\t\t{errUnknownPeer, false},             // Peer is unknown, was already dropped, don't double drop\n\t\t{errBadPeer, true},                  // Peer was deemed bad for some reason, drop it\n\t\t{errStallingPeer, true},             // Peer was detected to be stalling, drop it\n\t\t{errUnsyncedPeer, true},             // Peer was detected to be unsynced, drop it\n\t\t{errNoPeers, false},                 // No peers to download from, soft race, no issue\n\t\t{errTimeout, true},                  // No hashes received in due time, drop the peer\n\t\t{errEmptyHeaderSet, true},           // No headers were returned as a response, drop as it's a dead end\n\t\t{errPeersUnavailable, true},         // Nobody had the advertised blocks, drop the advertiser\n\t\t{errInvalidAncestor, true},          // Agreed upon ancestor is not acceptable, drop the chain rewriter\n\t\t{errInvalidChain, true},             // Hash chain was detected as invalid, definitely drop\n\t\t{errInvalidBody, false},             // A bad peer was detected, but not the sync origin\n\t\t{errInvalidReceipt, false},          // A bad peer was detected, but not the sync origin\n\t\t{errCancelContentProcessing, false}, // Synchronisation was canceled, origin may be innocent, don't drop\n\t}\n\t// Run the tests and check disconnection status\n\ttester := newTester()\n\tdefer tester.terminate()\n\tchain := testChainBase.shorten(1)\n\n\tfor i, tt := range tests {\n\t\t// Register a new peer and ensure its presence\n\t\tid := fmt.Sprintf(\"test %d\", i)\n\t\tif err := tester.newPeer(id, protocol, chain); err != nil {\n\t\t\tt.Fatalf(\"test %d: failed to register new peer: %v\", i, err)\n\t\t}\n\t\tif _, ok := tester.peers[id]; !ok {\n\t\t\tt.Fatalf(\"test %d: registered peer not found\", i)\n\t\t}\n\t\t// Simulate a synchronisation and check the required result\n\t\ttester.downloader.synchroniseMock = func(string, common.Hash) error { return tt.result }\n\n\t\ttester.downloader.Synchronise(id, tester.genesis.Hash(), big.NewInt(1000), FullSync)\n\t\tif _, ok := tester.peers[id]; !ok != tt.drop {\n\t\t\tt.Errorf(\"test %d: peer drop mismatch for %v: have %v, want %v\", i, tt.result, !ok, tt.drop)\n\t\t}\n\t}\n}\n\n// Tests that synchronisation progress (origin block number, current block number\n// and highest block number) is tracked and updated correctly.\nfunc TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) }\nfunc TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) }\n\nfunc TestSyncProgress65Full(t *testing.T)  { testSyncProgress(t, 65, FullSync) }\nfunc TestSyncProgress65Fast(t *testing.T)  { testSyncProgress(t, 65, FastSync) }\nfunc TestSyncProgress65Light(t *testing.T) { testSyncProgress(t, 65, LightSync) }\n\nfunc TestSyncProgress66Full(t *testing.T)  { testSyncProgress(t, 66, FullSync) }\nfunc TestSyncProgress66Fast(t *testing.T)  { testSyncProgress(t, 66, FastSync) }\nfunc TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, 66, LightSync) }\n\nfunc testSyncProgress(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := 
newTester()\n\tdefer tester.terminate()\n\tchain := testChainBase.shorten(blockCacheMaxItems - 15)\n\n\t// Set a sync init hook to catch progress changes\n\tstarting := make(chan struct{})\n\tprogress := make(chan struct{})\n\n\ttester.downloader.syncInitHook = func(origin, latest uint64) {\n\t\tstarting <- struct{}{}\n\t\t<-progress\n\t}\n\tcheckProgress(t, tester.downloader, \"pristine\", ethereum.SyncProgress{})\n\n\t// Synchronise half the blocks and check initial progress\n\ttester.newPeer(\"peer-half\", protocol, chain.shorten(chain.len()/2))\n\tpending := new(sync.WaitGroup)\n\tpending.Add(1)\n\n\tgo func() {\n\t\tdefer pending.Done()\n\t\tif err := tester.sync(\"peer-half\", nil, mode); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"failed to synchronise blocks: %v\", err))\n\t\t}\n\t}()\n\t<-starting\n\tcheckProgress(t, tester.downloader, \"initial\", ethereum.SyncProgress{\n\t\tHighestBlock: uint64(chain.len()/2 - 1),\n\t})\n\tprogress <- struct{}{}\n\tpending.Wait()\n\n\t// Synchronise all the blocks and check continuation progress\n\ttester.newPeer(\"peer-full\", protocol, chain)\n\tpending.Add(1)\n\tgo func() {\n\t\tdefer pending.Done()\n\t\tif err := tester.sync(\"peer-full\", nil, mode); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"failed to synchronise blocks: %v\", err))\n\t\t}\n\t}()\n\t<-starting\n\tcheckProgress(t, tester.downloader, \"completing\", ethereum.SyncProgress{\n\t\tStartingBlock: uint64(chain.len()/2 - 1),\n\t\tCurrentBlock:  uint64(chain.len()/2 - 1),\n\t\tHighestBlock:  uint64(chain.len() - 1),\n\t})\n\n\t// Check final progress after successful sync\n\tprogress <- struct{}{}\n\tpending.Wait()\n\tcheckProgress(t, tester.downloader, \"final\", ethereum.SyncProgress{\n\t\tStartingBlock: uint64(chain.len()/2 - 1),\n\t\tCurrentBlock:  uint64(chain.len() - 1),\n\t\tHighestBlock:  uint64(chain.len() - 1),\n\t})\n}\n\nfunc checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.SyncProgress) {\n\t// Mark this method as a helper to report errors at callsite, not in here\n\tt.Helper()\n\n\tp := d.Progress()\n\tp.KnownStates, p.PulledStates = 0, 0\n\twant.KnownStates, want.PulledStates = 0, 0\n\tif p != want {\n\t\tt.Fatalf(\"%s progress mismatch:\\nhave %+v\\nwant %+v\", stage, p, want)\n\t}\n}\n\n// Tests that synchronisation progress (origin block number and highest block\n// number) is tracked and updated correctly in case of a fork (or manual head\n// reversion).\nfunc TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) }\nfunc TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) }\n\nfunc TestForkedSyncProgress65Full(t *testing.T)  { testForkedSyncProgress(t, 65, FullSync) }\nfunc TestForkedSyncProgress65Fast(t *testing.T)  { testForkedSyncProgress(t, 65, FastSync) }\nfunc TestForkedSyncProgress65Light(t *testing.T) { testForkedSyncProgress(t, 65, LightSync) }\n\nfunc TestForkedSyncProgress66Full(t *testing.T)  { testForkedSyncProgress(t, 66, FullSync) }\nfunc TestForkedSyncProgress66Fast(t *testing.T)  { testForkedSyncProgress(t, 66, FastSync) }\nfunc TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, 66, LightSync) }\n\nfunc testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\tchainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch)\n\tchainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch)\n\n
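\t// The init hook below parks the downloader between the starting and progress\n\t// channels so the test can sample Progress() at a deterministic point.\n\t// Set a sync init hook to catch progress 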
changes\n\tstarting := make(chan struct{})\n\tprogress := make(chan struct{})\n\n\ttester.downloader.syncInitHook = func(origin, latest uint64) {\n\t\tstarting <- struct{}{}\n\t\t<-progress\n\t}\n\tcheckProgress(t, tester.downloader, \"pristine\", ethereum.SyncProgress{})\n\n\t// Synchronise with one of the forks and check progress\n\ttester.newPeer(\"fork A\", protocol, chainA)\n\tpending := new(sync.WaitGroup)\n\tpending.Add(1)\n\tgo func() {\n\t\tdefer pending.Done()\n\t\tif err := tester.sync(\"fork A\", nil, mode); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"failed to synchronise blocks: %v\", err))\n\t\t}\n\t}()\n\t<-starting\n\n\tcheckProgress(t, tester.downloader, \"initial\", ethereum.SyncProgress{\n\t\tHighestBlock: uint64(chainA.len() - 1),\n\t})\n\tprogress <- struct{}{}\n\tpending.Wait()\n\n\t// Simulate a successful sync above the fork\n\ttester.downloader.syncStatsChainOrigin = tester.downloader.syncStatsChainHeight\n\n\t// Synchronise with the second fork and check progress resets\n\ttester.newPeer(\"fork B\", protocol, chainB)\n\tpending.Add(1)\n\tgo func() {\n\t\tdefer pending.Done()\n\t\tif err := tester.sync(\"fork B\", nil, mode); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"failed to synchronise blocks: %v\", err))\n\t\t}\n\t}()\n\t<-starting\n\tcheckProgress(t, tester.downloader, \"forking\", ethereum.SyncProgress{\n\t\tStartingBlock: uint64(testChainBase.len()) - 1,\n\t\tCurrentBlock:  uint64(chainA.len() - 1),\n\t\tHighestBlock:  uint64(chainB.len() - 1),\n\t})\n\n\t// Check final progress after successful sync\n\tprogress <- struct{}{}\n\tpending.Wait()\n\tcheckProgress(t, tester.downloader, \"final\", ethereum.SyncProgress{\n\t\tStartingBlock: uint64(testChainBase.len()) - 1,\n\t\tCurrentBlock:  uint64(chainB.len() - 1),\n\t\tHighestBlock:  uint64(chainB.len() - 1),\n\t})\n}\n\n// Tests that if synchronisation is aborted due to some failure, then the progress\n// origin is not updated in the next sync cycle, as it should be considered the\n// continuation of the previous sync and not a new instance.\nfunc TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) }\nfunc TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) }\n\nfunc TestFailedSyncProgress65Full(t *testing.T)  { testFailedSyncProgress(t, 65, FullSync) }\nfunc TestFailedSyncProgress65Fast(t *testing.T)  { testFailedSyncProgress(t, 65, FastSync) }\nfunc TestFailedSyncProgress65Light(t *testing.T) { testFailedSyncProgress(t, 65, LightSync) }\n\nfunc TestFailedSyncProgress66Full(t *testing.T)  { testFailedSyncProgress(t, 66, FullSync) }\nfunc TestFailedSyncProgress66Fast(t *testing.T)  { testFailedSyncProgress(t, 66, FastSync) }\nfunc TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, 66, LightSync) }\n\nfunc testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\tchain := testChainBase.shorten(blockCacheMaxItems - 15)\n\n\t// Set a sync init hook to catch progress changes\n\tstarting := make(chan struct{})\n\tprogress := make(chan struct{})\n\n\ttester.downloader.syncInitHook = func(origin, latest uint64) {\n\t\tstarting <- struct{}{}\n\t\t<-progress\n\t}\n\tcheckProgress(t, tester.downloader, \"pristine\", ethereum.SyncProgress{})\n\n\t// Attempt a full sync with a faulty peer\n\tbrokenChain := chain.shorten(chain.len())\n\tmissing := brokenChain.len() / 2\n\tdelete(brokenChain.headerm, brokenChain.chain[missing])\n\tdelete(brokenChain.blockm, 
brokenChain.chain[missing])\n\tdelete(brokenChain.receiptm, brokenChain.chain[missing])\n\ttester.newPeer(\"faulty\", protocol, brokenChain)\n\n\tpending := new(sync.WaitGroup)\n\tpending.Add(1)\n\tgo func() {\n\t\tdefer pending.Done()\n\t\tif err := tester.sync(\"faulty\", nil, mode); err == nil {\n\t\t\tpanic(\"succeeded faulty synchronisation\")\n\t\t}\n\t}()\n\t<-starting\n\tcheckProgress(t, tester.downloader, \"initial\", ethereum.SyncProgress{\n\t\tHighestBlock: uint64(brokenChain.len() - 1),\n\t})\n\tprogress <- struct{}{}\n\tpending.Wait()\n\tafterFailedSync := tester.downloader.Progress()\n\n\t// Synchronise with a good peer and check that the progress origin remains the same\n\t// after a failure\n\ttester.newPeer(\"valid\", protocol, chain)\n\tpending.Add(1)\n\tgo func() {\n\t\tdefer pending.Done()\n\t\tif err := tester.sync(\"valid\", nil, mode); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"failed to synchronise blocks: %v\", err))\n\t\t}\n\t}()\n\t<-starting\n\tcheckProgress(t, tester.downloader, \"completing\", afterFailedSync)\n\n\t// Check final progress after successful sync\n\tprogress <- struct{}{}\n\tpending.Wait()\n\tcheckProgress(t, tester.downloader, \"final\", ethereum.SyncProgress{\n\t\tCurrentBlock: uint64(chain.len() - 1),\n\t\tHighestBlock: uint64(chain.len() - 1),\n\t})\n}\n\n// Tests that if an attacker fakes a chain height, after the attack is detected,\n// the progress height is successfully reduced at the next sync invocation.\nfunc TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) }\nfunc TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) }\n\nfunc TestFakedSyncProgress65Full(t *testing.T)  { testFakedSyncProgress(t, 65, FullSync) }\nfunc TestFakedSyncProgress65Fast(t *testing.T)  { testFakedSyncProgress(t, 65, FastSync) }\nfunc TestFakedSyncProgress65Light(t *testing.T) { testFakedSyncProgress(t, 65, LightSync) }\n\nfunc TestFakedSyncProgress66Full(t *testing.T)  { testFakedSyncProgress(t, 66, FullSync) }\nfunc TestFakedSyncProgress66Fast(t *testing.T)  { testFakedSyncProgress(t, 66, FastSync) }\nfunc TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, 66, LightSync) }\n\nfunc testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\ttester := newTester()\n\tdefer tester.terminate()\n\tchain := testChainBase.shorten(blockCacheMaxItems - 15)\n\n\t// Set a sync init hook to catch progress changes\n\tstarting := make(chan struct{})\n\tprogress := make(chan struct{})\n\ttester.downloader.syncInitHook = func(origin, latest uint64) {\n\t\tstarting <- struct{}{}\n\t\t<-progress\n\t}\n\tcheckProgress(t, tester.downloader, \"pristine\", ethereum.SyncProgress{})\n\n\t// Create and sync with an attacker that promises a higher chain than available.\n\tbrokenChain := chain.shorten(chain.len())\n\tnumMissing := 5\n\tfor i := brokenChain.len() - 2; i > brokenChain.len()-numMissing; i-- {\n\t\tdelete(brokenChain.headerm, brokenChain.chain[i])\n\t}\n\ttester.newPeer(\"attack\", protocol, brokenChain)\n\n\tpending := new(sync.WaitGroup)\n\tpending.Add(1)\n\tgo func() {\n\t\tdefer pending.Done()\n\t\tif err := tester.sync(\"attack\", nil, mode); err == nil {\n\t\t\tpanic(\"succeeded attacker synchronisation\")\n\t\t}\n\t}()\n\t<-starting\n\tcheckProgress(t, tester.downloader, \"initial\", ethereum.SyncProgress{\n\t\tHighestBlock: uint64(brokenChain.len() - 1),\n\t})\n\tprogress <- struct{}{}\n\tpending.Wait()\n\tafterFailedSync := tester.downloader.Progress()\n\n
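\t// The attacker promised a taller chain than it can deliver, so the next sync\n\t// against an honest peer must shrink the reported height back to reality.\n\t// 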
Synchronise with a good peer and check that the progress height has been reduced to\n\t// the true value.\n\tvalidChain := chain.shorten(chain.len() - numMissing)\n\ttester.newPeer(\"valid\", protocol, validChain)\n\tpending.Add(1)\n\n\tgo func() {\n\t\tdefer pending.Done()\n\t\tif err := tester.sync(\"valid\", nil, mode); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"failed to synchronise blocks: %v\", err))\n\t\t}\n\t}()\n\t<-starting\n\tcheckProgress(t, tester.downloader, \"completing\", ethereum.SyncProgress{\n\t\tCurrentBlock: afterFailedSync.CurrentBlock,\n\t\tHighestBlock: uint64(validChain.len() - 1),\n\t})\n\n\t// Check final progress after successful sync.\n\tprogress <- struct{}{}\n\tpending.Wait()\n\tcheckProgress(t, tester.downloader, \"final\", ethereum.SyncProgress{\n\t\tCurrentBlock: uint64(validChain.len() - 1),\n\t\tHighestBlock: uint64(validChain.len() - 1),\n\t})\n}\n\n// This test reproduces an issue where unexpected deliveries would\n// block indefinitely if they arrived at the right time.\nfunc TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64, FullSync) }\nfunc TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) }\n\nfunc TestDeliverHeadersHang65Full(t *testing.T)  { testDeliverHeadersHang(t, 65, FullSync) }\nfunc TestDeliverHeadersHang65Fast(t *testing.T)  { testDeliverHeadersHang(t, 65, FastSync) }\nfunc TestDeliverHeadersHang65Light(t *testing.T) { testDeliverHeadersHang(t, 65, LightSync) }\n\nfunc TestDeliverHeadersHang66Full(t *testing.T)  { testDeliverHeadersHang(t, 66, FullSync) }\nfunc TestDeliverHeadersHang66Fast(t *testing.T)  { testDeliverHeadersHang(t, 66, FastSync) }\nfunc TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, 66, LightSync) }\n\nfunc testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\tmaster := newTester()\n\tdefer master.terminate()\n\tchain := testChainBase.shorten(15)\n\n\tfor i := 0; i < 200; i++ {\n\t\ttester := newTester()\n\t\ttester.peerDb = master.peerDb\n\t\ttester.newPeer(\"peer\", protocol, chain)\n\n\t\t// Whenever the downloader requests headers, flood it with\n\t\t// a lot of unrequested header deliveries.\n\t\ttester.downloader.peers.peers[\"peer\"].peer = &floodingTestPeer{\n\t\t\tpeer:   tester.downloader.peers.peers[\"peer\"].peer,\n\t\t\ttester: tester,\n\t\t}\n\t\tif err := tester.sync(\"peer\", nil, mode); err != nil {\n\t\t\tt.Errorf(\"test %d: sync failed: %v\", i, err)\n\t\t}\n\t\ttester.terminate()\n\t}\n}\n\ntype floodingTestPeer struct {\n\tpeer   Peer\n\ttester *downloadTester\n}\n\nfunc (ftp *floodingTestPeer) Head() (common.Hash, *big.Int) { return ftp.peer.Head() }\nfunc (ftp *floodingTestPeer) RequestHeadersByHash(hash common.Hash, count int, skip int, reverse bool) error {\n\treturn ftp.peer.RequestHeadersByHash(hash, count, skip, reverse)\n}\nfunc (ftp *floodingTestPeer) RequestBodies(hashes []common.Hash) error {\n\treturn ftp.peer.RequestBodies(hashes)\n}\nfunc (ftp *floodingTestPeer) RequestReceipts(hashes []common.Hash) error {\n\treturn ftp.peer.RequestReceipts(hashes)\n}\nfunc (ftp *floodingTestPeer) RequestNodeData(hashes []common.Hash) error {\n\treturn ftp.peer.RequestNodeData(hashes)\n}\n\nfunc (ftp *floodingTestPeer) RequestHeadersByNumber(from uint64, count, skip int, reverse bool) error {\n\tdeliveriesDone := make(chan struct{}, 500)\n\tfor i := 0; i < cap(deliveriesDone)-1; i++ {\n\t\tpeer := fmt.Sprintf(\"fake-peer%d\", i)\n\t\tgo func() 
{\n\t\t\tftp.tester.downloader.DeliverHeaders(peer, []*types.Header{{}, {}, {}, {}})\n\t\t\tdeliveriesDone <- struct{}{}\n\t\t}()\n\t}\n\n\t// None of the extra deliveries should block.\n\ttimeout := time.After(60 * time.Second)\n\tlaunched := false\n\tfor i := 0; i < cap(deliveriesDone); i++ {\n\t\tselect {\n\t\tcase <-deliveriesDone:\n\t\t\tif !launched {\n\t\t\t\t// Start delivering the requested headers\n\t\t\t\t// after one of the flooding responses has arrived.\n\t\t\t\tgo func() {\n\t\t\t\t\tftp.peer.RequestHeadersByNumber(from, count, skip, reverse)\n\t\t\t\t\tdeliveriesDone <- struct{}{}\n\t\t\t\t}()\n\t\t\t\tlaunched = true\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\tpanic(\"blocked\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc TestRemoteHeaderRequestSpan(t *testing.T) {\n\ttestCases := []struct {\n\t\tremoteHeight uint64\n\t\tlocalHeight  uint64\n\t\texpected     []int\n\t}{\n\t\t// Remote is way higher. We should ask for the remote head and go backwards\n\t\t{1500, 1000,\n\t\t\t[]int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499},\n\t\t},\n\t\t{15000, 13006,\n\t\t\t[]int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999},\n\t\t},\n\t\t// Remote is pretty close to us. We don't have to fetch as many\n\t\t{1200, 1150,\n\t\t\t[]int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199},\n\t\t},\n\t\t// Remote is equal to us (so on a fork with higher td)\n\t\t// We should get the closest couple of ancestors\n\t\t{1500, 1500,\n\t\t\t[]int{1497, 1499},\n\t\t},\n\t\t// We're higher than the remote! Odd\n\t\t{1000, 1500,\n\t\t\t[]int{997, 999},\n\t\t},\n\t\t// Check some weird edge cases to make sure it behaves somewhat rationally\n\t\t{0, 1500,\n\t\t\t[]int{0, 2},\n\t\t},\n\t\t{6000000, 0,\n\t\t\t[]int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999},\n\t\t},\n\t\t{0, 0,\n\t\t\t[]int{0, 2},\n\t\t},\n\t}\n\t// reqs expands a (from, count, span) triple into the list of header numbers\n\t// such a request would fetch\n\treqs := func(from, count, span int) []int {\n\t\tvar r []int\n\t\tnum := from\n\t\tfor len(r) < count {\n\t\t\tr = append(r, num)\n\t\t\tnum += span + 1\n\t\t}\n\t\treturn r\n\t}\n\tfor i, tt := range testCases {\n\t\tfrom, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight)\n\t\tdata := reqs(int(from), count, span)\n\n\t\tif max != uint64(data[len(data)-1]) {\n\t\t\tt.Errorf(\"test %d: wrong last value %d != %d\", i, data[len(data)-1], max)\n\t\t}\n\t\tfailed := false\n\t\tif len(data) != len(tt.expected) {\n\t\t\tfailed = true\n\t\t\tt.Errorf(\"test %d: length wrong, expected %d got %d\", i, len(tt.expected), len(data))\n\t\t} else {\n\t\t\tfor j, n := range data {\n\t\t\t\tif n != tt.expected[j] {\n\t\t\t\t\tfailed = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif failed {\n\t\t\tres := strings.Replace(fmt.Sprint(data), \" \", \",\", -1)\n\t\t\texp := strings.Replace(fmt.Sprint(tt.expected), \" \", \",\", -1)\n\t\t\tt.Logf(\"got: %v\\n\", res)\n\t\t\tt.Logf(\"exp: %v\\n\", exp)\n\t\t\tt.Errorf(\"test %d: wrong values\", i)\n\t\t}\n\t}\n}\n\n// Tests that peers below a pre-configured checkpoint block are prevented from\n// being fast-synced from, avoiding potential cheap eclipse attacks.\nfunc TestCheckpointEnforcement64Full(t *testing.T) { testCheckpointEnforcement(t, 64, FullSync) }\nfunc TestCheckpointEnforcement64Fast(t *testing.T) { testCheckpointEnforcement(t, 64, FastSync) }\n\nfunc TestCheckpointEnforcement65Full(t *testing.T)  { testCheckpointEnforcement(t, 65, FullSync) }\nfunc TestCheckpointEnforcement65Fast(t *testing.T)  { 
testCheckpointEnforcement(t, 65, FastSync) }\nfunc TestCheckpointEnforcement65Light(t *testing.T) { testCheckpointEnforcement(t, 65, LightSync) }\n\nfunc TestCheckpointEnforcement66Full(t *testing.T)  { testCheckpointEnforcement(t, 66, FullSync) }\nfunc TestCheckpointEnforcement66Fast(t *testing.T)  { testCheckpointEnforcement(t, 66, FastSync) }\nfunc TestCheckpointEnforcement66Light(t *testing.T) { testCheckpointEnforcement(t, 66, LightSync) }\n\nfunc testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) {\n\tt.Parallel()\n\n\t// Create a new tester with a particular hard coded checkpoint block\n\ttester := newTester()\n\tdefer tester.terminate()\n\n\ttester.downloader.checkpoint = uint64(fsMinFullBlocks) + 256\n\tchain := testChainBase.shorten(int(tester.downloader.checkpoint) - 1)\n\n\t// Attempt to sync with the peer and validate the result\n\ttester.newPeer(\"peer\", protocol, chain)\n\n\tvar expect error\n\tif mode == FastSync || mode == LightSync {\n\t\texpect = errUnsyncedPeer\n\t}\n\tif err := tester.sync(\"peer\", nil, mode); !errors.Is(err, expect) {\n\t\tt.Fatalf(\"block sync error mismatch: have %v, want %v\", err, expect)\n\t}\n\tif mode == FastSync || mode == LightSync {\n\t\tassertOwnChain(t, tester, 1)\n\t} else {\n\t\tassertOwnChain(t, tester, chain.len())\n\t}\n}\n"
  },
  {
    "path": "eth/downloader/events.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage downloader\n\nimport \"github.com/ethereum/go-ethereum/core/types\"\n\ntype DoneEvent struct {\n\tLatest *types.Header\n}\ntype StartEvent struct{}\ntype FailedEvent struct{ Err error }\n"
  },
  {
    "path": "eth/downloader/metrics.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Contains the metrics collected by the downloader.\n\npackage downloader\n\nimport (\n\t\"github.com/ethereum/go-ethereum/metrics\"\n)\n\nvar (\n\theaderInMeter      = metrics.NewRegisteredMeter(\"eth/downloader/headers/in\", nil)\n\theaderReqTimer     = metrics.NewRegisteredTimer(\"eth/downloader/headers/req\", nil)\n\theaderDropMeter    = metrics.NewRegisteredMeter(\"eth/downloader/headers/drop\", nil)\n\theaderTimeoutMeter = metrics.NewRegisteredMeter(\"eth/downloader/headers/timeout\", nil)\n\n\tbodyInMeter      = metrics.NewRegisteredMeter(\"eth/downloader/bodies/in\", nil)\n\tbodyReqTimer     = metrics.NewRegisteredTimer(\"eth/downloader/bodies/req\", nil)\n\tbodyDropMeter    = metrics.NewRegisteredMeter(\"eth/downloader/bodies/drop\", nil)\n\tbodyTimeoutMeter = metrics.NewRegisteredMeter(\"eth/downloader/bodies/timeout\", nil)\n\n\treceiptInMeter      = metrics.NewRegisteredMeter(\"eth/downloader/receipts/in\", nil)\n\treceiptReqTimer     = metrics.NewRegisteredTimer(\"eth/downloader/receipts/req\", nil)\n\treceiptDropMeter    = metrics.NewRegisteredMeter(\"eth/downloader/receipts/drop\", nil)\n\treceiptTimeoutMeter = metrics.NewRegisteredMeter(\"eth/downloader/receipts/timeout\", nil)\n\n\tstateInMeter   = metrics.NewRegisteredMeter(\"eth/downloader/states/in\", nil)\n\tstateDropMeter = metrics.NewRegisteredMeter(\"eth/downloader/states/drop\", nil)\n\n\tthrottleCounter = metrics.NewRegisteredCounter(\"eth/downloader/throttle\", nil)\n)\n"
  },
  {
    "path": "eth/downloader/modes.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage downloader\n\nimport \"fmt\"\n\n// SyncMode represents the synchronisation mode of the downloader.\n// It is a uint32 as it is used with atomic operations.\ntype SyncMode uint32\n\nconst (\n\tFullSync  SyncMode = iota // Synchronise the entire blockchain history from full blocks\n\tFastSync                  // Quickly download the headers, full sync only at the chain\n\tSnapSync                  // Download the chain and the state via compact snapshots\n\tLightSync                 // Download only the headers and terminate afterwards\n)\n\nfunc (mode SyncMode) IsValid() bool {\n\treturn mode >= FullSync && mode <= LightSync\n}\n\n// String implements the stringer interface.\nfunc (mode SyncMode) String() string {\n\tswitch mode {\n\tcase FullSync:\n\t\treturn \"full\"\n\tcase FastSync:\n\t\treturn \"fast\"\n\tcase SnapSync:\n\t\treturn \"snap\"\n\tcase LightSync:\n\t\treturn \"light\"\n\tdefault:\n\t\treturn \"unknown\"\n\t}\n}\n\nfunc (mode SyncMode) MarshalText() ([]byte, error) {\n\tswitch mode {\n\tcase FullSync:\n\t\treturn []byte(\"full\"), nil\n\tcase FastSync:\n\t\treturn []byte(\"fast\"), nil\n\tcase SnapSync:\n\t\treturn []byte(\"snap\"), nil\n\tcase LightSync:\n\t\treturn []byte(\"light\"), nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown sync mode %d\", mode)\n\t}\n}\n\nfunc (mode *SyncMode) UnmarshalText(text []byte) error {\n\tswitch string(text) {\n\tcase \"full\":\n\t\t*mode = FullSync\n\tcase \"fast\":\n\t\t*mode = FastSync\n\tcase \"snap\":\n\t\t*mode = SnapSync\n\tcase \"light\":\n\t\t*mode = LightSync\n\tdefault:\n\t\treturn fmt.Errorf(`unknown sync mode %q, want \"full\", \"fast\" or \"light\"`, text)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "eth/downloader/peer.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Contains the active peer-set of the downloader, maintaining both failures\n// as well as reputation metrics to prioritize the block retrievals.\n\npackage downloader\n\nimport (\n\t\"errors\"\n\t\"math\"\n\t\"math/big\"\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/eth\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/log\"\n)\n\nconst (\n\tmaxLackingHashes  = 4096 // Maximum number of entries allowed on the list or lacking items\n\tmeasurementImpact = 0.1  // The impact a single measurement has on a peer's final throughput value.\n)\n\nvar (\n\terrAlreadyFetching   = errors.New(\"already fetching blocks from peer\")\n\terrAlreadyRegistered = errors.New(\"peer is already registered\")\n\terrNotRegistered     = errors.New(\"peer is not registered\")\n)\n\n// peerConnection represents an active peer from which hashes and blocks are retrieved.\ntype peerConnection struct {\n\tid string // Unique identifier of the peer\n\n\theaderIdle  int32 // Current header activity state of the peer (idle = 0, active = 1)\n\tblockIdle   int32 // Current block activity state of the peer (idle = 0, active = 1)\n\treceiptIdle int32 // Current receipt activity state of the peer (idle = 0, active = 1)\n\tstateIdle   int32 // Current node data activity state of the peer (idle = 0, active = 1)\n\n\theaderThroughput  float64 // Number of headers measured to be retrievable per second\n\tblockThroughput   float64 // Number of blocks (bodies) measured to be retrievable per second\n\treceiptThroughput float64 // Number of receipts measured to be retrievable per second\n\tstateThroughput   float64 // Number of node data pieces measured to be retrievable per second\n\n\trtt time.Duration // Request round trip time to track responsiveness (QoS)\n\n\theaderStarted  time.Time // Time instance when the last header fetch was started\n\tblockStarted   time.Time // Time instance when the last block (body) fetch was started\n\treceiptStarted time.Time // Time instance when the last receipt fetch was started\n\tstateStarted   time.Time // Time instance when the last node data fetch was started\n\n\tlacking map[common.Hash]struct{} // Set of hashes not to request (didn't have previously)\n\n\tpeer Peer\n\n\tversion uint       // Eth protocol version number to switch strategies\n\tlog     log.Logger // Contextual logger to add extra infos to peer logs\n\tlock    sync.RWMutex\n}\n\n// LightPeer encapsulates the methods required to synchronise with a remote light peer.\ntype LightPeer interface {\n\tHead() (common.Hash, *big.Int)\n\tRequestHeadersByHash(common.Hash, int, int, bool) 
error\n\tRequestHeadersByNumber(uint64, int, int, bool) error\n}\n\n// Peer encapsulates the methods required to synchronise with a remote full peer.\ntype Peer interface {\n\tLightPeer\n\tRequestBodies([]common.Hash) error\n\tRequestReceipts([]common.Hash) error\n\tRequestNodeData([]common.Hash) error\n}\n\n// lightPeerWrapper wraps a LightPeer struct, stubbing out the Peer-only methods.\ntype lightPeerWrapper struct {\n\tpeer LightPeer\n}\n\nfunc (w *lightPeerWrapper) Head() (common.Hash, *big.Int) { return w.peer.Head() }\nfunc (w *lightPeerWrapper) RequestHeadersByHash(h common.Hash, amount int, skip int, reverse bool) error {\n\treturn w.peer.RequestHeadersByHash(h, amount, skip, reverse)\n}\nfunc (w *lightPeerWrapper) RequestHeadersByNumber(i uint64, amount int, skip int, reverse bool) error {\n\treturn w.peer.RequestHeadersByNumber(i, amount, skip, reverse)\n}\nfunc (w *lightPeerWrapper) RequestBodies([]common.Hash) error {\n\tpanic(\"RequestBodies not supported in light client mode sync\")\n}\nfunc (w *lightPeerWrapper) RequestReceipts([]common.Hash) error {\n\tpanic(\"RequestReceipts not supported in light client mode sync\")\n}\nfunc (w *lightPeerWrapper) RequestNodeData([]common.Hash) error {\n\tpanic(\"RequestNodeData not supported in light client mode sync\")\n}\n\n// newPeerConnection creates a new downloader peer.\nfunc newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection {\n\treturn &peerConnection{\n\t\tid:      id,\n\t\tlacking: make(map[common.Hash]struct{}),\n\t\tpeer:    peer,\n\t\tversion: version,\n\t\tlog:     logger,\n\t}\n}\n\n// Reset clears the internal state of a peer entity.\nfunc (p *peerConnection) Reset() {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\tatomic.StoreInt32(&p.headerIdle, 0)\n\tatomic.StoreInt32(&p.blockIdle, 0)\n\tatomic.StoreInt32(&p.receiptIdle, 0)\n\tatomic.StoreInt32(&p.stateIdle, 0)\n\n\tp.headerThroughput = 0\n\tp.blockThroughput = 0\n\tp.receiptThroughput = 0\n\tp.stateThroughput = 0\n\n\tp.lacking = make(map[common.Hash]struct{})\n}\n\n// FetchHeaders sends a header retrieval request to the remote peer.\nfunc (p *peerConnection) FetchHeaders(from uint64, count int) error {\n\t// Short circuit if the peer is already fetching\n\tif !atomic.CompareAndSwapInt32(&p.headerIdle, 0, 1) {\n\t\treturn errAlreadyFetching\n\t}\n\tp.headerStarted = time.Now()\n\n\t// Issue the header retrieval request (absolute upwards without gaps)\n\tgo p.peer.RequestHeadersByNumber(from, count, 0, false)\n\n\treturn nil\n}\n\n// FetchBodies sends a block body retrieval request to the remote peer.\nfunc (p *peerConnection) FetchBodies(request *fetchRequest) error {\n\t// Short circuit if the peer is already fetching\n\tif !atomic.CompareAndSwapInt32(&p.blockIdle, 0, 1) {\n\t\treturn errAlreadyFetching\n\t}\n\tp.blockStarted = time.Now()\n\n\tgo func() {\n\t\t// Convert the header set to a retrievable slice\n\t\thashes := make([]common.Hash, 0, len(request.Headers))\n\t\tfor _, header := range request.Headers {\n\t\t\thashes = append(hashes, header.Hash())\n\t\t}\n\t\tp.peer.RequestBodies(hashes)\n\t}()\n\n\treturn nil\n}\n\n// FetchReceipts sends a receipt retrieval request to the remote peer.\nfunc (p *peerConnection) FetchReceipts(request *fetchRequest) error {\n\t// Short circuit if the peer is already fetching\n\tif !atomic.CompareAndSwapInt32(&p.receiptIdle, 0, 1) {\n\t\treturn errAlreadyFetching\n\t}\n\tp.receiptStarted = time.Now()\n\n\tgo func() {\n\t\t// Convert the header set to a retrievable slice\n\t\thashes := 
make([]common.Hash, 0, len(request.Headers))\n\t\tfor _, header := range request.Headers {\n\t\t\thashes = append(hashes, header.Hash())\n\t\t}\n\t\tp.peer.RequestReceipts(hashes)\n\t}()\n\n\treturn nil\n}\n\n// FetchNodeData sends a node state data retrieval request to the remote peer.\nfunc (p *peerConnection) FetchNodeData(hashes []common.Hash) error {\n\t// Short circuit if the peer is already fetching\n\tif !atomic.CompareAndSwapInt32(&p.stateIdle, 0, 1) {\n\t\treturn errAlreadyFetching\n\t}\n\tp.stateStarted = time.Now()\n\n\tgo p.peer.RequestNodeData(hashes)\n\n\treturn nil\n}\n\n// SetHeadersIdle sets the peer to idle, allowing it to execute new header retrieval\n// requests. Its estimated header retrieval throughput is updated with that measured\n// just now.\nfunc (p *peerConnection) SetHeadersIdle(delivered int, deliveryTime time.Time) {\n\tp.setIdle(deliveryTime.Sub(p.headerStarted), delivered, &p.headerThroughput, &p.headerIdle)\n}\n\n// SetBodiesIdle sets the peer to idle, allowing it to execute block body retrieval\n// requests. Its estimated body retrieval throughput is updated with that measured\n// just now.\nfunc (p *peerConnection) SetBodiesIdle(delivered int, deliveryTime time.Time) {\n\tp.setIdle(deliveryTime.Sub(p.blockStarted), delivered, &p.blockThroughput, &p.blockIdle)\n}\n\n// SetReceiptsIdle sets the peer to idle, allowing it to execute new receipt\n// retrieval requests. Its estimated receipt retrieval throughput is updated\n// with that measured just now.\nfunc (p *peerConnection) SetReceiptsIdle(delivered int, deliveryTime time.Time) {\n\tp.setIdle(deliveryTime.Sub(p.receiptStarted), delivered, &p.receiptThroughput, &p.receiptIdle)\n}\n\n// SetNodeDataIdle sets the peer to idle, allowing it to execute new state trie\n// data retrieval requests. 
Its estimated state retrieval throughput is updated\n// with that measured just now.\nfunc (p *peerConnection) SetNodeDataIdle(delivered int, deliveryTime time.Time) {\n\tp.setIdle(deliveryTime.Sub(p.stateStarted), delivered, &p.stateThroughput, &p.stateIdle)\n}\n\n// setIdle sets the peer to idle, allowing it to execute new retrieval requests.\n// Its estimated retrieval throughput is updated with that measured just now.\nfunc (p *peerConnection) setIdle(elapsed time.Duration, delivered int, throughput *float64, idle *int32) {\n\t// Irrespective of the scaling, make sure the peer ends up idle\n\tdefer atomic.StoreInt32(idle, 0)\n\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\t// If nothing was delivered (hard timeout / unavailable data), reduce throughput to minimum\n\tif delivered == 0 {\n\t\t*throughput = 0\n\t\treturn\n\t}\n\t// Otherwise update the throughput with a new measurement\n\tif elapsed <= 0 {\n\t\telapsed = 1 // +1 (ns) to ensure non-zero divisor\n\t}\n\tmeasured := float64(delivered) / (float64(elapsed) / float64(time.Second))\n\n\t*throughput = (1-measurementImpact)*(*throughput) + measurementImpact*measured\n\tp.rtt = time.Duration((1-measurementImpact)*float64(p.rtt) + measurementImpact*float64(elapsed))\n\n\tp.log.Trace(\"Peer throughput measurements updated\",\n\t\t\"hps\", p.headerThroughput, \"bps\", p.blockThroughput,\n\t\t\"rps\", p.receiptThroughput, \"sps\", p.stateThroughput,\n\t\t\"miss\", len(p.lacking), \"rtt\", p.rtt)\n}\n\n// HeaderCapacity retrieves the peer's header download allowance based on its\n// previously discovered throughput.\nfunc (p *peerConnection) HeaderCapacity(targetRTT time.Duration) int {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\treturn int(math.Min(1+math.Max(1, p.headerThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxHeaderFetch)))\n}\n\n// BlockCapacity retrieves the peer's block download allowance based on its\n// previously discovered throughput.\nfunc (p *peerConnection) BlockCapacity(targetRTT time.Duration) int {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\treturn int(math.Min(1+math.Max(1, p.blockThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxBlockFetch)))\n}\n\n// ReceiptCapacity retrieves the peer's receipt download allowance based on its\n// previously discovered throughput.\nfunc (p *peerConnection) ReceiptCapacity(targetRTT time.Duration) int {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\treturn int(math.Min(1+math.Max(1, p.receiptThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxReceiptFetch)))\n}\n\n// NodeDataCapacity retrieves the peer's state download allowance based on its\n// previously discovered throughput.\nfunc (p *peerConnection) NodeDataCapacity(targetRTT time.Duration) int {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\treturn int(math.Min(1+math.Max(1, p.stateThroughput*float64(targetRTT)/float64(time.Second)), float64(MaxStateFetch)))\n}\n\n// MarkLacking appends a new entity to the set of items (blocks, receipts, states)\n// that a peer is known not to have (i.e. have been requested before). If the\n// set reaches its maximum allowed capacity, items are randomly dropped off.\nfunc (p *peerConnection) MarkLacking(hash common.Hash) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\tfor len(p.lacking) >= maxLackingHashes {\n\t\tfor drop := range p.lacking {\n\t\t\tdelete(p.lacking, drop)\n\t\t\tbreak\n\t\t}\n\t}\n\tp.lacking[hash] = struct{}{}\n}\n
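\n// exampleThroughputMath is an illustrative sketch added by the editor; it is\n// not part of the original go-ethereum file. It shows the effect of the\n// exponential moving average in setIdle and of the allowance formula used by\n// the *Capacity methods above (with targetRTT expressed here directly in\n// seconds rather than as a time.Duration). Assuming measurementImpact = 0.1,\n// a peer previously estimated at 100 items/s that delivers a burst at 200\n// items/s only moves to 0.9*100 + 0.1*200 = 110 items/s, so a single outlier\n// shifts the estimate by a tenth of the difference. That 110 items/s peer,\n// asked for a 0.5s target round trip, gets 1 + max(1, 110*0.5) = 56 items,\n// capped by the per-request maximum (MaxHeaderFetch for headers).\nfunc exampleThroughputMath(old, measured, targetRTTSeconds float64) (ema float64, allowance int) {\n\tema = (1-measurementImpact)*old + measurementImpact*measured\n\tallowance = int(math.Min(1+math.Max(1, ema*targetRTTSeconds), float64(MaxHeaderFetch)))\n\treturn ema, allowance\n}\n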
\n// Lacks retrieves whether the hash of a blockchain item is on the peer's lacking\n// list (i.e. whether we know that the peer does not have it).\nfunc (p *peerConnection) Lacks(hash common.Hash) bool {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\t_, ok := p.lacking[hash]\n\treturn ok\n}\n\n// peerSet represents the collection of active peers participating in the chain\n// download procedure.\ntype peerSet struct {\n\tpeers        map[string]*peerConnection\n\tnewPeerFeed  event.Feed\n\tpeerDropFeed event.Feed\n\tlock         sync.RWMutex\n}\n\n// newPeerSet creates a new peer set to track the active download sources.\nfunc newPeerSet() *peerSet {\n\treturn &peerSet{\n\t\tpeers: make(map[string]*peerConnection),\n\t}\n}\n\n// SubscribeNewPeers subscribes to peer arrival events.\nfunc (ps *peerSet) SubscribeNewPeers(ch chan<- *peerConnection) event.Subscription {\n\treturn ps.newPeerFeed.Subscribe(ch)\n}\n\n// SubscribePeerDrops subscribes to peer departure events.\nfunc (ps *peerSet) SubscribePeerDrops(ch chan<- *peerConnection) event.Subscription {\n\treturn ps.peerDropFeed.Subscribe(ch)\n}\n\n// Reset iterates over the current peer set, and resets each of the known peers\n// to prepare for the next batch of block retrieval.\nfunc (ps *peerSet) Reset() {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\tfor _, peer := range ps.peers {\n\t\tpeer.Reset()\n\t}\n}\n\n// Register injects a new peer into the working set, or returns an error if the\n// peer is already known.\n//\n// The method also sets the starting throughput values of the new peer to the\n// average of all existing peers, to give it a realistic chance of being used\n// for data retrievals.\nfunc (ps *peerSet) Register(p *peerConnection) error {\n\t// Retrieve the current median RTT as a sane default\n\tp.rtt = ps.medianRTT()\n\n\t// Register the new peer with some meaningful defaults\n\tps.lock.Lock()\n\tif _, ok := ps.peers[p.id]; ok {\n\t\tps.lock.Unlock()\n\t\treturn errAlreadyRegistered\n\t}\n\tif len(ps.peers) > 0 {\n\t\tp.headerThroughput, p.blockThroughput, p.receiptThroughput, p.stateThroughput = 0, 0, 0, 0\n\n\t\tfor _, peer := range ps.peers {\n\t\t\tpeer.lock.RLock()\n\t\t\tp.headerThroughput += peer.headerThroughput\n\t\t\tp.blockThroughput += peer.blockThroughput\n\t\t\tp.receiptThroughput += peer.receiptThroughput\n\t\t\tp.stateThroughput += peer.stateThroughput\n\t\t\tpeer.lock.RUnlock()\n\t\t}\n\t\tp.headerThroughput /= float64(len(ps.peers))\n\t\tp.blockThroughput /= float64(len(ps.peers))\n\t\tp.receiptThroughput /= float64(len(ps.peers))\n\t\tp.stateThroughput /= float64(len(ps.peers))\n\t}\n\tps.peers[p.id] = p\n\tps.lock.Unlock()\n\n\tps.newPeerFeed.Send(p)\n\treturn nil\n}\n
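\n// Editor's illustrative note (not part of the original go-ethereum file): with\n// two registered peers measured at 100 and 300 blocks/s, Register above seeds a\n// newcomer at (100+300)/2 = 200 blocks/s for each throughput class. Seeding the\n// average this way gives the fresh peer a realistic share of requests until its\n// own deliveries update the estimate via setIdle.\n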
\n// Unregister removes a remote peer from the active set, disabling any further\n// actions to/from that particular entity.\nfunc (ps *peerSet) Unregister(id string) error {\n\tps.lock.Lock()\n\tp, ok := ps.peers[id]\n\tif !ok {\n\t\tps.lock.Unlock()\n\t\treturn errNotRegistered\n\t}\n\tdelete(ps.peers, id)\n\tps.lock.Unlock()\n\n\tps.peerDropFeed.Send(p)\n\treturn nil\n}\n\n// Peer retrieves the registered peer with the given id.\nfunc (ps *peerSet) Peer(id string) *peerConnection {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\treturn ps.peers[id]\n}\n\n// Len returns the current number of peers in the set.\nfunc (ps *peerSet) Len() int {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\treturn len(ps.peers)\n}\n\n// AllPeers retrieves a flat list of all the peers within the set.\nfunc (ps *peerSet) AllPeers() []*peerConnection {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\tlist := make([]*peerConnection, 0, len(ps.peers))\n\tfor _, p := range ps.peers {\n\t\tlist = append(list, p)\n\t}\n\treturn list\n}\n\n// HeaderIdlePeers retrieves a flat list of all the currently header-idle peers\n// within the active peer set, ordered by their reputation.\nfunc (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {\n\tidle := func(p *peerConnection) bool {\n\t\treturn atomic.LoadInt32(&p.headerIdle) == 0\n\t}\n\tthroughput := func(p *peerConnection) float64 {\n\t\tp.lock.RLock()\n\t\tdefer p.lock.RUnlock()\n\t\treturn p.headerThroughput\n\t}\n\treturn ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput)\n}\n\n// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within\n// the active peer set, ordered by their reputation.\nfunc (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {\n\tidle := func(p *peerConnection) bool {\n\t\treturn atomic.LoadInt32(&p.blockIdle) == 0\n\t}\n\tthroughput := func(p *peerConnection) float64 {\n\t\tp.lock.RLock()\n\t\tdefer p.lock.RUnlock()\n\t\treturn p.blockThroughput\n\t}\n\treturn ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput)\n}\n\n// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers\n// within the active peer set, ordered by their reputation.\nfunc (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {\n\tidle := func(p *peerConnection) bool {\n\t\treturn atomic.LoadInt32(&p.receiptIdle) == 0\n\t}\n\tthroughput := func(p *peerConnection) float64 {\n\t\tp.lock.RLock()\n\t\tdefer p.lock.RUnlock()\n\t\treturn p.receiptThroughput\n\t}\n\treturn ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput)\n}\n\n// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle\n// peers within the active peer set, ordered by their reputation.\nfunc (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {\n\tidle := func(p *peerConnection) bool {\n\t\treturn atomic.LoadInt32(&p.stateIdle) == 0\n\t}\n\tthroughput := func(p *peerConnection) float64 {\n\t\tp.lock.RLock()\n\t\tdefer p.lock.RUnlock()\n\t\treturn p.stateThroughput\n\t}\n\treturn ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput)\n}\n\n// idlePeers retrieves a flat list of all currently idle peers satisfying the\n// protocol version constraints, using the provided function to check idleness.\n// The resulting set of peers is sorted by their measured throughput.\nfunc (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peerConnection) bool, throughput func(*peerConnection) float64) ([]*peerConnection, int) {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\tidle, total := make([]*peerConnection, 0, len(ps.peers)), 0\n\ttps := make([]float64, 0, len(ps.peers))\n\tfor _, p := range ps.peers {\n\t\tif p.version >= minProtocol && p.version <= maxProtocol {\n\t\t\tif idleCheck(p) {\n\t\t\t\tidle = append(idle, p)\n\t\t\t\ttps = append(tps, throughput(p))\n\t\t\t}\n\t\t\ttotal++\n\t\t}\n\t}\n\t// And sort them\n\tsortPeers := &peerThroughputSort{idle, tps}\n\tsort.Sort(sortPeers)\n\treturn sortPeers.p, total\n}\n
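\n// Editor's illustrative note (not part of the original go-ethereum file) for\n// medianRTT below: with sorted RTTs of [300ms, 500ms, 800ms, 1s, 4s] and the\n// downloader's qosTuningPeers = 5, the median is rtts[5/2] = rtts[2] = 800ms.\n// That value is then clamped into [rttMinEstimate, rttMaxEstimate] (roughly 2s\n// and 20s in this package at the time of writing), so 800ms would be raised to\n// the 2s floor before being used as a QoS default.\n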
\n// medianRTT returns the median RTT of the peerset, considering only the tuning\n// peers if there are more peers available.\nfunc (ps *peerSet) medianRTT() time.Duration {\n\t// Gather all the currently measured round trip times\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\trtts := make([]float64, 0, len(ps.peers))\n\tfor _, p := range ps.peers {\n\t\tp.lock.RLock()\n\t\trtts = append(rtts, float64(p.rtt))\n\t\tp.lock.RUnlock()\n\t}\n\tsort.Float64s(rtts)\n\n\tmedian := rttMaxEstimate\n\tif qosTuningPeers <= len(rtts) {\n\t\tmedian = time.Duration(rtts[qosTuningPeers/2]) // Median of our tuning peers\n\t} else if len(rtts) > 0 {\n\t\tmedian = time.Duration(rtts[len(rtts)/2]) // Median of our connected peers (maintains some baseline QoS even in this case)\n\t}\n\t// Restrict the RTT to the QoS defaults, irrespective of the true RTT\n\tif median < rttMinEstimate {\n\t\tmedian = rttMinEstimate\n\t}\n\tif median > rttMaxEstimate {\n\t\tmedian = rttMaxEstimate\n\t}\n\treturn median\n}\n\n// peerThroughputSort implements the Sort interface, and allows for\n// sorting a set of peers by their throughput.\n// The data is sorted with the _highest_ throughput first.\ntype peerThroughputSort struct {\n\tp  []*peerConnection\n\ttp []float64\n}\n\nfunc (ps *peerThroughputSort) Len() int {\n\treturn len(ps.p)\n}\n\nfunc (ps *peerThroughputSort) Less(i, j int) bool {\n\treturn ps.tp[i] > ps.tp[j]\n}\n\nfunc (ps *peerThroughputSort) Swap(i, j int) {\n\tps.p[i], ps.p[j] = ps.p[j], ps.p[i]\n\tps.tp[i], ps.tp[j] = ps.tp[j], ps.tp[i]\n}\n"
  },
  {
    "path": "eth/downloader/peer_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of go-ethereum.\n//\n// go-ethereum is free software: you can redistribute it and/or modify\n// it under the terms of the GNU General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// go-ethereum is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU General Public License for more details.\n//\n// You should have received a copy of the GNU General Public License\n// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.\n\npackage downloader\n\nimport (\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc TestPeerThroughputSorting(t *testing.T) {\n\ta := &peerConnection{\n\t\tid:               \"a\",\n\t\theaderThroughput: 1.25,\n\t}\n\tb := &peerConnection{\n\t\tid:               \"b\",\n\t\theaderThroughput: 1.21,\n\t}\n\tc := &peerConnection{\n\t\tid:               \"c\",\n\t\theaderThroughput: 1.23,\n\t}\n\n\tpeers := []*peerConnection{a, b, c}\n\ttps := []float64{a.headerThroughput,\n\t\tb.headerThroughput, c.headerThroughput}\n\tsortPeers := &peerThroughputSort{peers, tps}\n\tsort.Sort(sortPeers)\n\tif got, exp := sortPeers.p[0].id, \"a\"; got != exp {\n\t\tt.Errorf(\"sort fail, got %v exp %v\", got, exp)\n\t}\n\tif got, exp := sortPeers.p[1].id, \"c\"; got != exp {\n\t\tt.Errorf(\"sort fail, got %v exp %v\", got, exp)\n\t}\n\tif got, exp := sortPeers.p[2].id, \"b\"; got != exp {\n\t\tt.Errorf(\"sort fail, got %v exp %v\", got, exp)\n\t}\n\n}\n"
  },
  {
    "path": "eth/downloader/queue.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Contains the block download scheduler to collect download tasks and schedule\n// them in an ordered, and throttled way.\n\npackage downloader\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/prque\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/metrics\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\nconst (\n\tbodyType    = uint(0)\n\treceiptType = uint(1)\n)\n\nvar (\n\tblockCacheMaxItems     = 8192             // Maximum number of blocks to cache before throttling the download\n\tblockCacheInitialItems = 2048             // Initial number of blocks to start fetching, before we know the sizes of the blocks\n\tblockCacheMemory       = 64 * 1024 * 1024 // Maximum amount of memory to use for block caching\n\tblockCacheSizeWeight   = 0.1              // Multiplier to approximate the average block size based on past ones\n)\n\nvar (\n\terrNoFetchesPending = errors.New(\"no fetches pending\")\n\terrStaleDelivery    = errors.New(\"stale delivery\")\n)\n\n// fetchRequest is a currently running data retrieval operation.\ntype fetchRequest struct {\n\tPeer    *peerConnection // Peer to which the request was sent\n\tFrom    uint64          // [eth/62] Requested chain element index (used for skeleton fills only)\n\tHeaders []*types.Header // [eth/62] Requested headers, sorted by request order\n\tTime    time.Time       // Time when the request was made\n}\n\n// fetchResult is a struct collecting partial results from data fetchers until\n// all outstanding pieces complete and the result as a whole can be processed.\ntype fetchResult struct {\n\tpending int32 // Flag telling what deliveries are outstanding\n\n\tHeader       *types.Header\n\tUncles       []*types.Header\n\tTransactions types.Transactions\n\tReceipts     types.Receipts\n}\n\nfunc newFetchResult(header *types.Header, fastSync bool) *fetchResult {\n\titem := &fetchResult{\n\t\tHeader: header,\n\t}\n\tif !header.EmptyBody() {\n\t\titem.pending |= (1 << bodyType)\n\t}\n\tif fastSync && !header.EmptyReceipts() {\n\t\titem.pending |= (1 << receiptType)\n\t}\n\treturn item\n}\n\n// SetBodyDone flags the body as finished.\nfunc (f *fetchResult) SetBodyDone() {\n\tif v := atomic.LoadInt32(&f.pending); (v & (1 << bodyType)) != 0 {\n\t\tatomic.AddInt32(&f.pending, -1)\n\t}\n}\n\n// AllDone checks if item is done.\nfunc (f *fetchResult) AllDone() bool {\n\treturn atomic.LoadInt32(&f.pending) == 0\n}\n\n// SetReceiptsDone flags the receipts as finished.\nfunc (f *fetchResult) SetReceiptsDone() {\n\tif v := 
\n// SetReceiptsDone flags the receipts as finished.\nfunc (f *fetchResult) SetReceiptsDone() {\n\tif v := atomic.LoadInt32(&f.pending); (v & (1 << receiptType)) != 0 {\n\t\tatomic.AddInt32(&f.pending, -2)\n\t}\n}\n\n// Done checks whether the given type is done already\nfunc (f *fetchResult) Done(kind uint) bool {\n\tv := atomic.LoadInt32(&f.pending)\n\treturn v&(1<<kind) == 0\n}\n\n// queue represents hashes that either need fetching or are being fetched\ntype queue struct {\n\tmode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching\n\n\t// Headers are \"special\", they download in batches, supported by a skeleton chain\n\theaderHead      common.Hash                    // Hash of the last queued header to verify order\n\theaderTaskPool  map[uint64]*types.Header       // Pending header retrieval tasks, mapping starting indexes to skeleton headers\n\theaderTaskQueue *prque.Prque                   // Priority queue of the skeleton indexes to fetch the filling headers for\n\theaderPeerMiss  map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable\n\theaderPendPool  map[string]*fetchRequest       // Currently pending header retrieval operations\n\theaderResults   []*types.Header                // Result cache accumulating the completed headers\n\theaderProced    int                            // Number of headers already processed from the results\n\theaderOffset    uint64                         // Number of the first header in the result cache\n\theaderContCh    chan bool                      // Channel to notify when header download finishes\n\n\t// All data retrievals below are based on an already assembled header chain\n\tblockTaskPool  map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers\n\tblockTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the blocks (bodies) for\n\tblockPendPool  map[string]*fetchRequest      // Currently pending block (body) retrieval operations\n\n\treceiptTaskPool  map[common.Hash]*types.Header // Pending receipt retrieval tasks, mapping hashes to headers\n\treceiptTaskQueue *prque.Prque                  // Priority queue of the headers to fetch the receipts for\n\treceiptPendPool  map[string]*fetchRequest      // Currently pending receipt retrieval operations\n\n\tresultCache *resultStore       // Downloaded but not yet delivered fetch results\n\tresultSize  common.StorageSize // Approximate size of a block (exponential moving average)\n\n\tlock   *sync.RWMutex\n\tactive *sync.Cond\n\tclosed bool\n\n\tlastStatLog time.Time\n}\n\n// newQueue creates a new download queue for scheduling block retrieval.\nfunc newQueue(blockCacheLimit int, thresholdInitialSize int) *queue {\n\tlock := new(sync.RWMutex)\n\tq := &queue{\n\t\theaderContCh:     make(chan bool),\n\t\tblockTaskQueue:   prque.New(nil),\n\t\treceiptTaskQueue: prque.New(nil),\n\t\tactive:           sync.NewCond(lock),\n\t\tlock:             lock,\n\t}\n\tq.Reset(blockCacheLimit, thresholdInitialSize)\n\treturn q\n}\n\n// Reset clears out the queue contents.\nfunc (q *queue) Reset(blockCacheLimit int, thresholdInitialSize int) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\tq.closed = false\n\tq.mode = FullSync\n\n\tq.headerHead = common.Hash{}\n\tq.headerPendPool = make(map[string]*fetchRequest)\n\n\tq.blockTaskPool = make(map[common.Hash]*types.Header)\n\tq.blockTaskQueue.Reset()\n\tq.blockPendPool = make(map[string]*fetchRequest)\n\n\tq.receiptTaskPool = make(map[common.Hash]*types.Header)\n\tq.receiptTaskQueue.Reset()\n\tq.receiptPendPool = 
make(map[string]*fetchRequest)\n\n\tq.resultCache = newResultStore(blockCacheLimit)\n\tq.resultCache.SetThrottleThreshold(uint64(thresholdInitialSize))\n}\n\n// Close marks the end of the sync, unblocking Results.\n// It may be called even if the queue is already closed.\nfunc (q *queue) Close() {\n\tq.lock.Lock()\n\tq.closed = true\n\tq.active.Signal()\n\tq.lock.Unlock()\n}\n\n// PendingHeaders retrieves the number of header requests pending for retrieval.\nfunc (q *queue) PendingHeaders() int {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\treturn q.headerTaskQueue.Size()\n}\n\n// PendingBlocks retrieves the number of block (body) requests pending for retrieval.\nfunc (q *queue) PendingBlocks() int {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\treturn q.blockTaskQueue.Size()\n}\n\n// PendingReceipts retrieves the number of block receipts pending for retrieval.\nfunc (q *queue) PendingReceipts() int {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\treturn q.receiptTaskQueue.Size()\n}\n\n// InFlightHeaders retrieves whether there are header fetch requests currently\n// in flight.\nfunc (q *queue) InFlightHeaders() bool {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\treturn len(q.headerPendPool) > 0\n}\n\n// InFlightBlocks retrieves whether there are block fetch requests currently in\n// flight.\nfunc (q *queue) InFlightBlocks() bool {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\treturn len(q.blockPendPool) > 0\n}\n\n// InFlightReceipts retrieves whether there are receipt fetch requests currently\n// in flight.\nfunc (q *queue) InFlightReceipts() bool {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\treturn len(q.receiptPendPool) > 0\n}\n\n// Idle returns whether the queue is fully idle or has some data still inside.\nfunc (q *queue) Idle() bool {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\tqueued := q.blockTaskQueue.Size() + q.receiptTaskQueue.Size()\n\tpending := len(q.blockPendPool) + len(q.receiptPendPool)\n\n\treturn (queued + pending) == 0\n}\n\n// ScheduleSkeleton adds a batch of header retrieval tasks to the queue to fill\n// up an already retrieved header skeleton.\nfunc (q *queue) ScheduleSkeleton(from uint64, skeleton []*types.Header) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\t// No skeleton retrieval can be in progress, fail hard if so (huge implementation bug)\n\tif q.headerResults != nil {\n\t\tpanic(\"skeleton assembly already in progress\")\n\t}\n\t// Schedule all the header retrieval tasks for the skeleton assembly\n\tq.headerTaskPool = make(map[uint64]*types.Header)\n\tq.headerTaskQueue = prque.New(nil)\n\tq.headerPeerMiss = make(map[string]map[uint64]struct{}) // Reset availability to correct invalid chains\n\tq.headerResults = make([]*types.Header, len(skeleton)*MaxHeaderFetch)\n\tq.headerProced = 0\n\tq.headerOffset = from\n\tq.headerContCh = make(chan bool, 1)\n\n\tfor i, header := range skeleton {\n\t\tindex := from + uint64(i*MaxHeaderFetch)\n\n\t\tq.headerTaskPool[index] = header\n\t\tq.headerTaskQueue.Push(index, -int64(index))\n\t}\n}\n\n// RetrieveHeaders retrieves the header chain assembled based on the scheduled\n// skeleton.\nfunc (q *queue) RetrieveHeaders() ([]*types.Header, int) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\theaders, proced := q.headerResults, q.headerProced\n\tq.headerResults, q.headerProced = nil, 0\n\n\treturn headers, proced\n}\n
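\n// Editor's illustrative note (not part of the original go-ethereum file) on\n// ScheduleSkeleton above: with from = 1 and MaxHeaderFetch = 192 (the per-\n// request header limit defined alongside the downloader), the skeleton tasks\n// are keyed 1, 193, 385, ..., one per 192-header gap, and headerResults\n// reserves len(skeleton)*192 slots so each filled batch can later be copied to\n// offset request.From - headerOffset without any further bookkeeping.\n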
\n// Schedule adds a set of headers to the download queue for scheduling, returning\n// the new headers encountered.\nfunc (q *queue) Schedule(headers []*types.Header, from uint64) []*types.Header {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\t// Insert all the headers prioritised by the contained block number\n\tinserts := make([]*types.Header, 0, len(headers))\n\tfor _, header := range headers {\n\t\t// Make sure chain order is honoured and preserved throughout\n\t\thash := header.Hash()\n\t\tif header.Number == nil || header.Number.Uint64() != from {\n\t\t\tlog.Warn(\"Header broke chain ordering\", \"number\", header.Number, \"hash\", hash, \"expected\", from)\n\t\t\tbreak\n\t\t}\n\t\tif q.headerHead != (common.Hash{}) && q.headerHead != header.ParentHash {\n\t\t\tlog.Warn(\"Header broke chain ancestry\", \"number\", header.Number, \"hash\", hash)\n\t\t\tbreak\n\t\t}\n\t\t// Make sure no duplicate requests are executed\n\t\t// We cannot skip this, even if the block is empty, since this is\n\t\t// what triggers the fetchResult creation.\n\t\tif _, ok := q.blockTaskPool[hash]; ok {\n\t\t\tlog.Warn(\"Header already scheduled for block fetch\", \"number\", header.Number, \"hash\", hash)\n\t\t} else {\n\t\t\tq.blockTaskPool[hash] = header\n\t\t\tq.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))\n\t\t}\n\t\t// Queue for receipt retrieval\n\t\tif q.mode == FastSync && !header.EmptyReceipts() {\n\t\t\tif _, ok := q.receiptTaskPool[hash]; ok {\n\t\t\t\tlog.Warn(\"Header already scheduled for receipt fetch\", \"number\", header.Number, \"hash\", hash)\n\t\t\t} else {\n\t\t\t\tq.receiptTaskPool[hash] = header\n\t\t\t\tq.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))\n\t\t\t}\n\t\t}\n\t\tinserts = append(inserts, header)\n\t\tq.headerHead = hash\n\t\tfrom++\n\t}\n\treturn inserts\n}\n\n// Results retrieves and permanently removes a batch of fetch results from\n// the cache. The result slice will be empty if the queue has been closed.\n// Results can be called concurrently with Deliver and Schedule,\n// but it assumes that there are no two simultaneous callers to Results.\nfunc (q *queue) Results(block bool) []*fetchResult {\n\t// Abort early if there are no items and non-blocking requested\n\tif !block && !q.resultCache.HasCompletedItems() {\n\t\treturn nil\n\t}\n\tclosed := false\n\tfor !closed && !q.resultCache.HasCompletedItems() {\n\t\t// In order to wait on 'active', we need to obtain the lock.\n\t\t// That may take a while, if someone is delivering at the same\n\t\t// time, so after obtaining the lock, we check again if there\n\t\t// are any results to fetch.\n\t\t// Also, between asking for the lock and obtaining it,\n\t\t// someone may have closed the queue. 
In that case, we should\n\t\t// return the available results and stop blocking\n\t\tq.lock.Lock()\n\t\tif q.resultCache.HasCompletedItems() || q.closed {\n\t\t\tq.lock.Unlock()\n\t\t\tbreak\n\t\t}\n\t\t// No items available, and not closed\n\t\tq.active.Wait()\n\t\tclosed = q.closed\n\t\tq.lock.Unlock()\n\t}\n\t// Regardless of whether it's closed or not, we can still deliver whatever we have\n\tresults := q.resultCache.GetCompleted(maxResultsProcess)\n\tfor _, result := range results {\n\t\t// Recalculate the result item weights to prevent memory exhaustion\n\t\tsize := result.Header.Size()\n\t\tfor _, uncle := range result.Uncles {\n\t\t\tsize += uncle.Size()\n\t\t}\n\t\tfor _, receipt := range result.Receipts {\n\t\t\tsize += receipt.Size()\n\t\t}\n\t\tfor _, tx := range result.Transactions {\n\t\t\tsize += tx.Size()\n\t\t}\n\t\tq.resultSize = common.StorageSize(blockCacheSizeWeight)*size +\n\t\t\t(1-common.StorageSize(blockCacheSizeWeight))*q.resultSize\n\t}\n\t// Using the newly calibrated resultsize, figure out the new throttle limit\n\t// on the result cache\n\tthrottleThreshold := uint64((common.StorageSize(blockCacheMemory) + q.resultSize - 1) / q.resultSize)\n\tthrottleThreshold = q.resultCache.SetThrottleThreshold(throttleThreshold)\n\n\t// Log some info at certain times\n\tif time.Since(q.lastStatLog) > 60*time.Second {\n\t\tq.lastStatLog = time.Now()\n\t\tinfo := q.Stats()\n\t\tinfo = append(info, \"throttle\", throttleThreshold)\n\t\tlog.Info(\"Downloader queue stats\", info...)\n\t}\n\treturn results\n}\n\nfunc (q *queue) Stats() []interface{} {\n\tq.lock.RLock()\n\tdefer q.lock.RUnlock()\n\n\treturn q.stats()\n}\n\nfunc (q *queue) stats() []interface{} {\n\treturn []interface{}{\n\t\t\"receiptTasks\", q.receiptTaskQueue.Size(),\n\t\t\"blockTasks\", q.blockTaskQueue.Size(),\n\t\t\"itemSize\", q.resultSize,\n\t}\n}\n\n// ReserveHeaders reserves a set of headers for the given peer, skipping any\n// previously failed batches.\nfunc (q *queue) ReserveHeaders(p *peerConnection, count int) *fetchRequest {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\t// Short circuit if the peer's already downloading something (sanity check to\n\t// not corrupt state)\n\tif _, ok := q.headerPendPool[p.id]; ok {\n\t\treturn nil\n\t}\n\t// Retrieve a batch of hashes, skipping previously failed ones\n\tsend, skip := uint64(0), []uint64{}\n\tfor send == 0 && !q.headerTaskQueue.Empty() {\n\t\tfrom, _ := q.headerTaskQueue.Pop()\n\t\tif q.headerPeerMiss[p.id] != nil {\n\t\t\tif _, ok := q.headerPeerMiss[p.id][from.(uint64)]; ok {\n\t\t\t\tskip = append(skip, from.(uint64))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tsend = from.(uint64)\n\t}\n\t// Merge all the skipped batches back\n\tfor _, from := range skip {\n\t\tq.headerTaskQueue.Push(from, -int64(from))\n\t}\n\t// Assemble and return the block download request\n\tif send == 0 {\n\t\treturn nil\n\t}\n\trequest := &fetchRequest{\n\t\tPeer: p,\n\t\tFrom: send,\n\t\tTime: time.Now(),\n\t}\n\tq.headerPendPool[p.id] = request\n\treturn request\n}\n
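\n// Editor's illustrative note (not part of the original go-ethereum file) on\n// the throttle maths in Results above: the expression\n// (blockCacheMemory + resultSize - 1) / resultSize is a ceiling division, so\n// with blockCacheMemory = 64 MiB and a smoothed resultSize of 64 KiB the\n// result cache is throttled at ceil(64 MiB / 64 KiB) = 1024 slots; heavier\n// blocks shrink the window, lighter ones widen it.\n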
\n// ReserveBodies reserves a set of body fetches for the given peer, skipping any\n// previously failed downloads. Besides the next batch of needed fetches, it also\n// returns a flag indicating whether empty blocks were queued requiring processing.\nfunc (q *queue) ReserveBodies(p *peerConnection, count int) (*fetchRequest, bool, bool) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\treturn q.reserveHeaders(p, count, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool, bodyType)\n}\n\n// ReserveReceipts reserves a set of receipt fetches for the given peer, skipping\n// any previously failed downloads. Besides the next batch of needed fetches, it\n// also returns a flag indicating whether empty receipts were queued requiring importing.\nfunc (q *queue) ReserveReceipts(p *peerConnection, count int) (*fetchRequest, bool, bool) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\treturn q.reserveHeaders(p, count, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool, receiptType)\n}\n\n// reserveHeaders reserves a set of data download operations for a given peer,\n// skipping any previously failed ones. This method is a generic version used\n// by the individual special reservation functions.\n//\n// Note, this method expects the queue lock to be already held for writing. The\n// reason the lock is not obtained in here is because the parameters already need\n// to access the queue, so they already need a lock anyway.\n//\n// Returns:\n//   item     - the fetchRequest\n//   progress - whether any progress was made\n//   throttle - whether the caller should throttle for a while\nfunc (q *queue) reserveHeaders(p *peerConnection, count int, taskPool map[common.Hash]*types.Header, taskQueue *prque.Prque,\n\tpendPool map[string]*fetchRequest, kind uint) (*fetchRequest, bool, bool) {\n\t// Short circuit if the pool has been depleted, or if the peer's already\n\t// downloading something (sanity check not to corrupt state)\n\tif taskQueue.Empty() {\n\t\treturn nil, false, true\n\t}\n\tif _, ok := pendPool[p.id]; ok {\n\t\treturn nil, false, false\n\t}\n\t// Retrieve a batch of tasks, skipping previously failed ones\n\tsend := make([]*types.Header, 0, count)\n\tskip := make([]*types.Header, 0)\n\tprogress := false\n\tthrottled := false\n\tfor proc := 0; len(send) < count && !taskQueue.Empty(); proc++ {\n\t\t// The task queue pops items in order, so the highest-priority block\n\t\t// is also the one with the lowest block number.\n\t\th, _ := taskQueue.Peek()\n\t\theader := h.(*types.Header)\n\t\t// We can ask the resultcache if this header is within the\n\t\t// \"prioritized\" segment of blocks. If it is not, we need to throttle\n\n\t\tstale, throttle, item, err := q.resultCache.AddFetch(header, q.mode == FastSync)\n\t\tif stale {\n\t\t\t// Don't put back in the task queue, this item has already been\n\t\t\t// delivered upstream\n\t\t\ttaskQueue.PopItem()\n\t\t\tprogress = true\n\t\t\tdelete(taskPool, header.Hash())\n\t\t\tproc = proc - 1\n\t\t\tlog.Error(\"Fetch reservation already delivered\", \"number\", header.Number.Uint64())\n\t\t\tcontinue\n\t\t}\n\t\tif throttle {\n\t\t\t// There are no result slots available. Leave it in the task queue\n\t\t\t// However, if there are any left as 'skipped', we should not tell\n\t\t\t// the caller to throttle, since we still want some other\n\t\t\t// peer to fetch those for us\n\t\t\tthrottled = len(skip) == 0\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\t// This most definitely should _not_ happen\n\t\t\tlog.Warn(\"Failed to reserve headers\", \"err\", err)\n\t\t\t// There are no result slots available. 
Leave it in the task queue\n\t\t\tbreak\n\t\t}\n\t\tif item.Done(kind) {\n\t\t\t// If it's a no-op, we can skip this task\n\t\t\tdelete(taskPool, header.Hash())\n\t\t\ttaskQueue.PopItem()\n\t\t\tproc = proc - 1\n\t\t\tprogress = true\n\t\t\tcontinue\n\t\t}\n\t\t// Remove it from the task queue\n\t\ttaskQueue.PopItem()\n\t\t// Otherwise unless the peer is known not to have the data, add to the retrieve list\n\t\tif p.Lacks(header.Hash()) {\n\t\t\tskip = append(skip, header)\n\t\t} else {\n\t\t\tsend = append(send, header)\n\t\t}\n\t}\n\t// Merge all the skipped headers back\n\tfor _, header := range skip {\n\t\ttaskQueue.Push(header, -int64(header.Number.Uint64()))\n\t}\n\tif q.resultCache.HasCompletedItems() {\n\t\t// Wake Results, resultCache was modified\n\t\tq.active.Signal()\n\t}\n\t// Assemble and return the block download request\n\tif len(send) == 0 {\n\t\treturn nil, progress, throttled\n\t}\n\trequest := &fetchRequest{\n\t\tPeer:    p,\n\t\tHeaders: send,\n\t\tTime:    time.Now(),\n\t}\n\tpendPool[p.id] = request\n\treturn request, progress, throttled\n}\n\n// CancelHeaders aborts a fetch request, returning all pending skeleton indexes to the queue.\nfunc (q *queue) CancelHeaders(request *fetchRequest) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\tq.cancel(request, q.headerTaskQueue, q.headerPendPool)\n}\n\n// CancelBodies aborts a body fetch request, returning all pending headers to the\n// task queue.\nfunc (q *queue) CancelBodies(request *fetchRequest) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\tq.cancel(request, q.blockTaskQueue, q.blockPendPool)\n}\n\n// CancelReceipts aborts a receipt fetch request, returning all pending headers to\n// the task queue.\nfunc (q *queue) CancelReceipts(request *fetchRequest) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\tq.cancel(request, q.receiptTaskQueue, q.receiptPendPool)\n}\n\n// cancel aborts a fetch request, returning all pending hashes to the task queue.\nfunc (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool map[string]*fetchRequest) {\n\tif request.From > 0 {\n\t\ttaskQueue.Push(request.From, -int64(request.From))\n\t}\n\tfor _, header := range request.Headers {\n\t\ttaskQueue.Push(header, -int64(header.Number.Uint64()))\n\t}\n\tdelete(pendPool, request.Peer.id)\n}\n\n// Revoke cancels all pending requests belonging to a given peer. 
This method is\n// meant to be called during a peer drop to quickly reassign owned data fetches\n// to remaining nodes.\nfunc (q *queue) Revoke(peerID string) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\tif request, ok := q.blockPendPool[peerID]; ok {\n\t\tfor _, header := range request.Headers {\n\t\t\tq.blockTaskQueue.Push(header, -int64(header.Number.Uint64()))\n\t\t}\n\t\tdelete(q.blockPendPool, peerID)\n\t}\n\tif request, ok := q.receiptPendPool[peerID]; ok {\n\t\tfor _, header := range request.Headers {\n\t\t\tq.receiptTaskQueue.Push(header, -int64(header.Number.Uint64()))\n\t\t}\n\t\tdelete(q.receiptPendPool, peerID)\n\t}\n}\n\n// ExpireHeaders checks for in-flight requests that exceeded a timeout allowance,\n// canceling them and returning the responsible peers for penalisation.\nfunc (q *queue) ExpireHeaders(timeout time.Duration) map[string]int {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\treturn q.expire(timeout, q.headerPendPool, q.headerTaskQueue, headerTimeoutMeter)\n}\n\n// ExpireBodies checks for in-flight block body requests that exceeded a timeout\n// allowance, canceling them and returning the responsible peers for penalisation.\nfunc (q *queue) ExpireBodies(timeout time.Duration) map[string]int {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\treturn q.expire(timeout, q.blockPendPool, q.blockTaskQueue, bodyTimeoutMeter)\n}\n\n// ExpireReceipts checks for in-flight receipt requests that exceeded a timeout\n// allowance, canceling them and returning the responsible peers for penalisation.\nfunc (q *queue) ExpireReceipts(timeout time.Duration) map[string]int {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\treturn q.expire(timeout, q.receiptPendPool, q.receiptTaskQueue, receiptTimeoutMeter)\n}\n\n// expire is the generic check that moves expired tasks from a pending pool back\n// into a task pool, returning all entities caught with expired tasks.\n//\n// Note, this method expects the queue lock to be already held. The\n// reason the lock is not obtained in here is because the parameters already need\n// to access the queue, so they already need a lock anyway.\nfunc (q *queue) expire(timeout time.Duration, pendPool map[string]*fetchRequest, taskQueue *prque.Prque, timeoutMeter metrics.Meter) map[string]int {\n\t// Iterate over the expired requests and return each to the queue\n\texpiries := make(map[string]int)\n\tfor id, request := range pendPool {\n\t\tif time.Since(request.Time) > timeout {\n\t\t\t// Update the metrics with the timeout\n\t\t\ttimeoutMeter.Mark(1)\n\n\t\t\t// Return any non-satisfied requests to the pool\n\t\t\tif request.From > 0 {\n\t\t\t\ttaskQueue.Push(request.From, -int64(request.From))\n\t\t\t}\n\t\t\tfor _, header := range request.Headers {\n\t\t\t\ttaskQueue.Push(header, -int64(header.Number.Uint64()))\n\t\t\t}\n\t\t\t// Add the peer to the expiry report along with the number of failed requests\n\t\t\texpiries[id] = len(request.Headers)\n\n\t\t\t// Remove the expired requests from the pending pool directly\n\t\t\tdelete(pendPool, id)\n\t\t}\n\t}\n\treturn expiries\n}\n\n// DeliverHeaders injects a header retrieval response into the header results\n// cache. This method either accepts all headers it received, or none of them\n// if they do not map correctly to the skeleton.\n//\n// If the headers are accepted, the method makes an attempt to deliver the set\n// of ready headers to the processor to keep the pipeline full. 
However it will\n// not block to prevent stalling other pending deliveries.\nfunc (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh chan []*types.Header) (int, error) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\tvar logger log.Logger\n\tif len(id) < 16 {\n\t\t// Tests use short IDs, don't choke on them\n\t\tlogger = log.New(\"peer\", id)\n\t} else {\n\t\tlogger = log.New(\"peer\", id[:16])\n\t}\n\t// Short circuit if the data was never requested\n\trequest := q.headerPendPool[id]\n\tif request == nil {\n\t\treturn 0, errNoFetchesPending\n\t}\n\theaderReqTimer.UpdateSince(request.Time)\n\tdelete(q.headerPendPool, id)\n\n\t// Ensure headers can be mapped onto the skeleton chain\n\ttarget := q.headerTaskPool[request.From].Hash()\n\n\taccepted := len(headers) == MaxHeaderFetch\n\tif accepted {\n\t\tif headers[0].Number.Uint64() != request.From {\n\t\t\tlogger.Trace(\"First header broke chain ordering\", \"number\", headers[0].Number, \"hash\", headers[0].Hash(), \"expected\", request.From)\n\t\t\taccepted = false\n\t\t} else if headers[len(headers)-1].Hash() != target {\n\t\t\tlogger.Trace(\"Last header broke skeleton structure\", \"number\", headers[len(headers)-1].Number, \"hash\", headers[len(headers)-1].Hash(), \"expected\", target)\n\t\t\taccepted = false\n\t\t}\n\t}\n\tif accepted {\n\t\tparentHash := headers[0].Hash()\n\t\tfor i, header := range headers[1:] {\n\t\t\thash := header.Hash()\n\t\t\tif want := request.From + 1 + uint64(i); header.Number.Uint64() != want {\n\t\t\t\tlogger.Warn(\"Header broke chain ordering\", \"number\", header.Number, \"hash\", hash, \"expected\", want)\n\t\t\t\taccepted = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif parentHash != header.ParentHash {\n\t\t\t\tlogger.Warn(\"Header broke chain ancestry\", \"number\", header.Number, \"hash\", hash)\n\t\t\t\taccepted = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Set-up parent hash for next round\n\t\t\tparentHash = hash\n\t\t}\n\t}\n\t// If the batch of headers wasn't accepted, mark as unavailable\n\tif !accepted {\n\t\tlogger.Trace(\"Skeleton filling not accepted\", \"from\", request.From)\n\n\t\tmiss := q.headerPeerMiss[id]\n\t\tif miss == nil {\n\t\t\tq.headerPeerMiss[id] = make(map[uint64]struct{})\n\t\t\tmiss = q.headerPeerMiss[id]\n\t\t}\n\t\tmiss[request.From] = struct{}{}\n\n\t\tq.headerTaskQueue.Push(request.From, -int64(request.From))\n\t\treturn 0, errors.New(\"delivery not accepted\")\n\t}\n\t// Clean up a successful fetch and try to deliver any sub-results\n\tcopy(q.headerResults[request.From-q.headerOffset:], headers)\n\tdelete(q.headerTaskPool, request.From)\n\n\tready := 0\n\tfor q.headerProced+ready < len(q.headerResults) && q.headerResults[q.headerProced+ready] != nil {\n\t\tready += MaxHeaderFetch\n\t}\n\tif ready > 0 {\n\t\t// Headers are ready for delivery, gather them and push forward (non-blocking)\n\t\tprocess := make([]*types.Header, ready)\n\t\tcopy(process, q.headerResults[q.headerProced:q.headerProced+ready])\n\n\t\tselect {\n\t\tcase headerProcCh <- process:\n\t\t\tlogger.Trace(\"Pre-scheduled new headers\", \"count\", len(process), \"from\", process[0].Number)\n\t\t\tq.headerProced += len(process)\n\t\tdefault:\n\t\t}\n\t}\n\t// Check for termination and return\n\tif len(q.headerTaskPool) == 0 {\n\t\tq.headerContCh <- false\n\t}\n\treturn len(headers), nil\n}\n\n// DeliverBodies injects a block body retrieval response into the results queue.\n// The method returns the number of block bodies accepted from the delivery and\n// also wakes any threads waiting for 
data delivery.\nfunc (q *queue) DeliverBodies(id string, txLists [][]*types.Transaction, uncleLists [][]*types.Header) (int, error) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\tvalidate := func(index int, header *types.Header) error {\n\t\tif types.DeriveSha(types.Transactions(txLists[index]), trie.NewStackTrie(nil)) != header.TxHash {\n\t\t\treturn errInvalidBody\n\t\t}\n\t\tif types.CalcUncleHash(uncleLists[index]) != header.UncleHash {\n\t\t\treturn errInvalidBody\n\t\t}\n\t\treturn nil\n\t}\n\n\treconstruct := func(index int, result *fetchResult) {\n\t\tresult.Transactions = txLists[index]\n\t\tresult.Uncles = uncleLists[index]\n\t\tresult.SetBodyDone()\n\t}\n\treturn q.deliver(id, q.blockTaskPool, q.blockTaskQueue, q.blockPendPool,\n\t\tbodyReqTimer, len(txLists), validate, reconstruct)\n}\n\n// DeliverReceipts injects a receipt retrieval response into the results queue.\n// The method returns the number of transaction receipts accepted from the delivery\n// and also wakes any threads waiting for data delivery.\nfunc (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt) (int, error) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\tvalidate := func(index int, header *types.Header) error {\n\t\tif types.DeriveSha(types.Receipts(receiptList[index]), trie.NewStackTrie(nil)) != header.ReceiptHash {\n\t\t\treturn errInvalidReceipt\n\t\t}\n\t\treturn nil\n\t}\n\treconstruct := func(index int, result *fetchResult) {\n\t\tresult.Receipts = receiptList[index]\n\t\tresult.SetReceiptsDone()\n\t}\n\treturn q.deliver(id, q.receiptTaskPool, q.receiptTaskQueue, q.receiptPendPool,\n\t\treceiptReqTimer, len(receiptList), validate, reconstruct)\n}\n\n// deliver injects a data retrieval response into the results queue.\n//\n// Note, this method expects the queue lock to be already held for writing. The\n// reason this lock is not obtained in here is because the parameters already need\n// to access the queue, so they already need a lock anyway.\nfunc (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,\n\ttaskQueue *prque.Prque, pendPool map[string]*fetchRequest, reqTimer metrics.Timer,\n\tresults int, validate func(index int, header *types.Header) error,\n\treconstruct func(index int, result *fetchResult)) (int, error) {\n\n\t// Short circuit if the data was never requested\n\trequest := pendPool[id]\n\tif request == nil {\n\t\treturn 0, errNoFetchesPending\n\t}\n\treqTimer.UpdateSince(request.Time)\n\tdelete(pendPool, id)\n\n\t// If no data items were retrieved, mark them as unavailable for the origin peer\n\tif results == 0 {\n\t\tfor _, header := range request.Headers {\n\t\t\trequest.Peer.MarkLacking(header.Hash())\n\t\t}\n\t}\n\t// Assemble each of the results with their headers and retrieved data parts\n\tvar (\n\t\taccepted int\n\t\tfailure  error\n\t\ti        int\n\t\thashes   []common.Hash\n\t)\n\tfor _, header := range request.Headers {\n\t\t// Short circuit assembly if no more fetch results are found\n\t\tif i >= results {\n\t\t\tbreak\n\t\t}\n\t\t// Validate the fields\n\t\tif err := validate(i, header); err != nil {\n\t\t\tfailure = err\n\t\t\tbreak\n\t\t}\n\t\thashes = append(hashes, header.Hash())\n\t\ti++\n\t}\n\n\tfor _, header := range request.Headers[:i] {\n\t\tif res, stale, err := q.resultCache.GetDeliverySlot(header.Number.Uint64()); err == nil {\n\t\t\treconstruct(accepted, res)\n\t\t} else {\n\t\t\t// Between here and above, some other peer filled this result,\n\t\t\t// or it was indeed a no-op. 
This should not happen, but if it does it's\n\t\t\t// not something to panic about\n\t\t\tlog.Error(\"Delivery stale\", \"stale\", stale, \"number\", header.Number.Uint64(), \"err\", err)\n\t\t\tfailure = errStaleDelivery\n\t\t}\n\t\t// Clean up a successful fetch\n\t\tdelete(taskPool, hashes[accepted])\n\t\taccepted++\n\t}\n\t// Return all failed or missing fetches to the queue\n\tfor _, header := range request.Headers[accepted:] {\n\t\ttaskQueue.Push(header, -int64(header.Number.Uint64()))\n\t}\n\t// Wake up Results\n\tif accepted > 0 {\n\t\tq.active.Signal()\n\t}\n\tif failure == nil {\n\t\treturn accepted, nil\n\t}\n\t// If none of the data was good, it's a stale delivery\n\tif accepted > 0 {\n\t\treturn accepted, fmt.Errorf(\"partial failure: %v\", failure)\n\t}\n\treturn accepted, fmt.Errorf(\"%w: %v\", failure, errStaleDelivery)\n}\n\n// Prepare configures the result cache to allow accepting and caching inbound\n// fetch results.\nfunc (q *queue) Prepare(offset uint64, mode SyncMode) {\n\tq.lock.Lock()\n\tdefer q.lock.Unlock()\n\n\t// Prepare the queue for sync results\n\tq.resultCache.Prepare(offset)\n\tq.mode = mode\n}\n"
  },
  {
    "path": "eth/downloader/queue_test.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage downloader\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"math/rand\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\nvar (\n\ttestdb  = rawdb.NewMemoryDatabase()\n\tgenesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))\n)\n\n// makeChain creates a chain of n blocks starting at and including parent.\n// the returned hash chain is ordered head->parent. In addition, every 3rd block\n// contains a transaction and every 5th an uncle to allow testing correct block\n// reassembly.\nfunc makeChain(n int, seed byte, parent *types.Block, empty bool) ([]*types.Block, []types.Receipts) {\n\tblocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) {\n\t\tblock.SetCoinbase(common.Address{seed})\n\t\t// Add one tx to every secondblock\n\t\tif !empty && i%2 == 0 {\n\t\t\tsigner := types.MakeSigner(params.TestChainConfig, block.Number())\n\t\t\ttx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tblock.AddTx(tx)\n\t\t}\n\t})\n\treturn blocks, receipts\n}\n\ntype chainData struct {\n\tblocks []*types.Block\n\toffset int\n}\n\nvar chain *chainData\nvar emptyChain *chainData\n\nfunc init() {\n\t// Create a chain of blocks to import\n\ttargetBlocks := 128\n\tblocks, _ := makeChain(targetBlocks, 0, genesis, false)\n\tchain = &chainData{blocks, 0}\n\n\tblocks, _ = makeChain(targetBlocks, 0, genesis, true)\n\temptyChain = &chainData{blocks, 0}\n}\n\nfunc (chain *chainData) headers() []*types.Header {\n\thdrs := make([]*types.Header, len(chain.blocks))\n\tfor i, b := range chain.blocks {\n\t\thdrs[i] = b.Header()\n\t}\n\treturn hdrs\n}\n\nfunc (chain *chainData) Len() int {\n\treturn len(chain.blocks)\n}\n\nfunc dummyPeer(id string) *peerConnection {\n\tp := &peerConnection{\n\t\tid:      id,\n\t\tlacking: make(map[common.Hash]struct{}),\n\t}\n\treturn p\n}\n\nfunc TestBasics(t *testing.T) {\n\tnumOfBlocks := len(emptyChain.blocks)\n\tnumOfReceipts := len(emptyChain.blocks) / 2\n\n\tq := newQueue(10, 10)\n\tif !q.Idle() {\n\t\tt.Errorf(\"new queue should be idle\")\n\t}\n\tq.Prepare(1, FastSync)\n\tif res := q.Results(false); len(res) != 0 {\n\t\tt.Fatal(\"new queue should have 0 results\")\n\t}\n\n\t// 
Schedule a batch of headers\n\tq.Schedule(chain.headers(), 1)\n\tif q.Idle() {\n\t\tt.Errorf(\"queue should not be idle\")\n\t}\n\tif got, exp := q.PendingBlocks(), chain.Len(); got != exp {\n\t\tt.Errorf(\"wrong pending block count, got %d, exp %d\", got, exp)\n\t}\n\t// Only non-empty receipts get added to task-queue\n\tif got, exp := q.PendingReceipts(), 64; got != exp {\n\t\tt.Errorf(\"wrong pending receipt count, got %d, exp %d\", got, exp)\n\t}\n\t// Items are now queued for downloading, next step is that we tell the\n\t// queue that a certain peer will deliver them for us\n\t{\n\t\tpeer := dummyPeer(\"peer-1\")\n\t\tfetchReq, _, throttle := q.ReserveBodies(peer, 50)\n\t\tif !throttle {\n\t\t\t// queue size is only 10, so throttling should occur\n\t\t\tt.Fatal(\"should throttle\")\n\t\t}\n\t\t// But we should still get the first things to fetch\n\t\tif got, exp := len(fetchReq.Headers), 5; got != exp {\n\t\t\tt.Fatalf(\"expected %d requests, got %d\", exp, got)\n\t\t}\n\t\tif got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {\n\t\t\tt.Fatalf(\"expected header %d, got %d\", exp, got)\n\t\t}\n\t}\n\tif exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got {\n\t\tt.Errorf(\"expected block task queue to be %d, got %d\", exp, got)\n\t}\n\tif exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got {\n\t\tt.Errorf(\"expected receipt task queue to be %d, got %d\", exp, got)\n\t}\n\t{\n\t\tpeer := dummyPeer(\"peer-2\")\n\t\tfetchReq, _, throttle := q.ReserveBodies(peer, 50)\n\n\t\t// The second peer should hit throttling\n\t\tif !throttle {\n\t\t\tt.Fatalf(\"should throttle\")\n\t\t}\n\t\t// And not get any fetches at all, since it was throttled to begin with\n\t\tif fetchReq != nil {\n\t\t\tt.Fatalf(\"should have no fetches, got %d\", len(fetchReq.Headers))\n\t\t}\n\t}\n\tif exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got {\n\t\tt.Errorf(\"expected block task queue to be %d, got %d\", exp, got)\n\t}\n\tif exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got {\n\t\tt.Errorf(\"expected receipt task queue to be %d, got %d\", exp, got)\n\t}\n\t{\n\t\t// The receipt delivering peer should not be affected\n\t\t// by the throttling of body deliveries\n\t\tpeer := dummyPeer(\"peer-3\")\n\t\tfetchReq, _, throttle := q.ReserveReceipts(peer, 50)\n\t\tif !throttle {\n\t\t\t// queue size is only 10, so throttling should occur\n\t\t\tt.Fatal(\"should throttle\")\n\t\t}\n\t\t// But we should still get the first things to fetch\n\t\tif got, exp := len(fetchReq.Headers), 5; got != exp {\n\t\t\tt.Fatalf(\"expected %d requests, got %d\", exp, got)\n\t\t}\n\t\tif got, exp := fetchReq.Headers[0].Number.Uint64(), uint64(1); got != exp {\n\t\t\tt.Fatalf(\"expected header %d, got %d\", exp, got)\n\t\t}\n\n\t}\n\tif exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got {\n\t\tt.Errorf(\"expected block task queue to be %d, got %d\", exp, got)\n\t}\n\tif exp, got := q.receiptTaskQueue.Size(), numOfReceipts-5; exp != got {\n\t\tt.Errorf(\"expected receipt task queue to be %d, got %d\", exp, got)\n\t}\n\tif got, exp := q.resultCache.countCompleted(), 0; got != exp {\n\t\tt.Errorf(\"wrong processable count, got %d, exp %d\", got, exp)\n\t}\n}\n\nfunc TestEmptyBlocks(t *testing.T) {\n\tnumOfBlocks := len(emptyChain.blocks)\n\n\tq := newQueue(10, 10)\n\n\tq.Prepare(1, FastSync)\n\t// Schedule a batch of headers\n\tq.Schedule(emptyChain.headers(), 1)\n\tif q.Idle() {\n\t\tt.Errorf(\"queue should not be idle\")\n\t}\n\tif got, exp := 
q.PendingBlocks(), len(emptyChain.blocks); got != exp {\n\t\tt.Errorf(\"wrong pending block count, got %d, exp %d\", got, exp)\n\t}\n\tif got, exp := q.PendingReceipts(), 0; got != exp {\n\t\tt.Errorf(\"wrong pending receipt count, got %d, exp %d\", got, exp)\n\t}\n\t// They won't be processable, because the fetch results haven't been\n\t// created yet\n\tif got, exp := q.resultCache.countCompleted(), 0; got != exp {\n\t\tt.Errorf(\"wrong processable count, got %d, exp %d\", got, exp)\n\t}\n\n\t// Items are now queued for downloading; the next step is to tell the\n\t// queue that a certain peer will deliver them for us.\n\t// That should trigger all of them to suddenly become 'done'\n\t{\n\t\t// Reserve blocks\n\t\tpeer := dummyPeer(\"peer-1\")\n\t\tfetchReq, _, _ := q.ReserveBodies(peer, 50)\n\n\t\t// there should be nothing to fetch, blocks are empty\n\t\tif fetchReq != nil {\n\t\t\tt.Fatal(\"there should be no body fetch tasks remaining\")\n\t\t}\n\n\t}\n\tif q.blockTaskQueue.Size() != numOfBlocks-10 {\n\t\tt.Errorf(\"expected block task queue to be %d, got %d\", numOfBlocks-10, q.blockTaskQueue.Size())\n\t}\n\tif q.receiptTaskQueue.Size() != 0 {\n\t\tt.Errorf(\"expected receipt task queue to be %d, got %d\", 0, q.receiptTaskQueue.Size())\n\t}\n\t{\n\t\tpeer := dummyPeer(\"peer-3\")\n\t\tfetchReq, _, _ := q.ReserveReceipts(peer, 50)\n\n\t\t// there should be nothing to fetch, blocks are empty\n\t\tif fetchReq != nil {\n\t\t\tt.Fatal(\"there should be no receipt fetch tasks remaining\")\n\t\t}\n\t}\n\tif q.blockTaskQueue.Size() != numOfBlocks-10 {\n\t\tt.Errorf(\"expected block task queue to be %d, got %d\", numOfBlocks-10, q.blockTaskQueue.Size())\n\t}\n\tif q.receiptTaskQueue.Size() != 0 {\n\t\tt.Errorf(\"expected receipt task queue to be %d, got %d\", 0, q.receiptTaskQueue.Size())\n\t}\n\tif got, exp := q.resultCache.countCompleted(), 10; got != exp {\n\t\tt.Errorf(\"wrong processable count, got %d, exp %d\", got, exp)\n\t}\n}\n\n// XTestDelivery does some more extensive testing of events that happen,\n// blocks that become known and peers that make reservations and deliveries.\n// Disabled since it's not really a unit test, but it can be executed to test\n// some more advanced scenarios\nfunc XTestDelivery(t *testing.T) {\n\t// the outside network, holding blocks\n\tblo, rec := makeChain(128, 0, genesis, false)\n\tworld := newNetwork()\n\tworld.receipts = rec\n\tworld.chain = blo\n\tworld.progress(10)\n\tif false {\n\t\tlog.Root().SetHandler(log.StdoutHandler)\n\n\t}\n\tq := newQueue(10, 10)\n\tvar wg sync.WaitGroup\n\tq.Prepare(1, FastSync)\n\twg.Add(1)\n\tgo func() {\n\t\t// deliver headers\n\t\tdefer wg.Done()\n\t\tc := 1\n\t\tfor {\n\t\t\t//fmt.Printf(\"getting headers from %d\\n\", c)\n\t\t\thdrs := world.headers(c)\n\t\t\tl := len(hdrs)\n\t\t\t//fmt.Printf(\"scheduling %d headers, first %d last %d\\n\",\n\t\t\t//\tl, hdrs[0].Number.Uint64(), hdrs[len(hdrs)-1].Number.Uint64())\n\t\t\tq.Schedule(hdrs, uint64(c))\n\t\t\tc += l\n\t\t}\n\t}()\n\twg.Add(1)\n\tgo func() {\n\t\t// collect results\n\t\tdefer wg.Done()\n\t\ttot := 0\n\t\tfor {\n\t\t\tres := q.Results(true)\n\t\t\ttot += len(res)\n\t\t\tfmt.Printf(\"got %d results, %d tot\\n\", len(res), tot)\n\t\t\t// Now we can forget about these\n\t\t\tworld.forget(res[len(res)-1].Header.Number.Uint64())\n\n\t\t}\n\t}()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\t// reserve body fetch\n\t\ti := 4\n\t\tfor {\n\t\t\tpeer := dummyPeer(fmt.Sprintf(\"peer-%d\", i))\n\t\t\tf, _, _ := q.ReserveBodies(peer, rand.Intn(30))\n\t\t\tif f != nil 
{\n\t\t\t\tvar emptyList []*types.Header\n\t\t\t\tvar txs [][]*types.Transaction\n\t\t\t\tvar uncles [][]*types.Header\n\t\t\t\tnumToSkip := rand.Intn(len(f.Headers))\n\t\t\t\tfor _, hdr := range f.Headers[0 : len(f.Headers)-numToSkip] {\n\t\t\t\t\ttxs = append(txs, world.getTransactions(hdr.Number.Uint64()))\n\t\t\t\t\tuncles = append(uncles, emptyList)\n\t\t\t\t}\n\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t\t_, err := q.DeliverBodies(peer.id, txs, uncles)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"delivered %d bodies %v\\n\", len(txs), err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ti++\n\t\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t}()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\t// reserve receipt fetch\n\t\tpeer := dummyPeer(\"peer-3\")\n\t\tfor {\n\t\t\tf, _, _ := q.ReserveReceipts(peer, rand.Intn(50))\n\t\t\tif f != nil {\n\t\t\t\tvar rcs [][]*types.Receipt\n\t\t\t\tfor _, hdr := range f.Headers {\n\t\t\t\t\trcs = append(rcs, world.getReceipts(hdr.Number.Uint64()))\n\t\t\t\t}\n\t\t\t\t_, err := q.DeliverReceipts(peer.id, rcs)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"delivered %d receipts %v\\n\", len(rcs), err)\n\t\t\t\t}\n\t\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\t} else {\n\t\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t}()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < 50; i++ {\n\t\t\ttime.Sleep(300 * time.Millisecond)\n\t\t\t//world.tick()\n\t\t\t//fmt.Printf(\"trying to progress\\n\")\n\t\t\tworld.progress(rand.Intn(100))\n\t\t}\n\t\tfor i := 0; i < 50; i++ {\n\t\t\ttime.Sleep(2990 * time.Millisecond)\n\n\t\t}\n\t}()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor {\n\t\t\ttime.Sleep(990 * time.Millisecond)\n\t\t\tfmt.Printf(\"world block tip is %d\\n\",\n\t\t\t\tworld.chain[len(world.chain)-1].Header().Number.Uint64())\n\t\t\tfmt.Println(q.Stats())\n\t\t}\n\t}()\n\twg.Wait()\n}\n\nfunc newNetwork() *network {\n\tvar l sync.RWMutex\n\treturn &network{\n\t\tcond:   sync.NewCond(&l),\n\t\toffset: 1, // block 1 is at blocks[0]\n\t}\n}\n\n// network represents the outside network that the tests sync against\ntype network struct {\n\toffset   int\n\tchain    []*types.Block\n\treceipts []types.Receipts\n\tlock     sync.RWMutex\n\tcond     *sync.Cond\n}\n\nfunc (n *network) getTransactions(blocknum uint64) types.Transactions {\n\tindex := blocknum - uint64(n.offset)\n\treturn n.chain[index].Transactions()\n}\nfunc (n *network) getReceipts(blocknum uint64) types.Receipts {\n\tindex := blocknum - uint64(n.offset)\n\tif got := n.chain[index].Header().Number.Uint64(); got != blocknum {\n\t\tfmt.Printf(\"Err, got %d exp %d\\n\", got, blocknum)\n\t\tpanic(\"sd\")\n\t}\n\treturn n.receipts[index]\n}\n\nfunc (n *network) forget(blocknum uint64) {\n\tindex := blocknum - uint64(n.offset)\n\tn.chain = n.chain[index:]\n\tn.receipts = n.receipts[index:]\n\tn.offset = int(blocknum)\n\n}\nfunc (n *network) progress(numBlocks int) {\n\n\tn.lock.Lock()\n\tdefer n.lock.Unlock()\n\t//fmt.Printf(\"progressing...\\n\")\n\tnewBlocks, newR := makeChain(numBlocks, 0, n.chain[len(n.chain)-1], false)\n\tn.chain = append(n.chain, newBlocks...)\n\tn.receipts = append(n.receipts, newR...)\n\tn.cond.Broadcast()\n\n}\n\nfunc (n *network) headers(from int) []*types.Header {\n\tnumHeaders := 128\n\tvar hdrs []*types.Header\n\tindex := from - n.offset\n\n\tfor index >= len(n.chain) {\n\t\t// wait for progress\n\t\tn.cond.L.Lock()\n\t\t//fmt.Printf(\"header going into wait\\n\")\n\t\tn.cond.Wait()\n\t\tindex = from - n.offset\n\t\tn.cond.L.Unlock()\n\t}\n\tn.lock.RLock()\n\tdefer 
n.lock.RUnlock()\n\tfor i, b := range n.chain[index:] {\n\t\thdrs = append(hdrs, b.Header())\n\t\tif i >= numHeaders {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn hdrs\n}\n"
  },
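  {
    "path": "docs/sketches/receipt_tasks.go",
    "content": "// Illustrative sketch, not part of upstream go-ethereum: this hypothetical\n// helper spells out the arithmetic behind TestBasics' expectation of 64\n// pending receipts. makeChain in queue_test.go puts a transaction into every\n// second block (i%2 == 0), and only blocks with transactions need their\n// receipts downloaded, so a 128-block chain yields 128 block tasks but only\n// 64 receipt tasks.\npackage sketches\n\n// expectedReceiptTasks mirrors makeChain's tx rule: blocks whose index\n// satisfies i%txEvery == 0 carry a transaction and thus need receipts.\nfunc expectedReceiptTasks(numBlocks, txEvery int) int {\n\ttasks := 0\n\tfor i := 0; i < numBlocks; i++ {\n\t\tif i%txEvery == 0 {\n\t\t\ttasks++\n\t\t}\n\t}\n\treturn tasks // expectedReceiptTasks(128, 2) == 64\n}\n"
  },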
  {
    "path": "eth/downloader/resultstore.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage downloader\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"sync/atomic\"\n\n\t\"github.com/ethereum/go-ethereum/core/types\"\n)\n\n// resultStore implements a structure for maintaining fetchResults, tracking their\n// download-progress and delivering (finished) results.\ntype resultStore struct {\n\titems        []*fetchResult // Downloaded but not yet delivered fetch results\n\tresultOffset uint64         // Offset of the first cached fetch result in the block chain\n\n\t// Internal index of first non-completed entry, updated atomically when needed.\n\t// If all items are complete, this will equal length(items), so\n\t// *important* : is not safe to use for indexing without checking against length\n\tindexIncomplete int32 // atomic access\n\n\t// throttleThreshold is the limit up to which we _want_ to fill the\n\t// results. If blocks are large, we want to limit the results to less\n\t// than the number of available slots, and maybe only fill 1024 out of\n\t// 8192 possible places. The queue will, at certain times, recalibrate\n\t// this index.\n\tthrottleThreshold uint64\n\n\tlock sync.RWMutex\n}\n\nfunc newResultStore(size int) *resultStore {\n\treturn &resultStore{\n\t\tresultOffset:      0,\n\t\titems:             make([]*fetchResult, size),\n\t\tthrottleThreshold: uint64(size),\n\t}\n}\n\n// SetThrottleThreshold updates the throttling threshold based on the requested\n// limit and the total queue capacity. It returns the (possibly capped) threshold\nfunc (r *resultStore) SetThrottleThreshold(threshold uint64) uint64 {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tlimit := uint64(len(r.items))\n\tif threshold >= limit {\n\t\tthreshold = limit\n\t}\n\tr.throttleThreshold = threshold\n\treturn r.throttleThreshold\n}\n\n// AddFetch adds a header for body/receipt fetching. 
This is used when the queue\n// wants to reserve headers for fetching.\n//\n// It returns the following:\n//   stale     - if true, this item has already been passed downstream and should not be requested again\n//   throttled - if true, the store is at capacity and this particular header is not a priority right now\n//   item      - the result to store data into\n//   err       - any error that occurred\nfunc (r *resultStore) AddFetch(header *types.Header, fastSync bool) (stale, throttled bool, item *fetchResult, err error) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tvar index int\n\titem, index, stale, throttled, err = r.getFetchResult(header.Number.Uint64())\n\tif err != nil || stale || throttled {\n\t\treturn stale, throttled, item, err\n\t}\n\tif item == nil {\n\t\titem = newFetchResult(header, fastSync)\n\t\tr.items[index] = item\n\t}\n\treturn stale, throttled, item, err\n}\n\n// GetDeliverySlot returns the fetchResult for the given header. If the 'stale' flag\n// is true, that means the header has already been delivered 'upstream'. This method\n// does not bubble up the 'throttle' flag, since it's moot at the point in time when\n// the item is downloaded and ready for delivery.\nfunc (r *resultStore) GetDeliverySlot(headerNumber uint64) (*fetchResult, bool, error) {\n\tr.lock.RLock()\n\tdefer r.lock.RUnlock()\n\n\tres, _, stale, _, err := r.getFetchResult(headerNumber)\n\treturn res, stale, err\n}\n\n// getFetchResult returns the fetchResult corresponding to the given item, and\n// the index where the result is stored.\nfunc (r *resultStore) getFetchResult(headerNumber uint64) (item *fetchResult, index int, stale, throttle bool, err error) {\n\tindex = int(int64(headerNumber) - int64(r.resultOffset))\n\tthrottle = index >= int(r.throttleThreshold)\n\tstale = index < 0\n\n\tif index >= len(r.items) {\n\t\terr = fmt.Errorf(\"%w: index allocation went beyond available resultStore space \"+\n\t\t\t\"(index [%d] = header [%d] - resultOffset [%d], len(resultStore) = %d)\", errInvalidChain,\n\t\t\tindex, headerNumber, r.resultOffset, len(r.items))\n\t\treturn nil, index, stale, throttle, err\n\t}\n\tif stale {\n\t\treturn nil, index, stale, throttle, nil\n\t}\n\titem = r.items[index]\n\treturn item, index, stale, throttle, nil\n}\n\n// HasCompletedItems returns true if there are processable items available.\n// This method is cheaper than countCompleted.\nfunc (r *resultStore) HasCompletedItems() bool {\n\tr.lock.RLock()\n\tdefer r.lock.RUnlock()\n\n\tif len(r.items) == 0 {\n\t\treturn false\n\t}\n\tif item := r.items[0]; item != nil && item.AllDone() {\n\t\treturn true\n\t}\n\treturn false\n}\n\n// countCompleted returns the number of items ready for delivery, stopping at\n// the first non-complete item.\n//\n// The method assumes (at least) the read lock is held.\nfunc (r *resultStore) countCompleted() int {\n\t// We iterate from the already known complete point, and see\n\t// if any more have completed since the last count\n\tindex := atomic.LoadInt32(&r.indexIncomplete)\n\tfor ; ; index++ {\n\t\tif index >= int32(len(r.items)) {\n\t\t\tbreak\n\t\t}\n\t\tresult := r.items[index]\n\t\tif result == nil || !result.AllDone() {\n\t\t\tbreak\n\t\t}\n\t}\n\tatomic.StoreInt32(&r.indexIncomplete, index)\n\treturn int(index)\n}\n\n// GetCompleted returns the next batch of completed fetchResults.\nfunc (r *resultStore) GetCompleted(limit int) []*fetchResult {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tcompleted := r.countCompleted()\n\tif limit > completed {\n\t\tlimit = completed\n\t}\n\tresults := make([]*fetchResult, 
limit)\n\tcopy(results, r.items[:limit])\n\n\t// Delete the results from the cache and clear the tail.\n\tcopy(r.items, r.items[limit:])\n\tfor i := len(r.items) - limit; i < len(r.items); i++ {\n\t\tr.items[i] = nil\n\t}\n\t// Advance the expected block number of the first cache entry\n\tr.resultOffset += uint64(limit)\n\tatomic.AddInt32(&r.indexIncomplete, int32(-limit))\n\n\treturn results\n}\n\n// Prepare initialises the offset with the given block number\nfunc (r *resultStore) Prepare(offset uint64) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\tif r.resultOffset < offset {\n\t\tr.resultOffset = offset\n\t}\n}\n"
  },
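  {
    "path": "docs/sketches/resultstore_index.go",
    "content": "// Illustrative sketch, not part of upstream go-ethereum: the slot arithmetic\n// from resultStore.getFetchResult, isolated so the three outcomes are easy to\n// see. A header's slot is its number minus resultOffset; a negative slot is\n// stale (already delivered), a slot at or past the throttle threshold is\n// throttled, and a slot past the end of the store is an error. The function\n// name and error value here are hypothetical.\npackage sketches\n\nimport \"errors\"\n\nvar errBeyondStoreSpace = errors.New(\"index beyond resultStore space\")\n\n// classifySlot mirrors getFetchResult's classification of a header number.\nfunc classifySlot(headerNumber, resultOffset, throttleThreshold uint64, storeLen int) (index int, stale, throttled bool, err error) {\n\tindex = int(int64(headerNumber) - int64(resultOffset))\n\tstale = index < 0\n\tthrottled = index >= int(throttleThreshold)\n\tif index >= storeLen {\n\t\terr = errBeyondStoreSpace\n\t}\n\treturn index, stale, throttled, err\n}\n"
  },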
  {
    "path": "eth/downloader/statesync.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage downloader\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n\t\"golang.org/x/crypto/sha3\"\n)\n\n// stateReq represents a batch of state fetch requests grouped together into\n// a single data retrieval network packet.\ntype stateReq struct {\n\tnItems    uint16                    // Number of items requested for download (max is 384, so uint16 is sufficient)\n\ttrieTasks map[common.Hash]*trieTask // Trie node download tasks to track previous attempts\n\tcodeTasks map[common.Hash]*codeTask // Byte code download tasks to track previous attempts\n\ttimeout   time.Duration             // Maximum round trip time for this to complete\n\ttimer     *time.Timer               // Timer to fire when the RTT timeout expires\n\tpeer      *peerConnection           // Peer that we're requesting from\n\tdelivered time.Time                 // Time when the packet was delivered (independent when we process it)\n\tresponse  [][]byte                  // Response data of the peer (nil for timeouts)\n\tdropped   bool                      // Flag whether the peer dropped off early\n}\n\n// timedOut returns if this request timed out.\nfunc (req *stateReq) timedOut() bool {\n\treturn req.response == nil\n}\n\n// stateSyncStats is a collection of progress stats to report during a state trie\n// sync to RPC requests as well as to display in user logs.\ntype stateSyncStats struct {\n\tprocessed  uint64 // Number of state entries processed\n\tduplicate  uint64 // Number of state entries downloaded twice\n\tunexpected uint64 // Number of non-requested state entries received\n\tpending    uint64 // Number of still pending state entries\n}\n\n// syncState starts downloading state with the given root hash.\nfunc (d *Downloader) syncState(root common.Hash) *stateSync {\n\t// Create the state sync\n\ts := newStateSync(d, root)\n\tselect {\n\tcase d.stateSyncStart <- s:\n\t\t// If we tell the statesync to restart with a new root, we also need\n\t\t// to wait for it to actually also start -- when old requests have timed\n\t\t// out or been delivered\n\t\t<-s.started\n\tcase <-d.quitCh:\n\t\ts.err = errCancelStateFetch\n\t\tclose(s.done)\n\t}\n\treturn s\n}\n\n// stateFetcher manages the active state sync and accepts requests\n// on its behalf.\nfunc (d *Downloader) stateFetcher() {\n\tfor {\n\t\tselect {\n\t\tcase s := <-d.stateSyncStart:\n\t\t\tfor next := s; next != nil; {\n\t\t\t\tnext = 
d.runStateSync(next)\n\t\t\t}\n\t\tcase <-d.stateCh:\n\t\t\t// Ignore state responses while no sync is running.\n\t\tcase <-d.quitCh:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// runStateSync runs a state synchronisation until it completes or another root\n// hash is requested to be switched over to.\nfunc (d *Downloader) runStateSync(s *stateSync) *stateSync {\n\tvar (\n\t\tactive   = make(map[string]*stateReq) // Currently in-flight requests\n\t\tfinished []*stateReq                  // Completed or failed requests\n\t\ttimeout  = make(chan *stateReq)       // Timed out active requests\n\t)\n\tlog.Trace(\"State sync starting\", \"root\", s.root)\n\n\tdefer func() {\n\t\t// Cancel active request timers on exit. Also set peers to idle so they're\n\t\t// available for the next sync.\n\t\tfor _, req := range active {\n\t\t\treq.timer.Stop()\n\t\t\treq.peer.SetNodeDataIdle(int(req.nItems), time.Now())\n\t\t}\n\t}()\n\tgo s.run()\n\tdefer s.Cancel()\n\n\t// Listen for peer departure events to cancel assigned tasks\n\tpeerDrop := make(chan *peerConnection, 1024)\n\tpeerSub := s.d.peers.SubscribePeerDrops(peerDrop)\n\tdefer peerSub.Unsubscribe()\n\n\tfor {\n\t\t// Enable sending of the first buffered element if there is one.\n\t\tvar (\n\t\t\tdeliverReq   *stateReq\n\t\t\tdeliverReqCh chan *stateReq\n\t\t)\n\t\tif len(finished) > 0 {\n\t\t\tdeliverReq = finished[0]\n\t\t\tdeliverReqCh = s.deliver\n\t\t}\n\n\t\tselect {\n\t\t// The stateSync lifecycle:\n\t\tcase next := <-d.stateSyncStart:\n\t\t\td.spindownStateSync(active, finished, timeout, peerDrop)\n\t\t\treturn next\n\n\t\tcase <-s.done:\n\t\t\td.spindownStateSync(active, finished, timeout, peerDrop)\n\t\t\treturn nil\n\n\t\t// Send the next finished request to the current sync:\n\t\tcase deliverReqCh <- deliverReq:\n\t\t\t// Shift out the first request, but also set the emptied slot to nil for GC\n\t\t\tcopy(finished, finished[1:])\n\t\t\tfinished[len(finished)-1] = nil\n\t\t\tfinished = finished[:len(finished)-1]\n\n\t\t// Handle incoming state packs:\n\t\tcase pack := <-d.stateCh:\n\t\t\t// Discard any data not requested (or previously timed out)\n\t\t\treq := active[pack.PeerId()]\n\t\t\tif req == nil {\n\t\t\t\tlog.Debug(\"Unrequested node data\", \"peer\", pack.PeerId(), \"len\", pack.Items())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Finalize the request and queue up for processing\n\t\t\treq.timer.Stop()\n\t\t\treq.response = pack.(*statePack).states\n\t\t\treq.delivered = time.Now()\n\n\t\t\tfinished = append(finished, req)\n\t\t\tdelete(active, pack.PeerId())\n\n\t\t// Handle dropped peer connections:\n\t\tcase p := <-peerDrop:\n\t\t\t// Skip if no request is currently pending\n\t\t\treq := active[p.id]\n\t\t\tif req == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Finalize the request and queue up for processing\n\t\t\treq.timer.Stop()\n\t\t\treq.dropped = true\n\t\t\treq.delivered = time.Now()\n\n\t\t\tfinished = append(finished, req)\n\t\t\tdelete(active, p.id)\n\n\t\t// Handle timed-out requests:\n\t\tcase req := <-timeout:\n\t\t\t// If the peer is already requesting something else, ignore the stale timeout.\n\t\t\t// This can happen when the timeout and the delivery happens simultaneously,\n\t\t\t// causing both pathways to trigger.\n\t\t\tif active[req.peer.id] != req {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treq.delivered = time.Now()\n\t\t\t// Move the timed out data back into the download queue\n\t\t\tfinished = append(finished, req)\n\t\t\tdelete(active, req.peer.id)\n\n\t\t// Track outgoing state requests:\n\t\tcase req := 
<-d.trackStateReq:\n\t\t\t// If an active request already exists for this peer, we have a problem. In\n\t\t\t// theory the trie node schedule must never assign two requests to the same\n\t\t\t// peer. In practice however, a peer might receive a request, disconnect and\n\t\t\t// immediately reconnect before the previous times out. In this case the first\n\t\t\t// request is never honored, but we must not silently overwrite it, as that\n\t\t\t// causes valid requests to go missing and sync to get stuck.\n\t\t\tif old := active[req.peer.id]; old != nil {\n\t\t\t\tlog.Warn(\"Busy peer assigned new state fetch\", \"peer\", old.peer.id)\n\t\t\t\t// Move the previous request to the finished set\n\t\t\t\told.timer.Stop()\n\t\t\t\told.dropped = true\n\t\t\t\told.delivered = time.Now()\n\t\t\t\tfinished = append(finished, old)\n\t\t\t}\n\t\t\t// Start a timer to notify the sync loop if the peer stalled.\n\t\t\treq.timer = time.AfterFunc(req.timeout, func() {\n\t\t\t\ttimeout <- req\n\t\t\t})\n\t\t\tactive[req.peer.id] = req\n\t\t}\n\t}\n}\n\n// spindownStateSync 'drains' the outstanding requests; some will be delivered and others\n// will time out. This is to ensure that when the next stateSync starts working, all peers\n// are marked as idle and de facto _are_ idle.\nfunc (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []*stateReq, timeout chan *stateReq, peerDrop chan *peerConnection) {\n\tlog.Trace(\"State sync spinning down\", \"active\", len(active), \"finished\", len(finished))\n\tfor len(active) > 0 {\n\t\tvar (\n\t\t\treq    *stateReq\n\t\t\treason string\n\t\t)\n\t\tselect {\n\t\t// Handle (drop) incoming state packs:\n\t\tcase pack := <-d.stateCh:\n\t\t\treq = active[pack.PeerId()]\n\t\t\treason = \"delivered\"\n\t\t// Handle dropped peer connections:\n\t\tcase p := <-peerDrop:\n\t\t\treq = active[p.id]\n\t\t\treason = \"peerdrop\"\n\t\t// Handle timed-out requests:\n\t\tcase req = <-timeout:\n\t\t\treason = \"timeout\"\n\t\t}\n\t\tif req == nil {\n\t\t\tcontinue\n\t\t}\n\t\treq.peer.log.Trace(\"State peer marked idle (spindown)\", \"req.items\", int(req.nItems), \"reason\", reason)\n\t\treq.timer.Stop()\n\t\tdelete(active, req.peer.id)\n\t\treq.peer.SetNodeDataIdle(int(req.nItems), time.Now())\n\t}\n\t// The 'finished' set contains deliveries that we were going to pass to processing.\n\t// Those are now moot, but we still need to set those peers as idle, which would\n\t// otherwise have been done after processing\n\tfor _, req := range finished {\n\t\treq.peer.SetNodeDataIdle(int(req.nItems), time.Now())\n\t}\n}\n\n// stateSync schedules requests for downloading a particular state trie defined\n// by a given state root.\ntype stateSync struct {\n\td *Downloader // Downloader instance to access and manage current peerset\n\n\troot   common.Hash        // State root currently being synced\n\tsched  *trie.Sync         // State trie sync scheduler defining the tasks\n\tkeccak crypto.KeccakState // Keccak256 hasher to verify deliveries with\n\n\ttrieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval\n\tcodeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval\n\n\tnumUncommitted   int\n\tbytesUncommitted int\n\n\tstarted chan struct{} // Started is signalled once the sync loop starts\n\n\tdeliver    chan *stateReq // Delivery channel multiplexing peer responses\n\tcancel     chan struct{}  // Channel to signal a termination request\n\tcancelOnce sync.Once      // Ensures cancel only ever gets 
called once\n\tdone       chan struct{}  // Channel to signal termination completion\n\terr        error          // Any error hit during sync (set before completion)\n}\n\n// trieTask represents a single trie node download task, containing a set of\n// peers already attempted retrieval from to detect stalled syncs and abort.\ntype trieTask struct {\n\tpath     [][]byte\n\tattempts map[string]struct{}\n}\n\n// codeTask represents a single byte code download task, containing a set of\n// peers already attempted retrieval from to detect stalled syncs and abort.\ntype codeTask struct {\n\tattempts map[string]struct{}\n}\n\n// newStateSync creates a new state trie download scheduler. This method does not\n// yet start the sync. The user needs to call run to initiate.\nfunc newStateSync(d *Downloader, root common.Hash) *stateSync {\n\treturn &stateSync{\n\t\td:         d,\n\t\troot:      root,\n\t\tsched:     state.NewStateSync(root, d.stateDB, d.stateBloom),\n\t\tkeccak:    sha3.NewLegacyKeccak256().(crypto.KeccakState),\n\t\ttrieTasks: make(map[common.Hash]*trieTask),\n\t\tcodeTasks: make(map[common.Hash]*codeTask),\n\t\tdeliver:   make(chan *stateReq),\n\t\tcancel:    make(chan struct{}),\n\t\tdone:      make(chan struct{}),\n\t\tstarted:   make(chan struct{}),\n\t}\n}\n\n// run starts the task assignment and response processing loop, blocking until\n// it finishes, and finally notifying any goroutines waiting for the loop to\n// finish.\nfunc (s *stateSync) run() {\n\tclose(s.started)\n\tif s.d.snapSync {\n\t\ts.err = s.d.SnapSyncer.Sync(s.root, s.cancel)\n\t} else {\n\t\ts.err = s.loop()\n\t}\n\tclose(s.done)\n}\n\n// Wait blocks until the sync is done or canceled.\nfunc (s *stateSync) Wait() error {\n\t<-s.done\n\treturn s.err\n}\n\n// Cancel cancels the sync and waits until it has shut down.\nfunc (s *stateSync) Cancel() error {\n\ts.cancelOnce.Do(func() {\n\t\tclose(s.cancel)\n\t})\n\treturn s.Wait()\n}\n\n// loop is the main event loop of a state trie sync. It is responsible for the\n// assignment of new tasks to peers (including sending them out) as well as\n// for the processing of inbound data. Note that the loop does not directly\n// receive data from peers, rather those are buffered up in the downloader and\n// pushed here async. 
The reason is to decouple processing from data receipt\n// and timeouts.\nfunc (s *stateSync) loop() (err error) {\n\t// Listen for new peer events to assign tasks to them\n\tnewPeer := make(chan *peerConnection, 1024)\n\tpeerSub := s.d.peers.SubscribeNewPeers(newPeer)\n\tdefer peerSub.Unsubscribe()\n\tdefer func() {\n\t\tcerr := s.commit(true)\n\t\tif err == nil {\n\t\t\terr = cerr\n\t\t}\n\t}()\n\n\t// Keep assigning new tasks until the sync completes or aborts\n\tfor s.sched.Pending() > 0 {\n\t\tif err = s.commit(false); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.assignTasks()\n\t\t// Tasks assigned, wait for something to happen\n\t\tselect {\n\t\tcase <-newPeer:\n\t\t\t// New peer arrived, try to assign it download tasks\n\n\t\tcase <-s.cancel:\n\t\t\treturn errCancelStateFetch\n\n\t\tcase <-s.d.cancelCh:\n\t\t\treturn errCanceled\n\n\t\tcase req := <-s.deliver:\n\t\t\t// Response, disconnect or timeout triggered, drop the peer if stalling\n\t\t\tlog.Trace(\"Received node data response\", \"peer\", req.peer.id, \"count\", len(req.response), \"dropped\", req.dropped, \"timeout\", !req.dropped && req.timedOut())\n\t\t\tif req.nItems <= 2 && !req.dropped && req.timedOut() {\n\t\t\t\t// 2 items are the minimum requested, if even that times out, we've no use of\n\t\t\t\t// this peer at the moment.\n\t\t\t\tlog.Warn(\"Stalling state sync, dropping peer\", \"peer\", req.peer.id)\n\t\t\t\tif s.d.dropPeer == nil {\n\t\t\t\t\t// The dropPeer method is nil when `--copydb` is used for a local copy.\n\t\t\t\t\t// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored\n\t\t\t\t\treq.peer.log.Warn(\"Downloader wants to drop peer, but peerdrop-function is not set\", \"peer\", req.peer.id)\n\t\t\t\t} else {\n\t\t\t\t\ts.d.dropPeer(req.peer.id)\n\n\t\t\t\t\t// If this peer was the master peer, abort sync immediately\n\t\t\t\t\ts.d.cancelLock.RLock()\n\t\t\t\t\tmaster := req.peer.id == s.d.cancelPeer\n\t\t\t\t\ts.d.cancelLock.RUnlock()\n\n\t\t\t\t\tif master {\n\t\t\t\t\t\ts.d.cancel()\n\t\t\t\t\t\treturn errTimeout\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Process all the received blobs and check for stale delivery\n\t\t\tdelivered, err := s.process(req)\n\t\t\treq.peer.SetNodeDataIdle(delivered, req.delivered)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"Node data write error\", \"err\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *stateSync) commit(force bool) error {\n\tif !force && s.bytesUncommitted < ethdb.IdealBatchSize {\n\t\treturn nil\n\t}\n\tstart := time.Now()\n\tb := s.d.stateDB.NewBatch()\n\tif err := s.sched.Commit(b); err != nil {\n\t\treturn err\n\t}\n\tif err := b.Write(); err != nil {\n\t\treturn fmt.Errorf(\"DB write error: %v\", err)\n\t}\n\ts.updateStats(s.numUncommitted, 0, 0, time.Since(start))\n\ts.numUncommitted = 0\n\ts.bytesUncommitted = 0\n\treturn nil\n}\n\n// assignTasks attempts to assign new tasks to all idle peers, either from the\n// batch currently being retried, or fetching new data from the trie sync itself.\nfunc (s *stateSync) assignTasks() {\n\t// Iterate over all idle peers and try to assign them state fetches\n\tpeers, _ := s.d.peers.NodeDataIdlePeers()\n\tfor _, p := range peers {\n\t\t// Assign a batch of fetches proportional to the estimated latency/bandwidth\n\t\tcap := p.NodeDataCapacity(s.d.requestRTT())\n\t\treq := &stateReq{peer: p, timeout: s.d.requestTTL()}\n\n\t\tnodes, _, codes := s.fillTasks(cap, req)\n\n\t\t// If the peer was assigned tasks to fetch, send the network request\n\t\tif 
len(nodes)+len(codes) > 0 {\n\t\t\treq.peer.log.Trace(\"Requesting batch of state data\", \"nodes\", len(nodes), \"codes\", len(codes), \"root\", s.root)\n\t\t\tselect {\n\t\t\tcase s.d.trackStateReq <- req:\n\t\t\t\treq.peer.FetchNodeData(append(nodes, codes...)) // Unified retrieval under eth/6x\n\t\t\tcase <-s.cancel:\n\t\t\tcase <-s.d.cancelCh:\n\t\t\t}\n\t\t}\n\t}\n}\n\n// fillTasks fills the given request object with a maximum of n state download\n// tasks to send to the remote peer.\nfunc (s *stateSync) fillTasks(n int, req *stateReq) (nodes []common.Hash, paths []trie.SyncPath, codes []common.Hash) {\n\t// Refill available tasks from the scheduler.\n\tif fill := n - (len(s.trieTasks) + len(s.codeTasks)); fill > 0 {\n\t\tnodes, paths, codes := s.sched.Missing(fill)\n\t\tfor i, hash := range nodes {\n\t\t\ts.trieTasks[hash] = &trieTask{\n\t\t\t\tpath:     paths[i],\n\t\t\t\tattempts: make(map[string]struct{}),\n\t\t\t}\n\t\t}\n\t\tfor _, hash := range codes {\n\t\t\ts.codeTasks[hash] = &codeTask{\n\t\t\t\tattempts: make(map[string]struct{}),\n\t\t\t}\n\t\t}\n\t}\n\t// Find tasks that haven't been tried with the request's peer. Prefer code\n\t// over trie nodes as those can be written to disk and forgotten about.\n\tnodes = make([]common.Hash, 0, n)\n\tpaths = make([]trie.SyncPath, 0, n)\n\tcodes = make([]common.Hash, 0, n)\n\n\treq.trieTasks = make(map[common.Hash]*trieTask, n)\n\treq.codeTasks = make(map[common.Hash]*codeTask, n)\n\n\tfor hash, t := range s.codeTasks {\n\t\t// Stop when we've gathered enough requests\n\t\tif len(nodes)+len(codes) == n {\n\t\t\tbreak\n\t\t}\n\t\t// Skip any requests we've already tried from this peer\n\t\tif _, ok := t.attempts[req.peer.id]; ok {\n\t\t\tcontinue\n\t\t}\n\t\t// Assign the request to this peer\n\t\tt.attempts[req.peer.id] = struct{}{}\n\t\tcodes = append(codes, hash)\n\t\treq.codeTasks[hash] = t\n\t\tdelete(s.codeTasks, hash)\n\t}\n\tfor hash, t := range s.trieTasks {\n\t\t// Stop when we've gathered enough requests\n\t\tif len(nodes)+len(codes) == n {\n\t\t\tbreak\n\t\t}\n\t\t// Skip any requests we've already tried from this peer\n\t\tif _, ok := t.attempts[req.peer.id]; ok {\n\t\t\tcontinue\n\t\t}\n\t\t// Assign the request to this peer\n\t\tt.attempts[req.peer.id] = struct{}{}\n\n\t\tnodes = append(nodes, hash)\n\t\tpaths = append(paths, t.path)\n\n\t\treq.trieTasks[hash] = t\n\t\tdelete(s.trieTasks, hash)\n\t}\n\treq.nItems = uint16(len(nodes) + len(codes))\n\treturn nodes, paths, codes\n}\n\n// process iterates over a batch of delivered state data, injecting each item\n// into a running state sync, re-queuing any items that were requested but not\n// delivered. 
Returns whether the peer actually managed to deliver anything of\n// value, and any error that occurred.\nfunc (s *stateSync) process(req *stateReq) (int, error) {\n\t// Collect processing stats and update progress if valid data was received\n\tduplicate, unexpected, successful := 0, 0, 0\n\n\tdefer func(start time.Time) {\n\t\tif duplicate > 0 || unexpected > 0 {\n\t\t\ts.updateStats(0, duplicate, unexpected, time.Since(start))\n\t\t}\n\t}(time.Now())\n\n\t// Iterate over all the delivered data and inject one-by-one into the trie\n\tfor _, blob := range req.response {\n\t\thash, err := s.processNodeData(blob)\n\t\tswitch err {\n\t\tcase nil:\n\t\t\ts.numUncommitted++\n\t\t\ts.bytesUncommitted += len(blob)\n\t\t\tsuccessful++\n\t\tcase trie.ErrNotRequested:\n\t\t\tunexpected++\n\t\tcase trie.ErrAlreadyProcessed:\n\t\t\tduplicate++\n\t\tdefault:\n\t\t\treturn successful, fmt.Errorf(\"invalid state node %s: %v\", hash.TerminalString(), err)\n\t\t}\n\t\t// Delete from both queues (one delivery is enough for the syncer)\n\t\tdelete(req.trieTasks, hash)\n\t\tdelete(req.codeTasks, hash)\n\t}\n\t// Put unfulfilled tasks back into the retry queue\n\tnpeers := s.d.peers.Len()\n\tfor hash, task := range req.trieTasks {\n\t\t// If the node did deliver something, missing items may be due to a protocol\n\t\t// limit or a previous timeout + delayed delivery. Both cases should permit\n\t\t// the node to retry the missing items (to avoid single-peer stalls).\n\t\tif len(req.response) > 0 || req.timedOut() {\n\t\t\tdelete(task.attempts, req.peer.id)\n\t\t}\n\t\t// If we've requested the node too many times already, it may be a malicious\n\t\t// sync where nobody has the right data. Abort.\n\t\tif len(task.attempts) >= npeers {\n\t\t\treturn successful, fmt.Errorf(\"trie node %s failed with all peers (%d tries, %d peers)\", hash.TerminalString(), len(task.attempts), npeers)\n\t\t}\n\t\t// Missing item, place into the retry queue.\n\t\ts.trieTasks[hash] = task\n\t}\n\tfor hash, task := range req.codeTasks {\n\t\t// If the node did deliver something, missing items may be due to a protocol\n\t\t// limit or a previous timeout + delayed delivery. Both cases should permit\n\t\t// the node to retry the missing items (to avoid single-peer stalls).\n\t\tif len(req.response) > 0 || req.timedOut() {\n\t\t\tdelete(task.attempts, req.peer.id)\n\t\t}\n\t\t// If we've requested the node too many times already, it may be a malicious\n\t\t// sync where nobody has the right data. 
Abort.\n\t\tif len(task.attempts) >= npeers {\n\t\t\treturn successful, fmt.Errorf(\"byte code %s failed with all peers (%d tries, %d peers)\", hash.TerminalString(), len(task.attempts), npeers)\n\t\t}\n\t\t// Missing item, place into the retry queue.\n\t\ts.codeTasks[hash] = task\n\t}\n\treturn successful, nil\n}\n\n// processNodeData tries to inject a trie node data blob delivered from a remote\n// peer into the state trie, returning whether anything useful was written or any\n// error occurred.\nfunc (s *stateSync) processNodeData(blob []byte) (common.Hash, error) {\n\tres := trie.SyncResult{Data: blob}\n\ts.keccak.Reset()\n\ts.keccak.Write(blob)\n\ts.keccak.Read(res.Hash[:])\n\terr := s.sched.Process(res)\n\treturn res.Hash, err\n}\n\n// updateStats bumps the various state sync progress counters and displays a log\n// message for the user to see.\nfunc (s *stateSync) updateStats(written, duplicate, unexpected int, duration time.Duration) {\n\ts.d.syncStatsLock.Lock()\n\tdefer s.d.syncStatsLock.Unlock()\n\n\ts.d.syncStatsState.pending = uint64(s.sched.Pending())\n\ts.d.syncStatsState.processed += uint64(written)\n\ts.d.syncStatsState.duplicate += uint64(duplicate)\n\ts.d.syncStatsState.unexpected += uint64(unexpected)\n\n\tif written > 0 || duplicate > 0 || unexpected > 0 {\n\t\tlog.Info(\"Imported new state entries\", \"count\", written, \"elapsed\", common.PrettyDuration(duration), \"processed\", s.d.syncStatsState.processed, \"pending\", s.d.syncStatsState.pending, \"trieretry\", len(s.trieTasks), \"coderetry\", len(s.codeTasks), \"duplicate\", s.d.syncStatsState.duplicate, \"unexpected\", s.d.syncStatsState.unexpected)\n\t}\n\tif written > 0 {\n\t\trawdb.WriteFastTrieProgress(s.d.stateDB, s.d.syncStatsState.processed)\n\t}\n}\n"
  },
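  {
    "path": "docs/sketches/statesync_timeout.go",
    "content": "// Illustrative sketch, not part of upstream go-ethereum: the stall-detection\n// pattern used by runStateSync. Each tracked request arms a time.AfterFunc\n// that pushes the request onto a shared timeout channel; when the loop later\n// reads from that channel it must verify the request is still the active one\n// for that peer, because a delivery and a timeout can fire simultaneously.\n// The type and function names here are hypothetical.\npackage sketches\n\nimport \"time\"\n\ntype sketchReq struct {\n\tpeerID  string\n\ttimeout time.Duration\n\ttimer   *time.Timer\n}\n\n// trackSketch arms the stall timer the same way runStateSync does.\nfunc trackSketch(active map[string]*sketchReq, timeoutCh chan *sketchReq, req *sketchReq) {\n\treq.timer = time.AfterFunc(req.timeout, func() {\n\t\ttimeoutCh <- req\n\t})\n\tactive[req.peerID] = req\n}\n\n// handleSketchTimeout ignores stale timeouts that lost the race to a delivery.\nfunc handleSketchTimeout(active map[string]*sketchReq, req *sketchReq) {\n\tif active[req.peerID] != req {\n\t\treturn // a delivery already replaced or removed this request\n\t}\n\tdelete(active, req.peerID)\n}\n"
  },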
  {
    "path": "eth/downloader/testchain_test.go",
    "content": "// Copyright 2018 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage downloader\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\n// Test chain parameters.\nvar (\n\ttestKey, _  = crypto.HexToECDSA(\"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291\")\n\ttestAddress = crypto.PubkeyToAddress(testKey.PublicKey)\n\ttestDB      = rawdb.NewMemoryDatabase()\n\ttestGenesis = core.GenesisBlockForTesting(testDB, testAddress, big.NewInt(1000000000))\n)\n\n// The common prefix of all test chains:\nvar testChainBase = newTestChain(blockCacheMaxItems+200, testGenesis)\n\n// Different forks on top of the base chain:\nvar testChainForkLightA, testChainForkLightB, testChainForkHeavy *testChain\n\nfunc init() {\n\tvar forkLen = int(fullMaxForkAncestry + 50)\n\tvar wg sync.WaitGroup\n\twg.Add(3)\n\tgo func() { testChainForkLightA = testChainBase.makeFork(forkLen, false, 1); wg.Done() }()\n\tgo func() { testChainForkLightB = testChainBase.makeFork(forkLen, false, 2); wg.Done() }()\n\tgo func() { testChainForkHeavy = testChainBase.makeFork(forkLen, true, 3); wg.Done() }()\n\twg.Wait()\n}\n\ntype testChain struct {\n\tgenesis  *types.Block\n\tchain    []common.Hash\n\theaderm  map[common.Hash]*types.Header\n\tblockm   map[common.Hash]*types.Block\n\treceiptm map[common.Hash][]*types.Receipt\n\ttdm      map[common.Hash]*big.Int\n}\n\n// newTestChain creates a blockchain of the given length.\nfunc newTestChain(length int, genesis *types.Block) *testChain {\n\ttc := new(testChain).copy(length)\n\ttc.genesis = genesis\n\ttc.chain = append(tc.chain, genesis.Hash())\n\ttc.headerm[tc.genesis.Hash()] = tc.genesis.Header()\n\ttc.tdm[tc.genesis.Hash()] = tc.genesis.Difficulty()\n\ttc.blockm[tc.genesis.Hash()] = tc.genesis\n\ttc.generate(length-1, 0, genesis, false)\n\treturn tc\n}\n\n// makeFork creates a fork on top of the test chain.\nfunc (tc *testChain) makeFork(length int, heavy bool, seed byte) *testChain {\n\tfork := tc.copy(tc.len() + length)\n\tfork.generate(length, seed, tc.headBlock(), heavy)\n\treturn fork\n}\n\n// shorten creates a copy of the chain with the given length. 
It panics if the\n// length is longer than the number of available blocks.\nfunc (tc *testChain) shorten(length int) *testChain {\n\tif length > tc.len() {\n\t\tpanic(fmt.Errorf(\"can't shorten test chain to %d blocks, it's only %d blocks long\", length, tc.len()))\n\t}\n\treturn tc.copy(length)\n}\n\nfunc (tc *testChain) copy(newlen int) *testChain {\n\tcpy := &testChain{\n\t\tgenesis:  tc.genesis,\n\t\theaderm:  make(map[common.Hash]*types.Header, newlen),\n\t\tblockm:   make(map[common.Hash]*types.Block, newlen),\n\t\treceiptm: make(map[common.Hash][]*types.Receipt, newlen),\n\t\ttdm:      make(map[common.Hash]*big.Int, newlen),\n\t}\n\tfor i := 0; i < len(tc.chain) && i < newlen; i++ {\n\t\thash := tc.chain[i]\n\t\tcpy.chain = append(cpy.chain, tc.chain[i])\n\t\tcpy.tdm[hash] = tc.tdm[hash]\n\t\tcpy.blockm[hash] = tc.blockm[hash]\n\t\tcpy.headerm[hash] = tc.headerm[hash]\n\t\tcpy.receiptm[hash] = tc.receiptm[hash]\n\t}\n\treturn cpy\n}\n\n// generate creates a chain of n blocks starting at and including parent.\n// The returned hash chain is ordered head->parent. In addition, every 22nd block\n// contains a transaction and every 5th an uncle to allow testing correct block\n// reassembly.\nfunc (tc *testChain) generate(n int, seed byte, parent *types.Block, heavy bool) {\n\t// start := time.Now()\n\t// defer func() { fmt.Printf(\"test chain generated in %v\\n\", time.Since(start)) }()\n\n\tblocks, receipts := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testDB, n, func(i int, block *core.BlockGen) {\n\t\tblock.SetCoinbase(common.Address{seed})\n\t\t// If a heavy chain is requested, delay blocks to raise difficulty\n\t\tif heavy {\n\t\t\tblock.OffsetTime(-1)\n\t\t}\n\t\t// Include transactions to the miner to make blocks more interesting.\n\t\tif parent == tc.genesis && i%22 == 0 {\n\t\t\tsigner := types.MakeSigner(params.TestChainConfig, block.Number())\n\t\t\ttx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tblock.AddTx(tx)\n\t\t}\n\t\t// If the block number is a multiple of 5, add a bonus uncle to the block\n\t\tif i > 0 && i%5 == 0 {\n\t\t\tblock.AddUncle(&types.Header{\n\t\t\t\tParentHash: block.PrevBlock(i - 1).Hash(),\n\t\t\t\tNumber:     big.NewInt(block.Number().Int64() - 1),\n\t\t\t})\n\t\t}\n\t})\n\n\t// Convert the block-chain into a hash-chain and header/block maps\n\ttd := new(big.Int).Set(tc.td(parent.Hash()))\n\tfor i, b := range blocks {\n\t\ttd := td.Add(td, b.Difficulty())\n\t\thash := b.Hash()\n\t\ttc.chain = append(tc.chain, hash)\n\t\ttc.blockm[hash] = b\n\t\ttc.headerm[hash] = b.Header()\n\t\ttc.receiptm[hash] = receipts[i]\n\t\ttc.tdm[hash] = new(big.Int).Set(td)\n\t}\n}\n\n// len returns the total number of blocks in the chain.\nfunc (tc *testChain) len() int {\n\treturn len(tc.chain)\n}\n\n// headBlock returns the head of the chain.\nfunc (tc *testChain) headBlock() *types.Block {\n\treturn tc.blockm[tc.chain[len(tc.chain)-1]]\n}\n\n// td returns the total difficulty of the given block.\nfunc (tc *testChain) td(hash common.Hash) *big.Int {\n\treturn tc.tdm[hash]\n}\n\n// headersByHash returns headers in order from the given hash.\nfunc (tc *testChain) headersByHash(origin common.Hash, amount int, skip int, reverse bool) []*types.Header {\n\tnum, _ := tc.hashToNumber(origin)\n\treturn tc.headersByNumber(num, amount, skip, reverse)\n}\n\n// headersByNumber returns headers from 
the given number.\nfunc (tc *testChain) headersByNumber(origin uint64, amount int, skip int, reverse bool) []*types.Header {\n\tresult := make([]*types.Header, 0, amount)\n\n\tif !reverse {\n\t\tfor num := origin; num < uint64(len(tc.chain)) && len(result) < amount; num += uint64(skip) + 1 {\n\t\t\tif header, ok := tc.headerm[tc.chain[int(num)]]; ok {\n\t\t\t\tresult = append(result, header)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfor num := int64(origin); num >= 0 && len(result) < amount; num -= int64(skip) + 1 {\n\t\t\tif header, ok := tc.headerm[tc.chain[int(num)]]; ok {\n\t\t\t\tresult = append(result, header)\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n\n// receipts returns the receipts of the given block hashes.\nfunc (tc *testChain) receipts(hashes []common.Hash) [][]*types.Receipt {\n\tresults := make([][]*types.Receipt, 0, len(hashes))\n\tfor _, hash := range hashes {\n\t\tif receipt, ok := tc.receiptm[hash]; ok {\n\t\t\tresults = append(results, receipt)\n\t\t}\n\t}\n\treturn results\n}\n\n// bodies returns the block bodies of the given block hashes.\nfunc (tc *testChain) bodies(hashes []common.Hash) ([][]*types.Transaction, [][]*types.Header) {\n\ttransactions := make([][]*types.Transaction, 0, len(hashes))\n\tuncles := make([][]*types.Header, 0, len(hashes))\n\tfor _, hash := range hashes {\n\t\tif block, ok := tc.blockm[hash]; ok {\n\t\t\ttransactions = append(transactions, block.Transactions())\n\t\t\tuncles = append(uncles, block.Uncles())\n\t\t}\n\t}\n\treturn transactions, uncles\n}\n\nfunc (tc *testChain) hashToNumber(target common.Hash) (uint64, bool) {\n\tfor num, hash := range tc.chain {\n\t\tif hash == target {\n\t\t\treturn uint64(num), true\n\t\t}\n\t}\n\treturn 0, false\n}\n"
  },
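  {
    "path": "docs/sketches/testchain_stepping.go",
    "content": "// Illustrative sketch, not part of upstream go-ethereum: the block-number\n// stepping behind testChain.headersByNumber. A query for `amount` headers\n// from `origin` with a given `skip` visits origin, origin+skip+1,\n// origin+2*(skip+1), ... and steps downwards instead when reverse is set.\n// The helper name is hypothetical.\npackage sketches\n\n// visitedNumbers returns the block numbers headersByNumber would touch.\nfunc visitedNumbers(origin uint64, amount, skip, chainLen int, reverse bool) []uint64 {\n\tvar nums []uint64\n\tif !reverse {\n\t\tfor num := origin; num < uint64(chainLen) && len(nums) < amount; num += uint64(skip) + 1 {\n\t\t\tnums = append(nums, num)\n\t\t}\n\t} else {\n\t\tfor num := int64(origin); num >= 0 && len(nums) < amount; num -= int64(skip) + 1 {\n\t\t\tnums = append(nums, uint64(num))\n\t\t}\n\t}\n\treturn nums\n}\n"
  },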
  {
    "path": "eth/downloader/types.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage downloader\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/ethereum/go-ethereum/core/types\"\n)\n\n// peerDropFn is a callback type for dropping a peer detected as malicious.\ntype peerDropFn func(id string)\n\n// dataPack is a data message returned by a peer for some query.\ntype dataPack interface {\n\tPeerId() string\n\tItems() int\n\tStats() string\n}\n\n// headerPack is a batch of block headers returned by a peer.\ntype headerPack struct {\n\tpeerID  string\n\theaders []*types.Header\n}\n\nfunc (p *headerPack) PeerId() string { return p.peerID }\nfunc (p *headerPack) Items() int     { return len(p.headers) }\nfunc (p *headerPack) Stats() string  { return fmt.Sprintf(\"%d\", len(p.headers)) }\n\n// bodyPack is a batch of block bodies returned by a peer.\ntype bodyPack struct {\n\tpeerID       string\n\ttransactions [][]*types.Transaction\n\tuncles       [][]*types.Header\n}\n\nfunc (p *bodyPack) PeerId() string { return p.peerID }\nfunc (p *bodyPack) Items() int {\n\tif len(p.transactions) <= len(p.uncles) {\n\t\treturn len(p.transactions)\n\t}\n\treturn len(p.uncles)\n}\nfunc (p *bodyPack) Stats() string { return fmt.Sprintf(\"%d:%d\", len(p.transactions), len(p.uncles)) }\n\n// receiptPack is a batch of receipts returned by a peer.\ntype receiptPack struct {\n\tpeerID   string\n\treceipts [][]*types.Receipt\n}\n\nfunc (p *receiptPack) PeerId() string { return p.peerID }\nfunc (p *receiptPack) Items() int     { return len(p.receipts) }\nfunc (p *receiptPack) Stats() string  { return fmt.Sprintf(\"%d\", len(p.receipts)) }\n\n// statePack is a batch of states returned by a peer.\ntype statePack struct {\n\tpeerID string\n\tstates [][]byte\n}\n\nfunc (p *statePack) PeerId() string { return p.peerID }\nfunc (p *statePack) Items() int     { return len(p.states) }\nfunc (p *statePack) Stats() string  { return fmt.Sprintf(\"%d\", len(p.states)) }\n"
  },
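  {
    "path": "docs/sketches/bodypack_items.go",
    "content": "// Illustrative sketch, not part of upstream go-ethereum: bodyPack.Items\n// reports min(len(transactions), len(uncles)) because a block body is only\n// complete when both its transaction list and its uncle list have arrived.\n// The helper below restates that rule under a hypothetical name.\npackage sketches\n\n// completeBodyCount mirrors bodyPack.Items: only paired lists count as full bodies.\nfunc completeBodyCount(txLists, uncleLists int) int {\n\tif txLists <= uncleLists {\n\t\treturn txLists\n\t}\n\treturn uncleLists\n}\n"
  },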
  {
    "path": "eth/ethconfig/config.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package ethconfig contains the configuration of the ETH and LES protocols.\npackage ethconfig\n\nimport (\n\t\"math/big\"\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus\"\n\t\"github.com/ethereum/go-ethereum/consensus/clique\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/eth/gasprice\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/miner\"\n\t\"github.com/ethereum/go-ethereum/node\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\n// FullNodeGPO contains default gasprice oracle settings for full node.\nvar FullNodeGPO = gasprice.Config{\n\tBlocks:     20,\n\tPercentile: 60,\n\tMaxPrice:   gasprice.DefaultMaxPrice,\n}\n\n// LightClientGPO contains default gasprice oracle settings for light client.\nvar LightClientGPO = gasprice.Config{\n\tBlocks:     2,\n\tPercentile: 60,\n\tMaxPrice:   gasprice.DefaultMaxPrice,\n}\n\n// Defaults contains default settings for use on the Ethereum main net.\nvar Defaults = Config{\n\tSyncMode: downloader.FastSync,\n\tEthash: ethash.Config{\n\t\tCacheDir:         \"ethash\",\n\t\tCachesInMem:      2,\n\t\tCachesOnDisk:     3,\n\t\tCachesLockMmap:   false,\n\t\tDatasetsInMem:    1,\n\t\tDatasetsOnDisk:   2,\n\t\tDatasetsLockMmap: false,\n\t},\n\tNetworkId:               1,\n\tTxLookupLimit:           2350000,\n\tLightPeers:              100,\n\tUltraLightFraction:      75,\n\tDatabaseCache:           512,\n\tTrieCleanCache:          154,\n\tTrieCleanCacheJournal:   \"triecache\",\n\tTrieCleanCacheRejournal: 60 * time.Minute,\n\tTrieDirtyCache:          256,\n\tTrieTimeout:             60 * time.Minute,\n\tSnapshotCache:           102,\n\tMiner: miner.Config{\n\t\tGasFloor: 8000000,\n\t\tGasCeil:  8000000,\n\t\tGasPrice: big.NewInt(params.GWei),\n\t\tRecommit: 3 * time.Second,\n\t},\n\tTxPool:      core.DefaultTxPoolConfig,\n\tRPCGasCap:   25000000,\n\tGPO:         FullNodeGPO,\n\tRPCTxFeeCap: 1, // 1 ether\n}\n\nfunc init() {\n\thome := os.Getenv(\"HOME\")\n\tif home == \"\" {\n\t\tif user, err := user.Current(); err == nil {\n\t\t\thome = user.HomeDir\n\t\t}\n\t}\n\tif runtime.GOOS == \"darwin\" {\n\t\tDefaults.Ethash.DatasetDir = filepath.Join(home, \"Library\", \"Ethash\")\n\t} else if runtime.GOOS == \"windows\" {\n\t\tlocalappdata := os.Getenv(\"LOCALAPPDATA\")\n\t\tif localappdata != \"\" {\n\t\t\tDefaults.Ethash.DatasetDir = filepath.Join(localappdata, \"Ethash\")\n\t\t} else 
{\n\t\t\tDefaults.Ethash.DatasetDir = filepath.Join(home, \"AppData\", \"Local\", \"Ethash\")\n\t\t}\n\t} else {\n\t\tDefaults.Ethash.DatasetDir = filepath.Join(home, \".ethash\")\n\t}\n}\n\n//go:generate gencodec -type Config -formats toml -out gen_config.go\n\n// Config contains configuration options for the ETH and LES protocols.\ntype Config struct {\n\t// The genesis block, which is inserted if the database is empty.\n\t// If nil, the Ethereum main net block is used.\n\tGenesis *core.Genesis `toml:\",omitempty\"`\n\n\t// Protocol options\n\tNetworkId uint64 // Network ID to use for selecting peers to connect to\n\tSyncMode  downloader.SyncMode\n\n\t// This can be set to a list of enrtree:// URLs which will be queried for\n\t// nodes to connect to.\n\tEthDiscoveryURLs  []string\n\tSnapDiscoveryURLs []string\n\n\tNoPruning  bool // Whether to disable pruning and flush everything to disk\n\tNoPrefetch bool // Whether to disable prefetching and only load state on demand\n\n\tTxLookupLimit uint64 `toml:\",omitempty\"` // The maximum number of blocks from head whose tx indices are reserved.\n\n\t// Whitelist of required block number -> hash values to accept\n\tWhitelist map[uint64]common.Hash `toml:\"-\"`\n\n\t// Light client options\n\tLightServ          int  `toml:\",omitempty\"` // Maximum percentage of time allowed for serving LES requests\n\tLightIngress       int  `toml:\",omitempty\"` // Incoming bandwidth limit for light servers\n\tLightEgress        int  `toml:\",omitempty\"` // Outgoing bandwidth limit for light servers\n\tLightPeers         int  `toml:\",omitempty\"` // Maximum number of LES client peers\n\tLightNoPrune       bool `toml:\",omitempty\"` // Whether to disable light chain pruning\n\tLightNoSyncServe   bool `toml:\",omitempty\"` // Whether to serve light clients before syncing\n\tSyncFromCheckpoint bool `toml:\",omitempty\"` // Whether to sync the header chain from the configured checkpoint\n\n\t// Ultra Light client options\n\tUltraLightServers      []string `toml:\",omitempty\"` // List of trusted ultra light servers\n\tUltraLightFraction     int      `toml:\",omitempty\"` // Percentage of trusted servers to accept an announcement\n\tUltraLightOnlyAnnounce bool     `toml:\",omitempty\"` // Whether to only announce headers, or also serve them\n\n\t// Database options\n\tSkipBcVersionCheck bool `toml:\"-\"`\n\tDatabaseHandles    int  `toml:\"-\"`\n\tDatabaseCache      int\n\tDatabaseFreezer    string\n\n\tTrieCleanCache          int\n\tTrieCleanCacheJournal   string        `toml:\",omitempty\"` // Disk journal directory for trie cache to survive node restarts\n\tTrieCleanCacheRejournal time.Duration `toml:\",omitempty\"` // Time interval to regenerate the journal for clean cache\n\tTrieDirtyCache          int\n\tTrieTimeout             time.Duration\n\tSnapshotCache           int\n\tPreimages               bool\n\n\t// Mining options\n\tMiner miner.Config\n\n\t// Ethash options\n\tEthash ethash.Config\n\n\t// Transaction pool options\n\tTxPool core.TxPoolConfig\n\n\t// Gas Price Oracle options\n\tGPO gasprice.Config\n\n\t// Enables tracking of SHA3 preimages in the VM\n\tEnablePreimageRecording bool\n\n\t// Miscellaneous options\n\tDocRoot string `toml:\"-\"`\n\n\t// Type of the EWASM interpreter (\"\" for default)\n\tEWASMInterpreter string\n\n\t// Type of the EVM interpreter (\"\" for default)\n\tEVMInterpreter string\n\n\t// RPCGasCap is the global gas cap for eth-call variants.\n\tRPCGasCap uint64\n\n\t// RPCTxFeeCap is the global transaction fee (price * 
gas limit) cap for\n\t// send-transaction variants. The unit is ether.\n\tRPCTxFeeCap float64\n\n\t// Checkpoint is a hardcoded checkpoint which can be nil.\n\tCheckpoint *params.TrustedCheckpoint `toml:\",omitempty\"`\n\n\t// CheckpointOracle is the configuration for the checkpoint oracle.\n\tCheckpointOracle *params.CheckpointOracleConfig `toml:\",omitempty\"`\n\n\t// Berlin block override (TODO: remove after the fork)\n\tOverrideBerlin *big.Int `toml:\",omitempty\"`\n}\n\n// CreateConsensusEngine creates a consensus engine for the given chain configuration.\nfunc CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine {\n\t// If proof-of-authority is requested, set it up\n\tif chainConfig.Clique != nil {\n\t\treturn clique.New(chainConfig.Clique, db)\n\t}\n\t// Otherwise assume proof-of-work\n\tswitch config.PowMode {\n\tcase ethash.ModeFake:\n\t\tlog.Warn(\"Ethash used in fake mode\")\n\tcase ethash.ModeTest:\n\t\tlog.Warn(\"Ethash used in test mode\")\n\tcase ethash.ModeShared:\n\t\tlog.Warn(\"Ethash used in shared mode\")\n\t}\n\tengine := ethash.New(ethash.Config{\n\t\tPowMode:          config.PowMode,\n\t\tCacheDir:         stack.ResolvePath(config.CacheDir),\n\t\tCachesInMem:      config.CachesInMem,\n\t\tCachesOnDisk:     config.CachesOnDisk,\n\t\tCachesLockMmap:   config.CachesLockMmap,\n\t\tDatasetDir:       config.DatasetDir,\n\t\tDatasetsInMem:    config.DatasetsInMem,\n\t\tDatasetsOnDisk:   config.DatasetsOnDisk,\n\t\tDatasetsLockMmap: config.DatasetsLockMmap,\n\t\tNotifyFull:       config.NotifyFull,\n\t}, notify, noverify)\n\tengine.SetThreads(-1) // Disable CPU mining\n\treturn engine\n}\n"
  },
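  {
    "path": "examples/ethconfig_engine_sketch.go",
    "content": "// Hypothetical usage sketch, not part of the repository: it shows how a\n// caller might drive ethconfig.CreateConsensusEngine. With a Clique section\n// in the chain config (e.g. Rinkeby), proof-of-authority is selected and the\n// ethash options are ignored; otherwise an ethash engine is built in the\n// requested PowMode. The file path and wiring here are illustrative only.\npackage main\n\nimport (\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/node\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\nfunc main() {\n\t// A throwaway node instance provides the path resolution that the ethash\n\t// branch of CreateConsensusEngine uses for its cache directory.\n\tstack, err := node.New(&node.Config{})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer stack.Close()\n\tdb := rawdb.NewMemoryDatabase()\n\n\t// Rinkeby's chain config carries a Clique section, so this call returns a\n\t// proof-of-authority engine and never consults the ethash settings.\n\tengine := ethconfig.CreateConsensusEngine(stack, params.RinkebyChainConfig, &ethash.Config{PowMode: ethash.ModeFake}, nil, true, db)\n\t_ = engine\n}\n"
  },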
  {
    "path": "eth/ethconfig/gen_config.go",
    "content": "// Code generated by github.com/fjl/gencodec. DO NOT EDIT.\n\npackage ethconfig\n\nimport (\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/eth/gasprice\"\n\t\"github.com/ethereum/go-ethereum/miner\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\n// MarshalTOML marshals as TOML.\nfunc (c Config) MarshalTOML() (interface{}, error) {\n\ttype Config struct {\n\t\tGenesis                 *core.Genesis `toml:\",omitempty\"`\n\t\tNetworkId               uint64\n\t\tSyncMode                downloader.SyncMode\n\t\tEthDiscoveryURLs        []string\n\t\tSnapDiscoveryURLs       []string\n\t\tNoPruning               bool\n\t\tNoPrefetch              bool\n\t\tTxLookupLimit           uint64                 `toml:\",omitempty\"`\n\t\tWhitelist               map[uint64]common.Hash `toml:\"-\"`\n\t\tLightServ               int                    `toml:\",omitempty\"`\n\t\tLightIngress            int                    `toml:\",omitempty\"`\n\t\tLightEgress             int                    `toml:\",omitempty\"`\n\t\tLightPeers              int                    `toml:\",omitempty\"`\n\t\tLightNoPrune            bool                   `toml:\",omitempty\"`\n\t\tLightNoSyncServe        bool                   `toml:\",omitempty\"`\n\t\tSyncFromCheckpoint      bool                   `toml:\",omitempty\"`\n\t\tUltraLightServers       []string               `toml:\",omitempty\"`\n\t\tUltraLightFraction      int                    `toml:\",omitempty\"`\n\t\tUltraLightOnlyAnnounce  bool                   `toml:\",omitempty\"`\n\t\tSkipBcVersionCheck      bool                   `toml:\"-\"`\n\t\tDatabaseHandles         int                    `toml:\"-\"`\n\t\tDatabaseCache           int\n\t\tDatabaseFreezer         string\n\t\tTrieCleanCache          int\n\t\tTrieCleanCacheJournal   string        `toml:\",omitempty\"`\n\t\tTrieCleanCacheRejournal time.Duration `toml:\",omitempty\"`\n\t\tTrieDirtyCache          int\n\t\tTrieTimeout             time.Duration\n\t\tSnapshotCache           int\n\t\tPreimages               bool\n\t\tMiner                   miner.Config\n\t\tEthash                  ethash.Config\n\t\tTxPool                  core.TxPoolConfig\n\t\tGPO                     gasprice.Config\n\t\tEnablePreimageRecording bool\n\t\tDocRoot                 string `toml:\"-\"`\n\t\tEWASMInterpreter        string\n\t\tEVMInterpreter          string\n\t\tRPCGasCap               uint64                         `toml:\",omitempty\"`\n\t\tRPCTxFeeCap             float64                        `toml:\",omitempty\"`\n\t\tCheckpoint              *params.TrustedCheckpoint      `toml:\",omitempty\"`\n\t\tCheckpointOracle        *params.CheckpointOracleConfig `toml:\",omitempty\"`\n\t}\n\tvar enc Config\n\tenc.Genesis = c.Genesis\n\tenc.NetworkId = c.NetworkId\n\tenc.SyncMode = c.SyncMode\n\tenc.EthDiscoveryURLs = c.EthDiscoveryURLs\n\tenc.SnapDiscoveryURLs = c.SnapDiscoveryURLs\n\tenc.NoPruning = c.NoPruning\n\tenc.NoPrefetch = c.NoPrefetch\n\tenc.TxLookupLimit = c.TxLookupLimit\n\tenc.Whitelist = c.Whitelist\n\tenc.LightServ = c.LightServ\n\tenc.LightIngress = c.LightIngress\n\tenc.LightEgress = c.LightEgress\n\tenc.LightPeers = c.LightPeers\n\tenc.LightNoPrune = c.LightNoPrune\n\tenc.LightNoSyncServe = c.LightNoSyncServe\n\tenc.SyncFromCheckpoint = c.SyncFromCheckpoint\n\tenc.UltraLightServers = 
c.UltraLightServers\n\tenc.UltraLightFraction = c.UltraLightFraction\n\tenc.UltraLightOnlyAnnounce = c.UltraLightOnlyAnnounce\n\tenc.SkipBcVersionCheck = c.SkipBcVersionCheck\n\tenc.DatabaseHandles = c.DatabaseHandles\n\tenc.DatabaseCache = c.DatabaseCache\n\tenc.DatabaseFreezer = c.DatabaseFreezer\n\tenc.TrieCleanCache = c.TrieCleanCache\n\tenc.TrieCleanCacheJournal = c.TrieCleanCacheJournal\n\tenc.TrieCleanCacheRejournal = c.TrieCleanCacheRejournal\n\tenc.TrieDirtyCache = c.TrieDirtyCache\n\tenc.TrieTimeout = c.TrieTimeout\n\tenc.SnapshotCache = c.SnapshotCache\n\tenc.Preimages = c.Preimages\n\tenc.Miner = c.Miner\n\tenc.Ethash = c.Ethash\n\tenc.TxPool = c.TxPool\n\tenc.GPO = c.GPO\n\tenc.EnablePreimageRecording = c.EnablePreimageRecording\n\tenc.DocRoot = c.DocRoot\n\tenc.EWASMInterpreter = c.EWASMInterpreter\n\tenc.EVMInterpreter = c.EVMInterpreter\n\tenc.RPCGasCap = c.RPCGasCap\n\tenc.RPCTxFeeCap = c.RPCTxFeeCap\n\tenc.Checkpoint = c.Checkpoint\n\tenc.CheckpointOracle = c.CheckpointOracle\n\treturn &enc, nil\n}\n\n// UnmarshalTOML unmarshals from TOML.\nfunc (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {\n\ttype Config struct {\n\t\tGenesis                 *core.Genesis `toml:\",omitempty\"`\n\t\tNetworkId               *uint64\n\t\tSyncMode                *downloader.SyncMode\n\t\tEthDiscoveryURLs        []string\n\t\tSnapDiscoveryURLs       []string\n\t\tNoPruning               *bool\n\t\tNoPrefetch              *bool\n\t\tTxLookupLimit           *uint64                `toml:\",omitempty\"`\n\t\tWhitelist               map[uint64]common.Hash `toml:\"-\"`\n\t\tLightServ               *int                   `toml:\",omitempty\"`\n\t\tLightIngress            *int                   `toml:\",omitempty\"`\n\t\tLightEgress             *int                   `toml:\",omitempty\"`\n\t\tLightPeers              *int                   `toml:\",omitempty\"`\n\t\tLightNoPrune            *bool                  `toml:\",omitempty\"`\n\t\tLightNoSyncServe        *bool                  `toml:\",omitempty\"`\n\t\tSyncFromCheckpoint      *bool                  `toml:\",omitempty\"`\n\t\tUltraLightServers       []string               `toml:\",omitempty\"`\n\t\tUltraLightFraction      *int                   `toml:\",omitempty\"`\n\t\tUltraLightOnlyAnnounce  *bool                  `toml:\",omitempty\"`\n\t\tSkipBcVersionCheck      *bool                  `toml:\"-\"`\n\t\tDatabaseHandles         *int                   `toml:\"-\"`\n\t\tDatabaseCache           *int\n\t\tDatabaseFreezer         *string\n\t\tTrieCleanCache          *int\n\t\tTrieCleanCacheJournal   *string        `toml:\",omitempty\"`\n\t\tTrieCleanCacheRejournal *time.Duration `toml:\",omitempty\"`\n\t\tTrieDirtyCache          *int\n\t\tTrieTimeout             *time.Duration\n\t\tSnapshotCache           *int\n\t\tPreimages               *bool\n\t\tMiner                   *miner.Config\n\t\tEthash                  *ethash.Config\n\t\tTxPool                  *core.TxPoolConfig\n\t\tGPO                     *gasprice.Config\n\t\tEnablePreimageRecording *bool\n\t\tDocRoot                 *string `toml:\"-\"`\n\t\tEWASMInterpreter        *string\n\t\tEVMInterpreter          *string\n\t\tRPCGasCap               *uint64                        `toml:\",omitempty\"`\n\t\tRPCTxFeeCap             *float64                       `toml:\",omitempty\"`\n\t\tCheckpoint              *params.TrustedCheckpoint      `toml:\",omitempty\"`\n\t\tCheckpointOracle        *params.CheckpointOracleConfig `toml:\",omitempty\"`\n\t}\n\tvar dec 
Config\n\tif err := unmarshal(&dec); err != nil {\n\t\treturn err\n\t}\n\tif dec.Genesis != nil {\n\t\tc.Genesis = dec.Genesis\n\t}\n\tif dec.NetworkId != nil {\n\t\tc.NetworkId = *dec.NetworkId\n\t}\n\tif dec.SyncMode != nil {\n\t\tc.SyncMode = *dec.SyncMode\n\t}\n\tif dec.EthDiscoveryURLs != nil {\n\t\tc.EthDiscoveryURLs = dec.EthDiscoveryURLs\n\t}\n\tif dec.SnapDiscoveryURLs != nil {\n\t\tc.SnapDiscoveryURLs = dec.SnapDiscoveryURLs\n\t}\n\tif dec.NoPruning != nil {\n\t\tc.NoPruning = *dec.NoPruning\n\t}\n\tif dec.NoPrefetch != nil {\n\t\tc.NoPrefetch = *dec.NoPrefetch\n\t}\n\tif dec.TxLookupLimit != nil {\n\t\tc.TxLookupLimit = *dec.TxLookupLimit\n\t}\n\tif dec.Whitelist != nil {\n\t\tc.Whitelist = dec.Whitelist\n\t}\n\tif dec.LightServ != nil {\n\t\tc.LightServ = *dec.LightServ\n\t}\n\tif dec.LightIngress != nil {\n\t\tc.LightIngress = *dec.LightIngress\n\t}\n\tif dec.LightEgress != nil {\n\t\tc.LightEgress = *dec.LightEgress\n\t}\n\tif dec.LightPeers != nil {\n\t\tc.LightPeers = *dec.LightPeers\n\t}\n\tif dec.LightNoPrune != nil {\n\t\tc.LightNoPrune = *dec.LightNoPrune\n\t}\n\tif dec.LightNoSyncServe != nil {\n\t\tc.LightNoSyncServe = *dec.LightNoSyncServe\n\t}\n\tif dec.SyncFromCheckpoint != nil {\n\t\tc.SyncFromCheckpoint = *dec.SyncFromCheckpoint\n\t}\n\tif dec.UltraLightServers != nil {\n\t\tc.UltraLightServers = dec.UltraLightServers\n\t}\n\tif dec.UltraLightFraction != nil {\n\t\tc.UltraLightFraction = *dec.UltraLightFraction\n\t}\n\tif dec.UltraLightOnlyAnnounce != nil {\n\t\tc.UltraLightOnlyAnnounce = *dec.UltraLightOnlyAnnounce\n\t}\n\tif dec.SkipBcVersionCheck != nil {\n\t\tc.SkipBcVersionCheck = *dec.SkipBcVersionCheck\n\t}\n\tif dec.DatabaseHandles != nil {\n\t\tc.DatabaseHandles = *dec.DatabaseHandles\n\t}\n\tif dec.DatabaseCache != nil {\n\t\tc.DatabaseCache = *dec.DatabaseCache\n\t}\n\tif dec.DatabaseFreezer != nil {\n\t\tc.DatabaseFreezer = *dec.DatabaseFreezer\n\t}\n\tif dec.TrieCleanCache != nil {\n\t\tc.TrieCleanCache = *dec.TrieCleanCache\n\t}\n\tif dec.TrieCleanCacheJournal != nil {\n\t\tc.TrieCleanCacheJournal = *dec.TrieCleanCacheJournal\n\t}\n\tif dec.TrieCleanCacheRejournal != nil {\n\t\tc.TrieCleanCacheRejournal = *dec.TrieCleanCacheRejournal\n\t}\n\tif dec.TrieDirtyCache != nil {\n\t\tc.TrieDirtyCache = *dec.TrieDirtyCache\n\t}\n\tif dec.TrieTimeout != nil {\n\t\tc.TrieTimeout = *dec.TrieTimeout\n\t}\n\tif dec.SnapshotCache != nil {\n\t\tc.SnapshotCache = *dec.SnapshotCache\n\t}\n\tif dec.Preimages != nil {\n\t\tc.Preimages = *dec.Preimages\n\t}\n\tif dec.Miner != nil {\n\t\tc.Miner = *dec.Miner\n\t}\n\tif dec.Ethash != nil {\n\t\tc.Ethash = *dec.Ethash\n\t}\n\tif dec.TxPool != nil {\n\t\tc.TxPool = *dec.TxPool\n\t}\n\tif dec.GPO != nil {\n\t\tc.GPO = *dec.GPO\n\t}\n\tif dec.EnablePreimageRecording != nil {\n\t\tc.EnablePreimageRecording = *dec.EnablePreimageRecording\n\t}\n\tif dec.DocRoot != nil {\n\t\tc.DocRoot = *dec.DocRoot\n\t}\n\tif dec.EWASMInterpreter != nil {\n\t\tc.EWASMInterpreter = *dec.EWASMInterpreter\n\t}\n\tif dec.EVMInterpreter != nil {\n\t\tc.EVMInterpreter = *dec.EVMInterpreter\n\t}\n\tif dec.RPCGasCap != nil {\n\t\tc.RPCGasCap = *dec.RPCGasCap\n\t}\n\tif dec.RPCTxFeeCap != nil {\n\t\tc.RPCTxFeeCap = *dec.RPCTxFeeCap\n\t}\n\tif dec.Checkpoint != nil {\n\t\tc.Checkpoint = dec.Checkpoint\n\t}\n\tif dec.CheckpointOracle != nil {\n\t\tc.CheckpointOracle = dec.CheckpointOracle\n\t}\n\treturn nil\n}\n"
  },
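  {
    "path": "examples/gen_config_pointer_sketch.go",
    "content": "// Hypothetical standalone sketch, not part of the repository: it demonstrates\n// the pointer-field pattern behind the generated UnmarshalTOML above. Every\n// field of the decode-side struct is a pointer, so a key that is absent from\n// the TOML file stays nil and the compiled-in default is left untouched. All\n// names below are invented for illustration.\npackage main\n\nimport \"fmt\"\n\n// config mirrors a tiny slice of ethconfig.Config with plain value fields.\ntype config struct {\n\tNetworkId     uint64\n\tDatabaseCache int\n}\n\n// partial mirrors the generated decode struct: pointers distinguish \"key not\n// present in the file\" (nil) from \"explicitly set to the zero value\".\ntype partial struct {\n\tNetworkId     *uint64\n\tDatabaseCache *int\n}\n\n// apply copies over only the fields that were actually present.\nfunc apply(c *config, p partial) {\n\tif p.NetworkId != nil {\n\t\tc.NetworkId = *p.NetworkId\n\t}\n\tif p.DatabaseCache != nil {\n\t\tc.DatabaseCache = *p.DatabaseCache\n\t}\n}\n\nfunc main() {\n\tc := config{NetworkId: 1, DatabaseCache: 512} // compiled-in defaults\n\tid := uint64(5)\n\tapply(&c, partial{NetworkId: &id}) // the \"file\" only sets NetworkId\n\tfmt.Println(c.NetworkId, c.DatabaseCache) // prints: 5 512\n}\n"
  },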
  {
    "path": "eth/fetcher/block_fetcher.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package fetcher contains the announcement based header, blocks or transaction synchronisation.\npackage fetcher\n\nimport (\n\t\"errors\"\n\t\"math/rand\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/prque\"\n\t\"github.com/ethereum/go-ethereum/consensus\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/metrics\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\nconst (\n\tlightTimeout  = time.Millisecond       // Time allowance before an announced header is explicitly requested\n\tarriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested\n\tgatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches\n\tfetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block/transaction\n)\n\nconst (\n\tmaxUncleDist = 7   // Maximum allowed backward distance from the chain head\n\tmaxQueueDist = 32  // Maximum allowed distance from the chain head to queue\n\thashLimit    = 256 // Maximum number of unique blocks or headers a peer may have announced\n\tblockLimit   = 64  // Maximum number of unique blocks a peer may have delivered\n)\n\nvar (\n\tblockAnnounceInMeter   = metrics.NewRegisteredMeter(\"eth/fetcher/block/announces/in\", nil)\n\tblockAnnounceOutTimer  = metrics.NewRegisteredTimer(\"eth/fetcher/block/announces/out\", nil)\n\tblockAnnounceDropMeter = metrics.NewRegisteredMeter(\"eth/fetcher/block/announces/drop\", nil)\n\tblockAnnounceDOSMeter  = metrics.NewRegisteredMeter(\"eth/fetcher/block/announces/dos\", nil)\n\n\tblockBroadcastInMeter   = metrics.NewRegisteredMeter(\"eth/fetcher/block/broadcasts/in\", nil)\n\tblockBroadcastOutTimer  = metrics.NewRegisteredTimer(\"eth/fetcher/block/broadcasts/out\", nil)\n\tblockBroadcastDropMeter = metrics.NewRegisteredMeter(\"eth/fetcher/block/broadcasts/drop\", nil)\n\tblockBroadcastDOSMeter  = metrics.NewRegisteredMeter(\"eth/fetcher/block/broadcasts/dos\", nil)\n\n\theaderFetchMeter = metrics.NewRegisteredMeter(\"eth/fetcher/block/headers\", nil)\n\tbodyFetchMeter   = metrics.NewRegisteredMeter(\"eth/fetcher/block/bodies\", nil)\n\n\theaderFilterInMeter  = metrics.NewRegisteredMeter(\"eth/fetcher/block/filter/headers/in\", nil)\n\theaderFilterOutMeter = metrics.NewRegisteredMeter(\"eth/fetcher/block/filter/headers/out\", nil)\n\tbodyFilterInMeter    = metrics.NewRegisteredMeter(\"eth/fetcher/block/filter/bodies/in\", nil)\n\tbodyFilterOutMeter   = metrics.NewRegisteredMeter(\"eth/fetcher/block/filter/bodies/out\", nil)\n)\n\nvar errTerminated = 
errors.New(\"terminated\")\n\n// HeaderRetrievalFn is a callback type for retrieving a header from the local chain.\ntype HeaderRetrievalFn func(common.Hash) *types.Header\n\n// blockRetrievalFn is a callback type for retrieving a block from the local chain.\ntype blockRetrievalFn func(common.Hash) *types.Block\n\n// headerRequesterFn is a callback type for sending a header retrieval request.\ntype headerRequesterFn func(common.Hash) error\n\n// bodyRequesterFn is a callback type for sending a body retrieval request.\ntype bodyRequesterFn func([]common.Hash) error\n\n// headerVerifierFn is a callback type to verify a block's header for fast propagation.\ntype headerVerifierFn func(header *types.Header) error\n\n// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.\ntype blockBroadcasterFn func(block *types.Block, propagate bool)\n\n// chainHeightFn is a callback type to retrieve the current chain height.\ntype chainHeightFn func() uint64\n\n// headersInsertFn is a callback type to insert a batch of headers into the local chain.\ntype headersInsertFn func(headers []*types.Header) (int, error)\n\n// chainInsertFn is a callback type to insert a batch of blocks into the local chain.\ntype chainInsertFn func(types.Blocks) (int, error)\n\n// peerDropFn is a callback type for dropping a peer detected as malicious.\ntype peerDropFn func(id string)\n\n// blockAnnounce is the hash notification of the availability of a new block in the\n// network.\ntype blockAnnounce struct {\n\thash   common.Hash   // Hash of the block being announced\n\tnumber uint64        // Number of the block being announced (0 = unknown | old protocol)\n\theader *types.Header // Header of the block partially reassembled (new protocol)\n\ttime   time.Time     // Timestamp of the announcement\n\n\torigin string // Identifier of the peer originating the notification\n\n\tfetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block\n\tfetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block\n}\n\n// headerFilterTask represents a batch of headers needing fetcher filtering.\ntype headerFilterTask struct {\n\tpeer    string          // The source peer of block headers\n\theaders []*types.Header // Collection of headers to filter\n\ttime    time.Time       // Arrival time of the headers\n}\n\n// bodyFilterTask represents a batch of block bodies (transactions and uncles)\n// needing fetcher filtering.\ntype bodyFilterTask struct {\n\tpeer         string                 // The source peer of block bodies\n\ttransactions [][]*types.Transaction // Collection of transactions per block bodies\n\tuncles       [][]*types.Header      // Collection of uncles per block bodies\n\ttime         time.Time              // Arrival time of the blocks' contents\n}\n\n// blockOrHeaderInject represents a schedules import operation.\ntype blockOrHeaderInject struct {\n\torigin string\n\n\theader *types.Header // Used for light mode fetcher which only cares about header.\n\tblock  *types.Block  // Used for normal mode fetcher which imports full block.\n}\n\n// number returns the block number of the injected object.\nfunc (inject *blockOrHeaderInject) number() uint64 {\n\tif inject.header != nil {\n\t\treturn inject.header.Number.Uint64()\n\t}\n\treturn inject.block.NumberU64()\n}\n\n// number returns the block hash of the injected object.\nfunc (inject *blockOrHeaderInject) hash() common.Hash {\n\tif inject.header != nil {\n\t\treturn 
inject.header.Hash()\n\t}\n\treturn inject.block.Hash()\n}\n\n// BlockFetcher is responsible for accumulating block announcements from various peers\n// and scheduling them for retrieval.\ntype BlockFetcher struct {\n\tlight bool // The indicator whether it's a light fetcher or normal one.\n\n\t// Various event channels\n\tnotify chan *blockAnnounce\n\tinject chan *blockOrHeaderInject\n\n\theaderFilter chan chan *headerFilterTask\n\tbodyFilter   chan chan *bodyFilterTask\n\n\tdone chan common.Hash\n\tquit chan struct{}\n\n\t// Announce states\n\tannounces  map[string]int                   // Per peer blockAnnounce counts to prevent memory exhaustion\n\tannounced  map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching\n\tfetching   map[common.Hash]*blockAnnounce   // Announced blocks, currently fetching\n\tfetched    map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval\n\tcompleting map[common.Hash]*blockAnnounce   // Blocks with headers, currently body-completing\n\n\t// Block cache\n\tqueue  *prque.Prque                         // Queue containing the import operations (block number sorted)\n\tqueues map[string]int                       // Per peer block counts to prevent memory exhaustion\n\tqueued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports)\n\n\t// Callbacks\n\tgetHeader      HeaderRetrievalFn  // Retrieves a header from the local chain\n\tgetBlock       blockRetrievalFn   // Retrieves a block from the local chain\n\tverifyHeader   headerVerifierFn   // Checks if a block's headers have a valid proof of work\n\tbroadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers\n\tchainHeight    chainHeightFn      // Retrieves the current chain's height\n\tinsertHeaders  headersInsertFn    // Injects a batch of headers into the chain\n\tinsertChain    chainInsertFn      // Injects a batch of blocks into the chain\n\tdropPeer       peerDropFn         // Drops a peer for misbehaving\n\n\t// Testing hooks\n\tannounceChangeHook func(common.Hash, bool)           // Method to call upon adding or deleting a hash from the blockAnnounce list\n\tqueueChangeHook    func(common.Hash, bool)           // Method to call upon adding or deleting a block from the import queue\n\tfetchingHook       func([]common.Hash)               // Method to call upon starting a block (eth/61) or header (eth/62) fetch\n\tcompletingHook     func([]common.Hash)               // Method to call upon starting a block body fetch (eth/62)\n\timportedHook       func(*types.Header, *types.Block) // Method to call upon successful header or block import (both eth/61 and eth/62)\n}\n\n// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.\nfunc NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {\n\treturn &BlockFetcher{\n\t\tlight:          light,\n\t\tnotify:         make(chan *blockAnnounce),\n\t\tinject:         make(chan *blockOrHeaderInject),\n\t\theaderFilter:   make(chan chan *headerFilterTask),\n\t\tbodyFilter:     make(chan chan *bodyFilterTask),\n\t\tdone:           make(chan common.Hash),\n\t\tquit:           make(chan struct{}),\n\t\tannounces:      make(map[string]int),\n\t\tannounced:      make(map[common.Hash][]*blockAnnounce),\n\t\tfetching:       
make(map[common.Hash]*blockAnnounce),\n\t\tfetched:        make(map[common.Hash][]*blockAnnounce),\n\t\tcompleting:     make(map[common.Hash]*blockAnnounce),\n\t\tqueue:          prque.New(nil),\n\t\tqueues:         make(map[string]int),\n\t\tqueued:         make(map[common.Hash]*blockOrHeaderInject),\n\t\tgetHeader:      getHeader,\n\t\tgetBlock:       getBlock,\n\t\tverifyHeader:   verifyHeader,\n\t\tbroadcastBlock: broadcastBlock,\n\t\tchainHeight:    chainHeight,\n\t\tinsertHeaders:  insertHeaders,\n\t\tinsertChain:    insertChain,\n\t\tdropPeer:       dropPeer,\n\t}\n}\n\n// Start boots up the announcement based synchroniser, accepting and processing\n// hash notifications and block fetches until termination is requested.\nfunc (f *BlockFetcher) Start() {\n\tgo f.loop()\n}\n\n// Stop terminates the announcement based synchroniser, canceling all pending\n// operations.\nfunc (f *BlockFetcher) Stop() {\n\tclose(f.quit)\n}\n\n// Notify informs the fetcher of the potential availability of a new block in\n// the network.\nfunc (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,\n\theaderFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {\n\tblock := &blockAnnounce{\n\t\thash:        hash,\n\t\tnumber:      number,\n\t\ttime:        time,\n\t\torigin:      peer,\n\t\tfetchHeader: headerFetcher,\n\t\tfetchBodies: bodyFetcher,\n\t}\n\tselect {\n\tcase f.notify <- block:\n\t\treturn nil\n\tcase <-f.quit:\n\t\treturn errTerminated\n\t}\n}\n\n// Enqueue tries to fill gaps in the fetcher's future import queue.\nfunc (f *BlockFetcher) Enqueue(peer string, block *types.Block) error {\n\top := &blockOrHeaderInject{\n\t\torigin: peer,\n\t\tblock:  block,\n\t}\n\tselect {\n\tcase f.inject <- op:\n\t\treturn nil\n\tcase <-f.quit:\n\t\treturn errTerminated\n\t}\n}\n\n// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,\n// returning those that should be handled differently.\nfunc (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {\n\tlog.Trace(\"Filtering headers\", \"peer\", peer, \"headers\", len(headers))\n\n\t// Send the filter channel to the fetcher\n\tfilter := make(chan *headerFilterTask)\n\n\tselect {\n\tcase f.headerFilter <- filter:\n\tcase <-f.quit:\n\t\treturn nil\n\t}\n\t// Request the filtering of the header list\n\tselect {\n\tcase filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:\n\tcase <-f.quit:\n\t\treturn nil\n\t}\n\t// Retrieve the headers remaining after filtering\n\tselect {\n\tcase task := <-filter:\n\t\treturn task.headers\n\tcase <-f.quit:\n\t\treturn nil\n\t}\n}\n\n// FilterBodies extracts all the block bodies that were explicitly requested by\n// the fetcher, returning those that should be handled differently.\nfunc (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {\n\tlog.Trace(\"Filtering bodies\", \"peer\", peer, \"txs\", len(transactions), \"uncles\", len(uncles))\n\n\t// Send the filter channel to the fetcher\n\tfilter := make(chan *bodyFilterTask)\n\n\tselect {\n\tcase f.bodyFilter <- filter:\n\tcase <-f.quit:\n\t\treturn nil, nil\n\t}\n\t// Request the filtering of the body list\n\tselect {\n\tcase filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:\n\tcase <-f.quit:\n\t\treturn nil, nil\n\t}\n\t// Retrieve the bodies remaining after filtering\n\tselect 
{\n\tcase task := <-filter:\n\t\treturn task.transactions, task.uncles\n\tcase <-f.quit:\n\t\treturn nil, nil\n\t}\n}\n\n// Loop is the main fetcher loop, checking and processing various notification\n// events.\nfunc (f *BlockFetcher) loop() {\n\t// Iterate the block fetching until a quit is requested\n\tfetchTimer := time.NewTimer(0)\n\tcompleteTimer := time.NewTimer(0)\n\tdefer fetchTimer.Stop()\n\tdefer completeTimer.Stop()\n\n\tfor {\n\t\t// Clean up any expired block fetches\n\t\tfor hash, announce := range f.fetching {\n\t\t\tif time.Since(announce.time) > fetchTimeout {\n\t\t\t\tf.forgetHash(hash)\n\t\t\t}\n\t\t}\n\t\t// Import any queued blocks that could potentially fit\n\t\theight := f.chainHeight()\n\t\tfor !f.queue.Empty() {\n\t\t\top := f.queue.PopItem().(*blockOrHeaderInject)\n\t\t\thash := op.hash()\n\t\t\tif f.queueChangeHook != nil {\n\t\t\t\tf.queueChangeHook(hash, false)\n\t\t\t}\n\t\t\t// If too high up the chain or phase, continue later\n\t\t\tnumber := op.number()\n\t\t\tif number > height+1 {\n\t\t\t\tf.queue.Push(op, -int64(number))\n\t\t\t\tif f.queueChangeHook != nil {\n\t\t\t\t\tf.queueChangeHook(hash, true)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Otherwise if fresh and still unknown, try and import\n\t\t\tif (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) {\n\t\t\t\tf.forgetBlock(hash)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif f.light {\n\t\t\t\tf.importHeaders(op.origin, op.header)\n\t\t\t} else {\n\t\t\t\tf.importBlocks(op.origin, op.block)\n\t\t\t}\n\t\t}\n\t\t// Wait for an outside event to occur\n\t\tselect {\n\t\tcase <-f.quit:\n\t\t\t// BlockFetcher terminating, abort all operations\n\t\t\treturn\n\n\t\tcase notification := <-f.notify:\n\t\t\t// A block was announced, make sure the peer isn't DOSing us\n\t\t\tblockAnnounceInMeter.Mark(1)\n\n\t\t\tcount := f.announces[notification.origin] + 1\n\t\t\tif count > hashLimit {\n\t\t\t\tlog.Debug(\"Peer exceeded outstanding announces\", \"peer\", notification.origin, \"limit\", hashLimit)\n\t\t\t\tblockAnnounceDOSMeter.Mark(1)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// If we have a valid block number, check that it's potentially useful\n\t\t\tif notification.number > 0 {\n\t\t\t\tif dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {\n\t\t\t\t\tlog.Debug(\"Peer discarded announcement\", \"peer\", notification.origin, \"number\", notification.number, \"hash\", notification.hash, \"distance\", dist)\n\t\t\t\t\tblockAnnounceDropMeter.Mark(1)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t// All is well, schedule the announce if block's not yet downloading\n\t\t\tif _, ok := f.fetching[notification.hash]; ok {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif _, ok := f.completing[notification.hash]; ok {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tf.announces[notification.origin] = count\n\t\t\tf.announced[notification.hash] = append(f.announced[notification.hash], notification)\n\t\t\tif f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {\n\t\t\t\tf.announceChangeHook(notification.hash, true)\n\t\t\t}\n\t\t\tif len(f.announced) == 1 {\n\t\t\t\tf.rescheduleFetch(fetchTimer)\n\t\t\t}\n\n\t\tcase op := <-f.inject:\n\t\t\t// A direct block insertion was requested, try and fill any pending gaps\n\t\t\tblockBroadcastInMeter.Mark(1)\n\n\t\t\t// Now only direct block injection is allowed, drop the header injection\n\t\t\t// here silently if we receive.\n\t\t\tif f.light 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf.enqueue(op.origin, nil, op.block)\n\n\t\tcase hash := <-f.done:\n\t\t\t// A pending import finished, remove all traces of the notification\n\t\t\tf.forgetHash(hash)\n\t\t\tf.forgetBlock(hash)\n\n\t\tcase <-fetchTimer.C:\n\t\t\t// At least one block's timer ran out, check for needing retrieval\n\t\t\trequest := make(map[string][]common.Hash)\n\n\t\t\tfor hash, announces := range f.announced {\n\t\t\t\t// In the current LES protocol (les2/les3), only header announces are\n\t\t\t\t// available, so there is no need to wait long for the header broadcast.\n\t\t\t\ttimeout := arriveTimeout - gatherSlack\n\t\t\t\tif f.light {\n\t\t\t\t\ttimeout = 0\n\t\t\t\t}\n\t\t\t\tif time.Since(announces[0].time) > timeout {\n\t\t\t\t\t// Pick a random peer to retrieve from, reset all others\n\t\t\t\t\tannounce := announces[rand.Intn(len(announces))]\n\t\t\t\t\tf.forgetHash(hash)\n\n\t\t\t\t\t// If the block still didn't arrive, queue for fetching\n\t\t\t\t\tif (f.light && f.getHeader(hash) == nil) || (!f.light && f.getBlock(hash) == nil) {\n\t\t\t\t\t\trequest[announce.origin] = append(request[announce.origin], hash)\n\t\t\t\t\t\tf.fetching[hash] = announce\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Send out all block header requests\n\t\t\tfor peer, hashes := range request {\n\t\t\t\tlog.Trace(\"Fetching scheduled headers\", \"peer\", peer, \"list\", hashes)\n\n\t\t\t\t// Create a closure of the fetch and schedule it on a new thread\n\t\t\t\tfetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes\n\t\t\t\tgo func() {\n\t\t\t\t\tif f.fetchingHook != nil {\n\t\t\t\t\t\tf.fetchingHook(hashes)\n\t\t\t\t\t}\n\t\t\t\t\tfor _, hash := range hashes {\n\t\t\t\t\t\theaderFetchMeter.Mark(1)\n\t\t\t\t\t\tfetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\t\t// Schedule the next fetch if blocks are still pending\n\t\t\tf.rescheduleFetch(fetchTimer)\n\n\t\tcase <-completeTimer.C:\n\t\t\t// At least one header's timer ran out, retrieve everything\n\t\t\trequest := make(map[string][]common.Hash)\n\n\t\t\tfor hash, announces := range f.fetched {\n\t\t\t\t// Pick a random peer to retrieve from, reset all others\n\t\t\t\tannounce := announces[rand.Intn(len(announces))]\n\t\t\t\tf.forgetHash(hash)\n\n\t\t\t\t// If the block still didn't arrive, queue for completion\n\t\t\t\tif f.getBlock(hash) == nil {\n\t\t\t\t\trequest[announce.origin] = append(request[announce.origin], hash)\n\t\t\t\t\tf.completing[hash] = announce\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Send out all block body requests\n\t\t\tfor peer, hashes := range request {\n\t\t\t\tlog.Trace(\"Fetching scheduled bodies\", \"peer\", peer, \"list\", hashes)\n\n\t\t\t\t// Create a closure of the fetch and schedule it on a new thread\n\t\t\t\tif f.completingHook != nil {\n\t\t\t\t\tf.completingHook(hashes)\n\t\t\t\t}\n\t\t\t\tbodyFetchMeter.Mark(int64(len(hashes)))\n\t\t\t\tgo f.completing[hashes[0]].fetchBodies(hashes)\n\t\t\t}\n\t\t\t// Schedule the next fetch if blocks are still pending\n\t\t\tf.rescheduleComplete(completeTimer)\n\n\t\tcase filter := <-f.headerFilter:\n\t\t\t// Headers arrived from a remote peer. 
Extract those that were explicitly\n\t\t\t// requested by the fetcher, and return everything else so it's delivered\n\t\t\t// to other parts of the system.\n\t\t\tvar task *headerFilterTask\n\t\t\tselect {\n\t\t\tcase task = <-filter:\n\t\t\tcase <-f.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t\theaderFilterInMeter.Mark(int64(len(task.headers)))\n\n\t\t\t// Split the batch of headers into unknown ones (to return to the caller),\n\t\t\t// known incomplete ones (requiring body retrievals) and completed blocks.\n\t\t\tunknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{}\n\t\t\tfor _, header := range task.headers {\n\t\t\t\thash := header.Hash()\n\n\t\t\t\t// Filter fetcher-requested headers from other synchronisation algorithms\n\t\t\t\tif announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {\n\t\t\t\t\t// If the delivered header does not match the promised number, drop the announcer\n\t\t\t\t\tif header.Number.Uint64() != announce.number {\n\t\t\t\t\t\tlog.Trace(\"Invalid block number fetched\", \"peer\", announce.origin, \"hash\", header.Hash(), \"announced\", announce.number, \"provided\", header.Number)\n\t\t\t\t\t\tf.dropPeer(announce.origin)\n\t\t\t\t\t\tf.forgetHash(hash)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t// Collect all headers only if we are running in light\n\t\t\t\t\t// mode and the headers are not imported by other means.\n\t\t\t\t\tif f.light {\n\t\t\t\t\t\tif f.getHeader(hash) == nil {\n\t\t\t\t\t\t\tannounce.header = header\n\t\t\t\t\t\t\tlightHeaders = append(lightHeaders, announce)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tf.forgetHash(hash)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t// Only keep if not imported by other means\n\t\t\t\t\tif f.getBlock(hash) == nil {\n\t\t\t\t\t\tannounce.header = header\n\t\t\t\t\t\tannounce.time = task.time\n\n\t\t\t\t\t\t// If the block is empty (header only), short circuit into the final import queue\n\t\t\t\t\t\tif header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash {\n\t\t\t\t\t\t\tlog.Trace(\"Block empty, skipping body retrieval\", \"peer\", announce.origin, \"number\", header.Number, \"hash\", header.Hash())\n\n\t\t\t\t\t\t\tblock := types.NewBlockWithHeader(header)\n\t\t\t\t\t\t\tblock.ReceivedAt = task.time\n\n\t\t\t\t\t\t\tcomplete = append(complete, block)\n\t\t\t\t\t\t\tf.completing[hash] = announce\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// Otherwise add to the list of blocks needing completion\n\t\t\t\t\t\tincomplete = append(incomplete, announce)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlog.Trace(\"Block already imported, discarding header\", \"peer\", announce.origin, \"number\", header.Number, \"hash\", header.Hash())\n\t\t\t\t\t\tf.forgetHash(hash)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t// BlockFetcher doesn't know about it, add to the return list\n\t\t\t\t\tunknown = append(unknown, header)\n\t\t\t\t}\n\t\t\t}\n\t\t\theaderFilterOutMeter.Mark(int64(len(unknown)))\n\t\t\tselect {\n\t\t\tcase filter <- &headerFilterTask{headers: unknown, time: task.time}:\n\t\t\tcase <-f.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Schedule the retrieved headers for body completion\n\t\t\tfor _, announce := range incomplete {\n\t\t\t\thash := announce.header.Hash()\n\t\t\t\tif _, ok := f.completing[hash]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tf.fetched[hash] = append(f.fetched[hash], announce)\n\t\t\t\tif len(f.fetched) == 1 
{\n\t\t\t\t\tf.rescheduleComplete(completeTimer)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Schedule the header for light fetcher import\n\t\t\tfor _, announce := range lightHeaders {\n\t\t\t\tf.enqueue(announce.origin, announce.header, nil)\n\t\t\t}\n\t\t\t// Schedule the header-only blocks for import\n\t\t\tfor _, block := range complete {\n\t\t\t\tif announce := f.completing[block.Hash()]; announce != nil {\n\t\t\t\t\tf.enqueue(announce.origin, nil, block)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase filter := <-f.bodyFilter:\n\t\t\t// Block bodies arrived, extract any explicitly requested blocks, return the rest\n\t\t\tvar task *bodyFilterTask\n\t\t\tselect {\n\t\t\tcase task = <-filter:\n\t\t\tcase <-f.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbodyFilterInMeter.Mark(int64(len(task.transactions)))\n\t\t\tblocks := []*types.Block{}\n\t\t\t// abort early if there's nothing explicitly requested\n\t\t\tif len(f.completing) > 0 {\n\t\t\t\tfor i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {\n\t\t\t\t\t// Match up a body to any possible completion request\n\t\t\t\t\tvar (\n\t\t\t\t\t\tmatched   = false\n\t\t\t\t\t\tuncleHash common.Hash // calculated lazily and reused\n\t\t\t\t\t\ttxnHash   common.Hash // calculated lazily and reused\n\t\t\t\t\t)\n\t\t\t\t\tfor hash, announce := range f.completing {\n\t\t\t\t\t\tif f.queued[hash] != nil || announce.origin != task.peer {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif uncleHash == (common.Hash{}) {\n\t\t\t\t\t\t\tuncleHash = types.CalcUncleHash(task.uncles[i])\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif uncleHash != announce.header.UncleHash {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif txnHash == (common.Hash{}) {\n\t\t\t\t\t\t\ttxnHash = types.DeriveSha(types.Transactions(task.transactions[i]), trie.NewStackTrie(nil))\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif txnHash != announce.header.TxHash {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// Mark the body matched, reassemble if still unknown\n\t\t\t\t\t\tmatched = true\n\t\t\t\t\t\tif f.getBlock(hash) == nil {\n\t\t\t\t\t\t\tblock := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])\n\t\t\t\t\t\t\tblock.ReceivedAt = task.time\n\t\t\t\t\t\t\tblocks = append(blocks, block)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tf.forgetHash(hash)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\t\t\t\t\tif matched {\n\t\t\t\t\t\ttask.transactions = append(task.transactions[:i], task.transactions[i+1:]...)\n\t\t\t\t\t\ttask.uncles = append(task.uncles[:i], task.uncles[i+1:]...)\n\t\t\t\t\t\ti--\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tbodyFilterOutMeter.Mark(int64(len(task.transactions)))\n\t\t\tselect {\n\t\t\tcase filter <- task:\n\t\t\tcase <-f.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Schedule the retrieved blocks for ordered import\n\t\t\tfor _, block := range blocks {\n\t\t\t\tif announce := f.completing[block.Hash()]; announce != nil {\n\t\t\t\t\tf.enqueue(announce.origin, nil, block)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n// rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.\nfunc (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {\n\t// Short circuit if no blocks are announced\n\tif len(f.announced) == 0 {\n\t\treturn\n\t}\n\t// Schedule announcement retrieval quickly for light mode\n\t// since server won't send any headers to client.\n\tif f.light {\n\t\tfetch.Reset(lightTimeout)\n\t\treturn\n\t}\n\t// Otherwise find the earliest expiring announcement\n\tearliest := time.Now()\n\tfor _, announces := range f.announced {\n\t\tif 
earliest.After(announces[0].time) {\n\t\t\tearliest = announces[0].time\n\t\t}\n\t}\n\tfetch.Reset(arriveTimeout - time.Since(earliest))\n}\n\n// rescheduleComplete resets the specified completion timer to the next fetch timeout.\nfunc (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {\n\t// Short circuit if no headers are fetched\n\tif len(f.fetched) == 0 {\n\t\treturn\n\t}\n\t// Otherwise find the earliest expiring announcement\n\tearliest := time.Now()\n\tfor _, announces := range f.fetched {\n\t\tif earliest.After(announces[0].time) {\n\t\t\tearliest = announces[0].time\n\t\t}\n\t}\n\tcomplete.Reset(gatherSlack - time.Since(earliest))\n}\n\n// enqueue schedules a new header or block import operation, if the component\n// to be imported has not yet been seen.\nfunc (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) {\n\tvar (\n\t\thash   common.Hash\n\t\tnumber uint64\n\t)\n\tif header != nil {\n\t\thash, number = header.Hash(), header.Number.Uint64()\n\t} else {\n\t\thash, number = block.Hash(), block.NumberU64()\n\t}\n\t// Ensure the peer isn't DOSing us\n\tcount := f.queues[peer] + 1\n\tif count > blockLimit {\n\t\tlog.Debug(\"Discarded delivered header or block, exceeded allowance\", \"peer\", peer, \"number\", number, \"hash\", hash, \"limit\", blockLimit)\n\t\tblockBroadcastDOSMeter.Mark(1)\n\t\tf.forgetHash(hash)\n\t\treturn\n\t}\n\t// Discard any past or too distant blocks\n\tif dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {\n\t\tlog.Debug(\"Discarded delivered header or block, too far away\", \"peer\", peer, \"number\", number, \"hash\", hash, \"distance\", dist)\n\t\tblockBroadcastDropMeter.Mark(1)\n\t\tf.forgetHash(hash)\n\t\treturn\n\t}\n\t// Schedule the block for future importing\n\tif _, ok := f.queued[hash]; !ok {\n\t\top := &blockOrHeaderInject{origin: peer}\n\t\tif header != nil {\n\t\t\top.header = header\n\t\t} else {\n\t\t\top.block = block\n\t\t}\n\t\tf.queues[peer] = count\n\t\tf.queued[hash] = op\n\t\tf.queue.Push(op, -int64(number))\n\t\tif f.queueChangeHook != nil {\n\t\t\tf.queueChangeHook(hash, true)\n\t\t}\n\t\tlog.Debug(\"Queued delivered header or block\", \"peer\", peer, \"number\", number, \"hash\", hash, \"queued\", f.queue.Size())\n\t}\n}\n\n// importHeaders spawns a new goroutine to run a header insertion into the chain.\n// If the header's number is at the same height as the current import phase, it\n// updates the phase states accordingly.\nfunc (f *BlockFetcher) importHeaders(peer string, header *types.Header) {\n\thash := header.Hash()\n\tlog.Debug(\"Importing propagated header\", \"peer\", peer, \"number\", header.Number, \"hash\", hash)\n\n\tgo func() {\n\t\tdefer func() { f.done <- hash }()\n\t\t// If the parent's unknown, abort insertion\n\t\tparent := f.getHeader(header.ParentHash)\n\t\tif parent == nil {\n\t\t\tlog.Debug(\"Unknown parent of propagated header\", \"peer\", peer, \"number\", header.Number, \"hash\", hash, \"parent\", header.ParentHash)\n\t\t\treturn\n\t\t}\n\t\t// Validate the header and if something went wrong, drop the peer\n\t\tif err := f.verifyHeader(header); err != nil && err != consensus.ErrFutureBlock {\n\t\t\tlog.Debug(\"Propagated header verification failed\", \"peer\", peer, \"number\", header.Number, \"hash\", hash, \"err\", err)\n\t\t\tf.dropPeer(peer)\n\t\t\treturn\n\t\t}\n\t\t// Run the actual import and log any issues\n\t\tif _, err := f.insertHeaders([]*types.Header{header}); err != nil {\n\t\t\tlog.Debug(\"Propagated header 
import failed\", \"peer\", peer, \"number\", header.Number, \"hash\", hash, \"err\", err)\n\t\t\treturn\n\t\t}\n\t\t// Invoke the testing hook if needed\n\t\tif f.importedHook != nil {\n\t\t\tf.importedHook(header, nil)\n\t\t}\n\t}()\n}\n\n// importBlocks spawns a new goroutine to run a block insertion into the chain. If the\n// block's number is at the same height as the current import phase, it updates\n// the phase states accordingly.\nfunc (f *BlockFetcher) importBlocks(peer string, block *types.Block) {\n\thash := block.Hash()\n\n\t// Run the import on a new thread\n\tlog.Debug(\"Importing propagated block\", \"peer\", peer, \"number\", block.Number(), \"hash\", hash)\n\tgo func() {\n\t\tdefer func() { f.done <- hash }()\n\n\t\t// If the parent's unknown, abort insertion\n\t\tparent := f.getBlock(block.ParentHash())\n\t\tif parent == nil {\n\t\t\tlog.Debug(\"Unknown parent of propagated block\", \"peer\", peer, \"number\", block.Number(), \"hash\", hash, \"parent\", block.ParentHash())\n\t\t\treturn\n\t\t}\n\t\t// Quickly validate the header and propagate the block if it passes\n\t\tswitch err := f.verifyHeader(block.Header()); err {\n\t\tcase nil:\n\t\t\t// All ok, quickly propagate to our peers\n\t\t\tblockBroadcastOutTimer.UpdateSince(block.ReceivedAt)\n\t\t\tgo f.broadcastBlock(block, true)\n\n\t\tcase consensus.ErrFutureBlock:\n\t\t\t// Weird future block, don't fail, but neither propagate\n\n\t\tdefault:\n\t\t\t// Something went very wrong, drop the peer\n\t\t\tlog.Debug(\"Propagated block verification failed\", \"peer\", peer, \"number\", block.Number(), \"hash\", hash, \"err\", err)\n\t\t\tf.dropPeer(peer)\n\t\t\treturn\n\t\t}\n\t\t// Run the actual import and log any issues\n\t\tif _, err := f.insertChain(types.Blocks{block}); err != nil {\n\t\t\tlog.Debug(\"Propagated block import failed\", \"peer\", peer, \"number\", block.Number(), \"hash\", hash, \"err\", err)\n\t\t\treturn\n\t\t}\n\t\t// If import succeeded, broadcast the block\n\t\tblockAnnounceOutTimer.UpdateSince(block.ReceivedAt)\n\t\tgo f.broadcastBlock(block, false)\n\n\t\t// Invoke the testing hook if needed\n\t\tif f.importedHook != nil {\n\t\t\tf.importedHook(nil, block)\n\t\t}\n\t}()\n}\n\n// forgetHash removes all traces of a block announcement from the fetcher's\n// internal state.\nfunc (f *BlockFetcher) forgetHash(hash common.Hash) {\n\t// Remove all pending announces and decrement DOS counters\n\tfor _, announce := range f.announced[hash] {\n\t\tf.announces[announce.origin]--\n\t\tif f.announces[announce.origin] <= 0 {\n\t\t\tdelete(f.announces, announce.origin)\n\t\t}\n\t}\n\tdelete(f.announced, hash)\n\tif f.announceChangeHook != nil {\n\t\tf.announceChangeHook(hash, false)\n\t}\n\t// Remove any pending fetches and decrement the DOS counters\n\tif announce := f.fetching[hash]; announce != nil {\n\t\tf.announces[announce.origin]--\n\t\tif f.announces[announce.origin] <= 0 {\n\t\t\tdelete(f.announces, announce.origin)\n\t\t}\n\t\tdelete(f.fetching, hash)\n\t}\n\n\t// Remove any pending completion requests and decrement the DOS counters\n\tfor _, announce := range f.fetched[hash] {\n\t\tf.announces[announce.origin]--\n\t\tif f.announces[announce.origin] <= 0 {\n\t\t\tdelete(f.announces, announce.origin)\n\t\t}\n\t}\n\tdelete(f.fetched, hash)\n\n\t// Remove any pending completions and decrement the DOS counters\n\tif announce := f.completing[hash]; announce != nil {\n\t\tf.announces[announce.origin]--\n\t\tif f.announces[announce.origin] <= 0 {\n\t\t\tdelete(f.announces, 
announce.origin)\n\t\t}\n\t\tdelete(f.completing, hash)\n\t}\n}\n\n// forgetBlock removes all traces of a queued block from the fetcher's internal\n// state.\nfunc (f *BlockFetcher) forgetBlock(hash common.Hash) {\n\tif insert := f.queued[hash]; insert != nil {\n\t\tf.queues[insert.origin]--\n\t\tif f.queues[insert.origin] == 0 {\n\t\t\tdelete(f.queues, insert.origin)\n\t\t}\n\t\tdelete(f.queued, hash)\n\t}\n}\n"
  },
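  {
    "path": "examples/block_fetcher_sketch.go",
    "content": "// Hypothetical wiring sketch, not part of the repository: it shows the shape\n// of the callbacks a BlockFetcher needs and how an announcement enters the\n// fetcher. Real callers (the eth protocol handler) wire these to the actual\n// blockchain and peer set; the no-op closures below exist only to compile.\npackage main\n\nimport (\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/eth/fetcher\"\n)\n\nfunc main() {\n\tf := fetcher.NewBlockFetcher(false, // full (non-light) mode\n\t\tfunc(common.Hash) *types.Header { return nil }, // getHeader: nothing known locally\n\t\tfunc(common.Hash) *types.Block { return nil }, // getBlock: nothing known locally\n\t\tfunc(*types.Header) error { return nil }, // verifyHeader: accept everything\n\t\tfunc(*types.Block, bool) {}, // broadcastBlock: no peers to serve\n\t\tfunc() uint64 { return 0 }, // chainHeight: empty chain\n\t\tfunc([]*types.Header) (int, error) { return 0, nil }, // insertHeaders\n\t\tfunc(types.Blocks) (int, error) { return 0, nil }, // insertChain\n\t\tfunc(string) {}, // dropPeer\n\t)\n\tf.Start()\n\tdefer f.Stop()\n\n\t// Announce block #1 from peer \"p1\". The two closures stand in for the\n\t// network requests that would normally fetch the header and the body.\n\t_ = f.Notify(\"p1\", common.Hash{0x01}, 1, time.Now(),\n\t\tfunc(common.Hash) error { return nil },\n\t\tfunc([]common.Hash) error { return nil })\n}\n"
  },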
  {
    "path": "eth/fetcher/block_fetcher_test.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage fetcher\n\nimport (\n\t\"errors\"\n\t\"math/big\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\nvar (\n\ttestdb       = rawdb.NewMemoryDatabase()\n\ttestKey, _   = crypto.HexToECDSA(\"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291\")\n\ttestAddress  = crypto.PubkeyToAddress(testKey.PublicKey)\n\tgenesis      = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000))\n\tunknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit}, nil, nil, nil, trie.NewStackTrie(nil))\n)\n\n// makeChain creates a chain of n blocks starting at and including parent.\n// the returned hash chain is ordered head->parent. 
In addition, every 3rd block\n// contains a transaction and every 5th an uncle to allow testing correct block\n// reassembly.\nfunc makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common.Hash]*types.Block) {\n\tblocks, _ := core.GenerateChain(params.TestChainConfig, parent, ethash.NewFaker(), testdb, n, func(i int, block *core.BlockGen) {\n\t\tblock.SetCoinbase(common.Address{seed})\n\n\t\t// If the block number is a multiple of 3, send a bonus transaction to the miner\n\t\tif parent == genesis && i%3 == 0 {\n\t\t\tsigner := types.MakeSigner(params.TestChainConfig, block.Number())\n\t\t\ttx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddress), common.Address{seed}, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tblock.AddTx(tx)\n\t\t}\n\t\t// If the block number is a multiple of 5, add a bonus uncle to the block\n\t\tif i%5 == 0 {\n\t\t\tblock.AddUncle(&types.Header{ParentHash: block.PrevBlock(i - 1).Hash(), Number: big.NewInt(int64(i - 1))})\n\t\t}\n\t})\n\thashes := make([]common.Hash, n+1)\n\thashes[len(hashes)-1] = parent.Hash()\n\tblockm := make(map[common.Hash]*types.Block, n+1)\n\tblockm[parent.Hash()] = parent\n\tfor i, b := range blocks {\n\t\thashes[len(hashes)-i-2] = b.Hash()\n\t\tblockm[b.Hash()] = b\n\t}\n\treturn hashes, blockm\n}\n\n// fetcherTester is a test simulator for mocking out the local block chain.\ntype fetcherTester struct {\n\tfetcher *BlockFetcher\n\n\thashes  []common.Hash                 // Hash chain belonging to the tester\n\theaders map[common.Hash]*types.Header // Headers belonging to the tester\n\tblocks  map[common.Hash]*types.Block  // Blocks belonging to the tester\n\tdrops   map[string]bool               // Map of peers dropped by the fetcher\n\n\tlock sync.RWMutex\n}\n\n// newTester creates a new fetcher test mocker.\nfunc newTester(light bool) *fetcherTester {\n\ttester := &fetcherTester{\n\t\thashes:  []common.Hash{genesis.Hash()},\n\t\theaders: map[common.Hash]*types.Header{genesis.Hash(): genesis.Header()},\n\t\tblocks:  map[common.Hash]*types.Block{genesis.Hash(): genesis},\n\t\tdrops:   make(map[string]bool),\n\t}\n\ttester.fetcher = NewBlockFetcher(light, tester.getHeader, tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertHeaders, tester.insertChain, tester.dropPeer)\n\ttester.fetcher.Start()\n\n\treturn tester\n}\n\n// getHeader retrieves a header from the tester's block chain.\nfunc (f *fetcherTester) getHeader(hash common.Hash) *types.Header {\n\tf.lock.RLock()\n\tdefer f.lock.RUnlock()\n\n\treturn f.headers[hash]\n}\n\n// getBlock retrieves a block from the tester's block chain.\nfunc (f *fetcherTester) getBlock(hash common.Hash) *types.Block {\n\tf.lock.RLock()\n\tdefer f.lock.RUnlock()\n\n\treturn f.blocks[hash]\n}\n\n// verifyHeader is a nop placeholder for the block header verification.\nfunc (f *fetcherTester) verifyHeader(header *types.Header) error {\n\treturn nil\n}\n\n// broadcastBlock is a nop placeholder for the block broadcasting.\nfunc (f *fetcherTester) broadcastBlock(block *types.Block, propagate bool) {\n}\n\n// chainHeight retrieves the current height (block number) of the chain.\nfunc (f *fetcherTester) chainHeight() uint64 {\n\tf.lock.RLock()\n\tdefer f.lock.RUnlock()\n\n\tif f.fetcher.light {\n\t\treturn f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64()\n\t}\n\treturn f.blocks[f.hashes[len(f.hashes)-1]].NumberU64()\n}\n\n// insertHeaders injects a batch of headers into the 
simulated chain.\nfunc (f *fetcherTester) insertHeaders(headers []*types.Header) (int, error) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tfor i, header := range headers {\n\t\t// Make sure the parent is known\n\t\tif _, ok := f.headers[header.ParentHash]; !ok {\n\t\t\treturn i, errors.New(\"unknown parent\")\n\t\t}\n\t\t// Discard any new blocks if the same height already exists\n\t\tif header.Number.Uint64() <= f.headers[f.hashes[len(f.hashes)-1]].Number.Uint64() {\n\t\t\treturn i, nil\n\t\t}\n\t\t// Otherwise build our current chain\n\t\tf.hashes = append(f.hashes, header.Hash())\n\t\tf.headers[header.Hash()] = header\n\t}\n\treturn 0, nil\n}\n\n// insertChain injects a batch of blocks into the simulated chain.\nfunc (f *fetcherTester) insertChain(blocks types.Blocks) (int, error) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tfor i, block := range blocks {\n\t\t// Make sure the parent is known\n\t\tif _, ok := f.blocks[block.ParentHash()]; !ok {\n\t\t\treturn i, errors.New(\"unknown parent\")\n\t\t}\n\t\t// Discard any new blocks if the same height already exists\n\t\tif block.NumberU64() <= f.blocks[f.hashes[len(f.hashes)-1]].NumberU64() {\n\t\t\treturn i, nil\n\t\t}\n\t\t// Otherwise build our current chain\n\t\tf.hashes = append(f.hashes, block.Hash())\n\t\tf.blocks[block.Hash()] = block\n\t}\n\treturn 0, nil\n}\n\n// dropPeer is an emulator for the peer removal, simply accumulating the various\n// peers dropped by the fetcher.\nfunc (f *fetcherTester) dropPeer(peer string) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tf.drops[peer] = true\n}\n\n// makeHeaderFetcher retrieves a block header fetcher associated with a simulated peer.\nfunc (f *fetcherTester) makeHeaderFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) headerRequesterFn {\n\tclosure := make(map[common.Hash]*types.Block)\n\tfor hash, block := range blocks {\n\t\tclosure[hash] = block\n\t}\n\t// Create a function that returns a header from the closure\n\treturn func(hash common.Hash) error {\n\t\t// Gather the blocks to return\n\t\theaders := make([]*types.Header, 0, 1)\n\t\tif block, ok := closure[hash]; ok {\n\t\t\theaders = append(headers, block.Header())\n\t\t}\n\t\t// Return on a new thread\n\t\tgo f.fetcher.FilterHeaders(peer, headers, time.Now().Add(drift))\n\n\t\treturn nil\n\t}\n}\n\n// makeBodyFetcher retrieves a block body fetcher associated with a simulated peer.\nfunc (f *fetcherTester) makeBodyFetcher(peer string, blocks map[common.Hash]*types.Block, drift time.Duration) bodyRequesterFn {\n\tclosure := make(map[common.Hash]*types.Block)\n\tfor hash, block := range blocks {\n\t\tclosure[hash] = block\n\t}\n\t// Create a function that returns blocks from the closure\n\treturn func(hashes []common.Hash) error {\n\t\t// Gather the block bodies to return\n\t\ttransactions := make([][]*types.Transaction, 0, len(hashes))\n\t\tuncles := make([][]*types.Header, 0, len(hashes))\n\n\t\tfor _, hash := range hashes {\n\t\t\tif block, ok := closure[hash]; ok {\n\t\t\t\ttransactions = append(transactions, block.Transactions())\n\t\t\t\tuncles = append(uncles, block.Uncles())\n\t\t\t}\n\t\t}\n\t\t// Return on a new thread\n\t\tgo f.fetcher.FilterBodies(peer, transactions, uncles, time.Now().Add(drift))\n\n\t\treturn nil\n\t}\n}\n\n// verifyFetchingEvent verifies that a single event arrives on a fetching channel.\nfunc verifyFetchingEvent(t *testing.T, fetching chan []common.Hash, arrive bool) {\n\tif arrive {\n\t\tselect {\n\t\tcase <-fetching:\n\t\tcase 
<-time.After(time.Second):\n\t\t\tt.Fatalf(\"fetching timeout\")\n\t\t}\n\t} else {\n\t\tselect {\n\t\tcase <-fetching:\n\t\t\tt.Fatalf(\"fetching invoked\")\n\t\tcase <-time.After(10 * time.Millisecond):\n\t\t}\n\t}\n}\n\n// verifyCompletingEvent verifies that a single event arrives on a completing channel.\nfunc verifyCompletingEvent(t *testing.T, completing chan []common.Hash, arrive bool) {\n\tif arrive {\n\t\tselect {\n\t\tcase <-completing:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Fatalf(\"completing timeout\")\n\t\t}\n\t} else {\n\t\tselect {\n\t\tcase <-completing:\n\t\t\tt.Fatalf(\"completing invoked\")\n\t\tcase <-time.After(10 * time.Millisecond):\n\t\t}\n\t}\n}\n\n// verifyImportEvent verifies that a single event arrives on an import channel.\nfunc verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) {\n\tif arrive {\n\t\tselect {\n\t\tcase <-imported:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Fatalf(\"import timeout\")\n\t\t}\n\t} else {\n\t\tselect {\n\t\tcase <-imported:\n\t\t\tt.Fatalf(\"import invoked\")\n\t\tcase <-time.After(20 * time.Millisecond):\n\t\t}\n\t}\n}\n\n// verifyImportCount verifies that exactly count events arrive on an\n// import hook channel.\nfunc verifyImportCount(t *testing.T, imported chan interface{}, count int) {\n\tfor i := 0; i < count; i++ {\n\t\tselect {\n\t\tcase <-imported:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Fatalf(\"block %d: import timeout\", i+1)\n\t\t}\n\t}\n\tverifyImportDone(t, imported)\n}\n\n// verifyImportDone verifies that no more events are arriving on an import channel.\nfunc verifyImportDone(t *testing.T, imported chan interface{}) {\n\tselect {\n\tcase <-imported:\n\t\tt.Fatalf(\"extra block imported\")\n\tcase <-time.After(50 * time.Millisecond):\n\t}\n}\n\n// verifyChainHeight verifies the chain height is as expected.\nfunc verifyChainHeight(t *testing.T, fetcher *fetcherTester, height uint64) {\n\tif fetcher.chainHeight() != height {\n\t\tt.Fatalf(\"chain height mismatch, got %d, want %d\", fetcher.chainHeight(), height)\n\t}\n}\n\n// Tests that a fetcher accepts block/header announcements and initiates retrievals\n// for them, successfully importing into the local chain.\nfunc TestFullSequentialAnnouncements(t *testing.T)  { testSequentialAnnouncements(t, false) }\nfunc TestLightSequentialAnnouncements(t *testing.T) { testSequentialAnnouncements(t, true) }\n\nfunc testSequentialAnnouncements(t *testing.T, light bool) {\n\t// Create a chain of blocks to import\n\ttargetBlocks := 4 * hashLimit\n\thashes, blocks := makeChain(targetBlocks, 0, genesis)\n\n\ttester := newTester(light)\n\theaderFetcher := tester.makeHeaderFetcher(\"valid\", blocks, -gatherSlack)\n\tbodyFetcher := tester.makeBodyFetcher(\"valid\", blocks, 0)\n\n\t// Iteratively announce blocks until all are imported\n\timported := make(chan interface{})\n\ttester.fetcher.importedHook = func(header *types.Header, block *types.Block) {\n\t\tif light {\n\t\t\tif header == nil {\n\t\t\t\tt.Fatalf(\"Fetcher try to import empty header\")\n\t\t\t}\n\t\t\timported <- header\n\t\t} else {\n\t\t\tif block == nil {\n\t\t\t\tt.Fatalf(\"Fetcher try to import empty block\")\n\t\t\t}\n\t\t\timported <- block\n\t\t}\n\t}\n\tfor i := len(hashes) - 2; i >= 0; i-- {\n\t\ttester.fetcher.Notify(\"valid\", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)\n\t\tverifyImportEvent(t, imported, true)\n\t}\n\tverifyImportDone(t, imported)\n\tverifyChainHeight(t, tester, 
uint64(len(hashes)-1))\n}\n\n// Tests that if blocks are announced by multiple peers (or even the same buggy\n// peer), they will only get downloaded at most once.\nfunc TestFullConcurrentAnnouncements(t *testing.T)  { testConcurrentAnnouncements(t, false) }\nfunc TestLightConcurrentAnnouncements(t *testing.T) { testConcurrentAnnouncements(t, true) }\n\nfunc testConcurrentAnnouncements(t *testing.T, light bool) {\n\t// Create a chain of blocks to import\n\ttargetBlocks := 4 * hashLimit\n\thashes, blocks := makeChain(targetBlocks, 0, genesis)\n\n\t// Assemble a tester with a built in counter for the requests\n\ttester := newTester(light)\n\tfirstHeaderFetcher := tester.makeHeaderFetcher(\"first\", blocks, -gatherSlack)\n\tfirstBodyFetcher := tester.makeBodyFetcher(\"first\", blocks, 0)\n\tsecondHeaderFetcher := tester.makeHeaderFetcher(\"second\", blocks, -gatherSlack)\n\tsecondBodyFetcher := tester.makeBodyFetcher(\"second\", blocks, 0)\n\n\tcounter := uint32(0)\n\tfirstHeaderWrapper := func(hash common.Hash) error {\n\t\tatomic.AddUint32(&counter, 1)\n\t\treturn firstHeaderFetcher(hash)\n\t}\n\tsecondHeaderWrapper := func(hash common.Hash) error {\n\t\tatomic.AddUint32(&counter, 1)\n\t\treturn secondHeaderFetcher(hash)\n\t}\n\t// Iteratively announce blocks until all are imported\n\timported := make(chan interface{})\n\ttester.fetcher.importedHook = func(header *types.Header, block *types.Block) {\n\t\tif light {\n\t\t\tif header == nil {\n\t\t\t\tt.Fatalf(\"Fetcher try to import empty header\")\n\t\t\t}\n\t\t\timported <- header\n\t\t} else {\n\t\t\tif block == nil {\n\t\t\t\tt.Fatalf(\"Fetcher try to import empty block\")\n\t\t\t}\n\t\t\timported <- block\n\t\t}\n\t}\n\tfor i := len(hashes) - 2; i >= 0; i-- {\n\t\ttester.fetcher.Notify(\"first\", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), firstHeaderWrapper, firstBodyFetcher)\n\t\ttester.fetcher.Notify(\"second\", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout+time.Millisecond), secondHeaderWrapper, secondBodyFetcher)\n\t\ttester.fetcher.Notify(\"second\", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout-time.Millisecond), secondHeaderWrapper, secondBodyFetcher)\n\t\tverifyImportEvent(t, imported, true)\n\t}\n\tverifyImportDone(t, imported)\n\n\t// Make sure no blocks were retrieved twice\n\tif int(counter) != targetBlocks {\n\t\tt.Fatalf(\"retrieval count mismatch: have %v, want %v\", counter, targetBlocks)\n\t}\n\tverifyChainHeight(t, tester, uint64(len(hashes)-1))\n}\n\n// Tests that announcements arriving while a previous is being fetched still\n// results in a valid import.\nfunc TestFullOverlappingAnnouncements(t *testing.T)  { testOverlappingAnnouncements(t, false) }\nfunc TestLightOverlappingAnnouncements(t *testing.T) { testOverlappingAnnouncements(t, true) }\n\nfunc testOverlappingAnnouncements(t *testing.T, light bool) {\n\t// Create a chain of blocks to import\n\ttargetBlocks := 4 * hashLimit\n\thashes, blocks := makeChain(targetBlocks, 0, genesis)\n\n\ttester := newTester(light)\n\theaderFetcher := tester.makeHeaderFetcher(\"valid\", blocks, -gatherSlack)\n\tbodyFetcher := tester.makeBodyFetcher(\"valid\", blocks, 0)\n\n\t// Iteratively announce blocks, but overlap them continuously\n\toverlap := 16\n\timported := make(chan interface{}, len(hashes)-1)\n\tfor i := 0; i < overlap; i++ {\n\t\timported <- nil\n\t}\n\ttester.fetcher.importedHook = func(header *types.Header, block *types.Block) {\n\t\tif light {\n\t\t\tif header == nil {\n\t\t\t\tt.Fatalf(\"Fetcher try 
to import empty header\")\n\t\t\t}\n\t\t\timported <- header\n\t\t} else {\n\t\t\tif block == nil {\n\t\t\t\tt.Fatalf(\"Fetcher try to import empty block\")\n\t\t\t}\n\t\t\timported <- block\n\t\t}\n\t}\n\n\tfor i := len(hashes) - 2; i >= 0; i-- {\n\t\ttester.fetcher.Notify(\"valid\", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)\n\t\tselect {\n\t\tcase <-imported:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Fatalf(\"block %d: import timeout\", len(hashes)-i)\n\t\t}\n\t}\n\t// Wait for all the imports to complete and check count\n\tverifyImportCount(t, imported, overlap)\n\tverifyChainHeight(t, tester, uint64(len(hashes)-1))\n}\n\n// Tests that announces already being retrieved will not be duplicated.\nfunc TestFullPendingDeduplication(t *testing.T)  { testPendingDeduplication(t, false) }\nfunc TestLightPendingDeduplication(t *testing.T) { testPendingDeduplication(t, true) }\n\nfunc testPendingDeduplication(t *testing.T, light bool) {\n\t// Create a hash and corresponding block\n\thashes, blocks := makeChain(1, 0, genesis)\n\n\t// Assemble a tester with a built in counter and delayed fetcher\n\ttester := newTester(light)\n\theaderFetcher := tester.makeHeaderFetcher(\"repeater\", blocks, -gatherSlack)\n\tbodyFetcher := tester.makeBodyFetcher(\"repeater\", blocks, 0)\n\n\tdelay := 50 * time.Millisecond\n\tcounter := uint32(0)\n\theaderWrapper := func(hash common.Hash) error {\n\t\tatomic.AddUint32(&counter, 1)\n\n\t\t// Simulate a long running fetch\n\t\tgo func() {\n\t\t\ttime.Sleep(delay)\n\t\t\theaderFetcher(hash)\n\t\t}()\n\t\treturn nil\n\t}\n\tcheckNonExist := func() bool {\n\t\treturn tester.getBlock(hashes[0]) == nil\n\t}\n\tif light {\n\t\tcheckNonExist = func() bool {\n\t\t\treturn tester.getHeader(hashes[0]) == nil\n\t\t}\n\t}\n\t// Announce the same block many times until it's fetched (wait for any pending ops)\n\tfor checkNonExist() {\n\t\ttester.fetcher.Notify(\"repeater\", hashes[0], 1, time.Now().Add(-arriveTimeout), headerWrapper, bodyFetcher)\n\t\ttime.Sleep(time.Millisecond)\n\t}\n\ttime.Sleep(delay)\n\n\t// Check that all blocks were imported and none fetched twice\n\tif int(counter) != 1 {\n\t\tt.Fatalf(\"retrieval count mismatch: have %v, want %v\", counter, 1)\n\t}\n\tverifyChainHeight(t, tester, 1)\n}\n\n// Tests that announcements retrieved in a random order are cached and eventually\n// imported when all the gaps are filled in.\nfunc TestFullRandomArrivalImport(t *testing.T)  { testRandomArrivalImport(t, false) }\nfunc TestLightRandomArrivalImport(t *testing.T) { testRandomArrivalImport(t, true) }\n\nfunc testRandomArrivalImport(t *testing.T, light bool) {\n\t// Create a chain of blocks to import, and choose one to delay\n\ttargetBlocks := maxQueueDist\n\thashes, blocks := makeChain(targetBlocks, 0, genesis)\n\tskip := targetBlocks / 2\n\n\ttester := newTester(light)\n\theaderFetcher := tester.makeHeaderFetcher(\"valid\", blocks, -gatherSlack)\n\tbodyFetcher := tester.makeBodyFetcher(\"valid\", blocks, 0)\n\n\t// Iteratively announce blocks, skipping one entry\n\timported := make(chan interface{}, len(hashes)-1)\n\ttester.fetcher.importedHook = func(header *types.Header, block *types.Block) {\n\t\tif light {\n\t\t\tif header == nil {\n\t\t\t\tt.Fatalf(\"Fetcher try to import empty header\")\n\t\t\t}\n\t\t\timported <- header\n\t\t} else {\n\t\t\tif block == nil {\n\t\t\t\tt.Fatalf(\"Fetcher try to import empty block\")\n\t\t\t}\n\t\t\timported <- block\n\t\t}\n\t}\n\tfor i := len(hashes) - 1; i >= 0; i-- {\n\t\tif 
i != skip {\n\t\t\ttester.fetcher.Notify(\"valid\", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n\t// Finally announce the skipped entry and check full import\n\ttester.fetcher.Notify(\"valid\", hashes[skip], uint64(len(hashes)-skip-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)\n\tverifyImportCount(t, imported, len(hashes)-1)\n\tverifyChainHeight(t, tester, uint64(len(hashes)-1))\n}\n\n// Tests that direct block enqueues (due to block propagation vs. hash announce)\n// are correctly schedule, filling and import queue gaps.\nfunc TestQueueGapFill(t *testing.T) {\n\t// Create a chain of blocks to import, and choose one to not announce at all\n\ttargetBlocks := maxQueueDist\n\thashes, blocks := makeChain(targetBlocks, 0, genesis)\n\tskip := targetBlocks / 2\n\n\ttester := newTester(false)\n\theaderFetcher := tester.makeHeaderFetcher(\"valid\", blocks, -gatherSlack)\n\tbodyFetcher := tester.makeBodyFetcher(\"valid\", blocks, 0)\n\n\t// Iteratively announce blocks, skipping one entry\n\timported := make(chan interface{}, len(hashes)-1)\n\ttester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }\n\n\tfor i := len(hashes) - 1; i >= 0; i-- {\n\t\tif i != skip {\n\t\t\ttester.fetcher.Notify(\"valid\", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)\n\t\t\ttime.Sleep(time.Millisecond)\n\t\t}\n\t}\n\t// Fill the missing block directly as if propagated\n\ttester.fetcher.Enqueue(\"valid\", blocks[hashes[skip]])\n\tverifyImportCount(t, imported, len(hashes)-1)\n\tverifyChainHeight(t, tester, uint64(len(hashes)-1))\n}\n\n// Tests that blocks arriving from various sources (multiple propagations, hash\n// announces, etc) do not get scheduled for import multiple times.\nfunc TestImportDeduplication(t *testing.T) {\n\t// Create two blocks to import (one for duplication, the other for stalling)\n\thashes, blocks := makeChain(2, 0, genesis)\n\n\t// Create the tester and wrap the importer with a counter\n\ttester := newTester(false)\n\theaderFetcher := tester.makeHeaderFetcher(\"valid\", blocks, -gatherSlack)\n\tbodyFetcher := tester.makeBodyFetcher(\"valid\", blocks, 0)\n\n\tcounter := uint32(0)\n\ttester.fetcher.insertChain = func(blocks types.Blocks) (int, error) {\n\t\tatomic.AddUint32(&counter, uint32(len(blocks)))\n\t\treturn tester.insertChain(blocks)\n\t}\n\t// Instrument the fetching and imported events\n\tfetching := make(chan []common.Hash)\n\timported := make(chan interface{}, len(hashes)-1)\n\ttester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }\n\ttester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }\n\n\t// Announce the duplicating block, wait for retrieval, and also propagate directly\n\ttester.fetcher.Notify(\"valid\", hashes[0], 1, time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)\n\t<-fetching\n\n\ttester.fetcher.Enqueue(\"valid\", blocks[hashes[0]])\n\ttester.fetcher.Enqueue(\"valid\", blocks[hashes[0]])\n\ttester.fetcher.Enqueue(\"valid\", blocks[hashes[0]])\n\n\t// Fill the missing block directly as if propagated, and check import uniqueness\n\ttester.fetcher.Enqueue(\"valid\", blocks[hashes[1]])\n\tverifyImportCount(t, imported, 2)\n\n\tif counter != 2 {\n\t\tt.Fatalf(\"import invocation count mismatch: have %v, want %v\", counter, 2)\n\t}\n}\n\n// Tests that blocks with numbers much lower or higher than 
out current head get\n// discarded to prevent wasting resources on useless blocks from faulty peers.\nfunc TestDistantPropagationDiscarding(t *testing.T) {\n\t// Create a long chain to import and define the discard boundaries\n\thashes, blocks := makeChain(3*maxQueueDist, 0, genesis)\n\thead := hashes[len(hashes)/2]\n\n\tlow, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1\n\n\t// Create a tester and simulate a head block being the middle of the above chain\n\ttester := newTester(false)\n\n\ttester.lock.Lock()\n\ttester.hashes = []common.Hash{head}\n\ttester.blocks = map[common.Hash]*types.Block{head: blocks[head]}\n\ttester.lock.Unlock()\n\n\t// Ensure that a block with a lower number than the threshold is discarded\n\ttester.fetcher.Enqueue(\"lower\", blocks[hashes[low]])\n\ttime.Sleep(10 * time.Millisecond)\n\tif !tester.fetcher.queue.Empty() {\n\t\tt.Fatalf(\"fetcher queued stale block\")\n\t}\n\t// Ensure that a block with a higher number than the threshold is discarded\n\ttester.fetcher.Enqueue(\"higher\", blocks[hashes[high]])\n\ttime.Sleep(10 * time.Millisecond)\n\tif !tester.fetcher.queue.Empty() {\n\t\tt.Fatalf(\"fetcher queued future block\")\n\t}\n}\n\n// Tests that announcements with numbers much lower or higher than out current\n// head get discarded to prevent wasting resources on useless blocks from faulty\n// peers.\nfunc TestFullDistantAnnouncementDiscarding(t *testing.T)  { testDistantAnnouncementDiscarding(t, false) }\nfunc TestLightDistantAnnouncementDiscarding(t *testing.T) { testDistantAnnouncementDiscarding(t, true) }\n\nfunc testDistantAnnouncementDiscarding(t *testing.T, light bool) {\n\t// Create a long chain to import and define the discard boundaries\n\thashes, blocks := makeChain(3*maxQueueDist, 0, genesis)\n\thead := hashes[len(hashes)/2]\n\n\tlow, high := len(hashes)/2+maxUncleDist+1, len(hashes)/2-maxQueueDist-1\n\n\t// Create a tester and simulate a head block being the middle of the above chain\n\ttester := newTester(light)\n\n\ttester.lock.Lock()\n\ttester.hashes = []common.Hash{head}\n\ttester.headers = map[common.Hash]*types.Header{head: blocks[head].Header()}\n\ttester.blocks = map[common.Hash]*types.Block{head: blocks[head]}\n\ttester.lock.Unlock()\n\n\theaderFetcher := tester.makeHeaderFetcher(\"lower\", blocks, -gatherSlack)\n\tbodyFetcher := tester.makeBodyFetcher(\"lower\", blocks, 0)\n\n\tfetching := make(chan struct{}, 2)\n\ttester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- struct{}{} }\n\n\t// Ensure that a block with a lower number than the threshold is discarded\n\ttester.fetcher.Notify(\"lower\", hashes[low], blocks[hashes[low]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)\n\tselect {\n\tcase <-time.After(50 * time.Millisecond):\n\tcase <-fetching:\n\t\tt.Fatalf(\"fetcher requested stale header\")\n\t}\n\t// Ensure that a block with a higher number than the threshold is discarded\n\ttester.fetcher.Notify(\"higher\", hashes[high], blocks[hashes[high]].NumberU64(), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)\n\tselect {\n\tcase <-time.After(50 * time.Millisecond):\n\tcase <-fetching:\n\t\tt.Fatalf(\"fetcher requested future header\")\n\t}\n}\n\n// Tests that peers announcing blocks with invalid numbers (i.e. 
not matching\n// the headers provided afterwards) get dropped as malicious.\nfunc TestFullInvalidNumberAnnouncement(t *testing.T)  { testInvalidNumberAnnouncement(t, false) }\nfunc TestLightInvalidNumberAnnouncement(t *testing.T) { testInvalidNumberAnnouncement(t, true) }\n\nfunc testInvalidNumberAnnouncement(t *testing.T, light bool) {\n\t// Create a single block to import and check numbers against\n\thashes, blocks := makeChain(1, 0, genesis)\n\n\ttester := newTester(light)\n\tbadHeaderFetcher := tester.makeHeaderFetcher(\"bad\", blocks, -gatherSlack)\n\tbadBodyFetcher := tester.makeBodyFetcher(\"bad\", blocks, 0)\n\n\timported := make(chan interface{})\n\ttester.fetcher.importedHook = func(header *types.Header, block *types.Block) {\n\t\tif light {\n\t\t\tif header == nil {\n\t\t\t\tt.Fatalf(\"Fetcher try to import empty header\")\n\t\t\t}\n\t\t\timported <- header\n\t\t} else {\n\t\t\tif block == nil {\n\t\t\t\tt.Fatalf(\"Fetcher try to import empty block\")\n\t\t\t}\n\t\t\timported <- block\n\t\t}\n\t}\n\t// Announce a block with a bad number, check for immediate drop\n\ttester.fetcher.Notify(\"bad\", hashes[0], 2, time.Now().Add(-arriveTimeout), badHeaderFetcher, badBodyFetcher)\n\tverifyImportEvent(t, imported, false)\n\n\ttester.lock.RLock()\n\tdropped := tester.drops[\"bad\"]\n\ttester.lock.RUnlock()\n\n\tif !dropped {\n\t\tt.Fatalf(\"peer with invalid numbered announcement not dropped\")\n\t}\n\n\tgoodHeaderFetcher := tester.makeHeaderFetcher(\"good\", blocks, -gatherSlack)\n\tgoodBodyFetcher := tester.makeBodyFetcher(\"good\", blocks, 0)\n\t// Make sure a good announcement passes without a drop\n\ttester.fetcher.Notify(\"good\", hashes[0], 1, time.Now().Add(-arriveTimeout), goodHeaderFetcher, goodBodyFetcher)\n\tverifyImportEvent(t, imported, true)\n\n\ttester.lock.RLock()\n\tdropped = tester.drops[\"good\"]\n\ttester.lock.RUnlock()\n\n\tif dropped {\n\t\tt.Fatalf(\"peer with valid numbered announcement dropped\")\n\t}\n\tverifyImportDone(t, imported)\n}\n\n// Tests that if a block is empty (i.e. 
header only), no body request should be\n// made, and instead the header should be assembled into a whole block in itself.\nfunc TestEmptyBlockShortCircuit(t *testing.T) {\n\t// Create a chain of blocks to import\n\thashes, blocks := makeChain(32, 0, genesis)\n\n\ttester := newTester(false)\n\theaderFetcher := tester.makeHeaderFetcher(\"valid\", blocks, -gatherSlack)\n\tbodyFetcher := tester.makeBodyFetcher(\"valid\", blocks, 0)\n\n\t// Add a monitoring hook for all internal events\n\tfetching := make(chan []common.Hash)\n\ttester.fetcher.fetchingHook = func(hashes []common.Hash) { fetching <- hashes }\n\n\tcompleting := make(chan []common.Hash)\n\ttester.fetcher.completingHook = func(hashes []common.Hash) { completing <- hashes }\n\n\timported := make(chan interface{})\n\ttester.fetcher.importedHook = func(header *types.Header, block *types.Block) {\n\t\tif block == nil {\n\t\t\tt.Fatalf(\"Fetcher try to import empty block\")\n\t\t}\n\t\timported <- block\n\t}\n\t// Iteratively announce blocks until all are imported\n\tfor i := len(hashes) - 2; i >= 0; i-- {\n\t\ttester.fetcher.Notify(\"valid\", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), headerFetcher, bodyFetcher)\n\n\t\t// All announces should fetch the header\n\t\tverifyFetchingEvent(t, fetching, true)\n\n\t\t// Only blocks with data contents should request bodies\n\t\tverifyCompletingEvent(t, completing, len(blocks[hashes[i]].Transactions()) > 0 || len(blocks[hashes[i]].Uncles()) > 0)\n\n\t\t// Irrelevant of the construct, import should succeed\n\t\tverifyImportEvent(t, imported, true)\n\t}\n\tverifyImportDone(t, imported)\n}\n\n// Tests that a peer is unable to use unbounded memory with sending infinite\n// block announcements to a node, but that even in the face of such an attack,\n// the fetcher remains operational.\nfunc TestHashMemoryExhaustionAttack(t *testing.T) {\n\t// Create a tester with instrumented import hooks\n\ttester := newTester(false)\n\n\timported, announces := make(chan interface{}), int32(0)\n\ttester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }\n\ttester.fetcher.announceChangeHook = func(hash common.Hash, added bool) {\n\t\tif added {\n\t\t\tatomic.AddInt32(&announces, 1)\n\t\t} else {\n\t\t\tatomic.AddInt32(&announces, -1)\n\t\t}\n\t}\n\t// Create a valid chain and an infinite junk chain\n\ttargetBlocks := hashLimit + 2*maxQueueDist\n\thashes, blocks := makeChain(targetBlocks, 0, genesis)\n\tvalidHeaderFetcher := tester.makeHeaderFetcher(\"valid\", blocks, -gatherSlack)\n\tvalidBodyFetcher := tester.makeBodyFetcher(\"valid\", blocks, 0)\n\n\tattack, _ := makeChain(targetBlocks, 0, unknownBlock)\n\tattackerHeaderFetcher := tester.makeHeaderFetcher(\"attacker\", nil, -gatherSlack)\n\tattackerBodyFetcher := tester.makeBodyFetcher(\"attacker\", nil, 0)\n\n\t// Feed the tester a huge hashset from the attacker, and a limited from the valid peer\n\tfor i := 0; i < len(attack); i++ {\n\t\tif i < maxQueueDist {\n\t\t\ttester.fetcher.Notify(\"valid\", hashes[len(hashes)-2-i], uint64(i+1), time.Now(), validHeaderFetcher, validBodyFetcher)\n\t\t}\n\t\ttester.fetcher.Notify(\"attacker\", attack[i], 1 /* don't distance drop */, time.Now(), attackerHeaderFetcher, attackerBodyFetcher)\n\t}\n\tif count := atomic.LoadInt32(&announces); count != hashLimit+maxQueueDist {\n\t\tt.Fatalf(\"queued announce count mismatch: have %d, want %d\", count, hashLimit+maxQueueDist)\n\t}\n\t// Wait for fetches to complete\n\tverifyImportCount(t, imported, 
maxQueueDist)\n\n\t// Feed the remaining valid hashes to ensure DOS protection state remains clean\n\tfor i := len(hashes) - maxQueueDist - 2; i >= 0; i-- {\n\t\ttester.fetcher.Notify(\"valid\", hashes[i], uint64(len(hashes)-i-1), time.Now().Add(-arriveTimeout), validHeaderFetcher, validBodyFetcher)\n\t\tverifyImportEvent(t, imported, true)\n\t}\n\tverifyImportDone(t, imported)\n}\n\n// Tests that blocks sent to the fetcher (either through propagation or via hash\n// announces and retrievals) don't pile up indefinitely, exhausting available\n// system memory.\nfunc TestBlockMemoryExhaustionAttack(t *testing.T) {\n\t// Create a tester with instrumented import hooks\n\ttester := newTester(false)\n\n\timported, enqueued := make(chan interface{}), int32(0)\n\ttester.fetcher.importedHook = func(header *types.Header, block *types.Block) { imported <- block }\n\ttester.fetcher.queueChangeHook = func(hash common.Hash, added bool) {\n\t\tif added {\n\t\t\tatomic.AddInt32(&enqueued, 1)\n\t\t} else {\n\t\t\tatomic.AddInt32(&enqueued, -1)\n\t\t}\n\t}\n\t// Create a valid chain and a batch of dangling (but in range) blocks\n\ttargetBlocks := hashLimit + 2*maxQueueDist\n\thashes, blocks := makeChain(targetBlocks, 0, genesis)\n\tattack := make(map[common.Hash]*types.Block)\n\tfor i := byte(0); len(attack) < blockLimit+2*maxQueueDist; i++ {\n\t\thashes, blocks := makeChain(maxQueueDist-1, i, unknownBlock)\n\t\tfor _, hash := range hashes[:maxQueueDist-2] {\n\t\t\tattack[hash] = blocks[hash]\n\t\t}\n\t}\n\t// Try to feed all the attacker blocks make sure only a limited batch is accepted\n\tfor _, block := range attack {\n\t\ttester.fetcher.Enqueue(\"attacker\", block)\n\t}\n\ttime.Sleep(200 * time.Millisecond)\n\tif queued := atomic.LoadInt32(&enqueued); queued != blockLimit {\n\t\tt.Fatalf(\"queued block count mismatch: have %d, want %d\", queued, blockLimit)\n\t}\n\t// Queue up a batch of valid blocks, and check that a new peer is allowed to do so\n\tfor i := 0; i < maxQueueDist-1; i++ {\n\t\ttester.fetcher.Enqueue(\"valid\", blocks[hashes[len(hashes)-3-i]])\n\t}\n\ttime.Sleep(100 * time.Millisecond)\n\tif queued := atomic.LoadInt32(&enqueued); queued != blockLimit+maxQueueDist-1 {\n\t\tt.Fatalf(\"queued block count mismatch: have %d, want %d\", queued, blockLimit+maxQueueDist-1)\n\t}\n\t// Insert the missing piece (and sanity check the import)\n\ttester.fetcher.Enqueue(\"valid\", blocks[hashes[len(hashes)-2]])\n\tverifyImportCount(t, imported, maxQueueDist)\n\n\t// Insert the remaining blocks in chunks to ensure clean DOS protection\n\tfor i := maxQueueDist; i < len(hashes)-1; i++ {\n\t\ttester.fetcher.Enqueue(\"valid\", blocks[hashes[len(hashes)-2-i]])\n\t\tverifyImportEvent(t, imported, true)\n\t}\n\tverifyImportDone(t, imported)\n}\n"
  },
  {
    "path": "eth/fetcher/tx_fetcher.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage fetcher\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\tmrand \"math/rand\"\n\t\"sort\"\n\t\"time\"\n\n\tmapset \"github.com/deckarep/golang-set\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/metrics\"\n)\n\nconst (\n\t// maxTxAnnounces is the maximum number of unique transaction a peer\n\t// can announce in a short time.\n\tmaxTxAnnounces = 4096\n\n\t// maxTxRetrievals is the maximum transaction number can be fetched in one\n\t// request. The rationale to pick 256 is:\n\t//   - In eth protocol, the softResponseLimit is 2MB. Nowadays according to\n\t//     Etherscan the average transaction size is around 200B, so in theory\n\t//     we can include lots of transaction in a single protocol packet.\n\t//   - However the maximum size of a single transaction is raised to 128KB,\n\t//     so pick a middle value here to ensure we can maximize the efficiency\n\t//     of the retrieval and response size overflow won't happen in most cases.\n\tmaxTxRetrievals = 256\n\n\t// maxTxUnderpricedSetSize is the size of the underpriced transaction set that\n\t// is used to track recent transactions that have been dropped so we don't\n\t// re-request them.\n\tmaxTxUnderpricedSetSize = 32768\n\n\t// txArriveTimeout is the time allowance before an announced transaction is\n\t// explicitly requested.\n\ttxArriveTimeout = 500 * time.Millisecond\n\n\t// txGatherSlack is the interval used to collate almost-expired announces\n\t// with network fetches.\n\ttxGatherSlack = 100 * time.Millisecond\n)\n\nvar (\n\t// txFetchTimeout is the maximum allotted time to return an explicitly\n\t// requested transaction.\n\ttxFetchTimeout = 5 * time.Second\n)\n\nvar (\n\ttxAnnounceInMeter          = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/announces/in\", nil)\n\ttxAnnounceKnownMeter       = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/announces/known\", nil)\n\ttxAnnounceUnderpricedMeter = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/announces/underpriced\", nil)\n\ttxAnnounceDOSMeter         = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/announces/dos\", nil)\n\n\ttxBroadcastInMeter          = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/broadcasts/in\", nil)\n\ttxBroadcastKnownMeter       = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/broadcasts/known\", nil)\n\ttxBroadcastUnderpricedMeter = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/broadcasts/underpriced\", nil)\n\ttxBroadcastOtherRejectMeter = 
metrics.NewRegisteredMeter(\"eth/fetcher/transaction/broadcasts/otherreject\", nil)\n\n\ttxRequestOutMeter     = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/request/out\", nil)\n\ttxRequestFailMeter    = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/request/fail\", nil)\n\ttxRequestDoneMeter    = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/request/done\", nil)\n\ttxRequestTimeoutMeter = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/request/timeout\", nil)\n\n\ttxReplyInMeter          = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/replies/in\", nil)\n\ttxReplyKnownMeter       = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/replies/known\", nil)\n\ttxReplyUnderpricedMeter = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/replies/underpriced\", nil)\n\ttxReplyOtherRejectMeter = metrics.NewRegisteredMeter(\"eth/fetcher/transaction/replies/otherreject\", nil)\n\n\ttxFetcherWaitingPeers   = metrics.NewRegisteredGauge(\"eth/fetcher/transaction/waiting/peers\", nil)\n\ttxFetcherWaitingHashes  = metrics.NewRegisteredGauge(\"eth/fetcher/transaction/waiting/hashes\", nil)\n\ttxFetcherQueueingPeers  = metrics.NewRegisteredGauge(\"eth/fetcher/transaction/queueing/peers\", nil)\n\ttxFetcherQueueingHashes = metrics.NewRegisteredGauge(\"eth/fetcher/transaction/queueing/hashes\", nil)\n\ttxFetcherFetchingPeers  = metrics.NewRegisteredGauge(\"eth/fetcher/transaction/fetching/peers\", nil)\n\ttxFetcherFetchingHashes = metrics.NewRegisteredGauge(\"eth/fetcher/transaction/fetching/hashes\", nil)\n)\n\n// txAnnounce is the notification of the availability of a batch\n// of new transactions in the network.\ntype txAnnounce struct {\n\torigin string        // Identifier of the peer originating the notification\n\thashes []common.Hash // Batch of transaction hashes being announced\n}\n\n// txRequest represents an in-flight transaction retrieval request destined to\n// a specific peers.\ntype txRequest struct {\n\thashes []common.Hash            // Transactions having been requested\n\tstolen map[common.Hash]struct{} // Deliveries by someone else (don't re-request)\n\ttime   mclock.AbsTime           // Timestamp of the request\n}\n\n// txDelivery is the notification that a batch of transactions have been added\n// to the pool and should be untracked.\ntype txDelivery struct {\n\torigin string        // Identifier of the peer originating the notification\n\thashes []common.Hash // Batch of transaction hashes having been delivered\n\tdirect bool          // Whether this is a direct reply or a broadcast\n}\n\n// txDrop is the notiication that a peer has disconnected.\ntype txDrop struct {\n\tpeer string\n}\n\n// TxFetcher is responsible for retrieving new transaction based on announcements.\n//\n// The fetcher operates in 3 stages:\n//   - Transactions that are newly discovered are moved into a wait list.\n//   - After ~500ms passes, transactions from the wait list that have not been\n//     broadcast to us in whole are moved into a queueing area.\n//   - When a connected peer doesn't have in-flight retrieval requests, any\n//     transaction queued up (and announced by the peer) are allocated to the\n//     peer and moved into a fetching status until it's fulfilled or fails.\n//\n// The invariants of the fetcher are:\n//   - Each tracked transaction (hash) must only be present in one of the\n//     three stages. 
This ensures that the fetcher operates akin to a finite\n//     state automata and there's do data leak.\n//   - Each peer that announced transactions may be scheduled retrievals, but\n//     only ever one concurrently. This ensures we can immediately know what is\n//     missing from a reply and reschedule it.\ntype TxFetcher struct {\n\tnotify  chan *txAnnounce\n\tcleanup chan *txDelivery\n\tdrop    chan *txDrop\n\tquit    chan struct{}\n\n\tunderpriced mapset.Set // Transactions discarded as too cheap (don't re-fetch)\n\n\t// Stage 1: Waiting lists for newly discovered transactions that might be\n\t// broadcast without needing explicit request/reply round trips.\n\twaitlist  map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast\n\twaittime  map[common.Hash]mclock.AbsTime      // Timestamps when transactions were added to the waitlist\n\twaitslots map[string]map[common.Hash]struct{} // Waiting announcement sgroupped by peer (DoS protection)\n\n\t// Stage 2: Queue of transactions that waiting to be allocated to some peer\n\t// to be retrieved directly.\n\tannounces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer\n\tannounced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash\n\n\t// Stage 3: Set of transactions currently being retrieved, some which may be\n\t// fulfilled and some rescheduled. Note, this step shares 'announces' from the\n\t// previous stage to avoid having to duplicate (need it for DoS checks).\n\tfetching   map[common.Hash]string              // Transaction set currently being retrieved\n\trequests   map[string]*txRequest               // In-flight transaction retrievals\n\talternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails\n\n\t// Callbacks\n\thasTx    func(common.Hash) bool             // Retrieves a tx from the local txpool\n\taddTxs   func([]*types.Transaction) []error // Insert a batch of transactions into local txpool\n\tfetchTxs func(string, []common.Hash) error  // Retrieves a set of txs from a remote peer\n\n\tstep  chan struct{} // Notification channel when the fetcher loop iterates\n\tclock mclock.Clock  // Time wrapper to simulate in tests\n\trand  *mrand.Rand   // Randomizer to use in tests instead of map range loops (soft-random)\n}\n\n// NewTxFetcher creates a transaction fetcher to retrieve transaction\n// based on hash announcements.\nfunc NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {\n\treturn NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)\n}\n\n// NewTxFetcherForTests is a testing method to mock out the realtime clock with\n// a simulated version and the internal randomness with a deterministic one.\nfunc NewTxFetcherForTests(\n\thasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,\n\tclock mclock.Clock, rand *mrand.Rand) *TxFetcher {\n\treturn &TxFetcher{\n\t\tnotify:      make(chan *txAnnounce),\n\t\tcleanup:     make(chan *txDelivery),\n\t\tdrop:        make(chan *txDrop),\n\t\tquit:        make(chan struct{}),\n\t\twaitlist:    make(map[common.Hash]map[string]struct{}),\n\t\twaittime:    make(map[common.Hash]mclock.AbsTime),\n\t\twaitslots:   make(map[string]map[common.Hash]struct{}),\n\t\tannounces:   make(map[string]map[common.Hash]struct{}),\n\t\tannounced:   
make(map[common.Hash]map[string]struct{}),\n\t\tfetching:    make(map[common.Hash]string),\n\t\trequests:    make(map[string]*txRequest),\n\t\talternates:  make(map[common.Hash]map[string]struct{}),\n\t\tunderpriced: mapset.NewSet(),\n\t\thasTx:       hasTx,\n\t\taddTxs:      addTxs,\n\t\tfetchTxs:    fetchTxs,\n\t\tclock:       clock,\n\t\trand:        rand,\n\t}\n}\n\n// Notify announces the fetcher of the potential availability of a new batch of\n// transactions in the network.\nfunc (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {\n\t// Keep track of all the announced transactions\n\ttxAnnounceInMeter.Mark(int64(len(hashes)))\n\n\t// Skip any transaction announcements that we already know of, or that we've\n\t// previously marked as cheap and discarded. This check is of course racey,\n\t// because multiple concurrent notifies will still manage to pass it, but it's\n\t// still valuable to check here because it runs concurrent  to the internal\n\t// loop, so anything caught here is time saved internally.\n\tvar (\n\t\tunknowns               = make([]common.Hash, 0, len(hashes))\n\t\tduplicate, underpriced int64\n\t)\n\tfor _, hash := range hashes {\n\t\tswitch {\n\t\tcase f.hasTx(hash):\n\t\t\tduplicate++\n\n\t\tcase f.underpriced.Contains(hash):\n\t\t\tunderpriced++\n\n\t\tdefault:\n\t\t\tunknowns = append(unknowns, hash)\n\t\t}\n\t}\n\ttxAnnounceKnownMeter.Mark(duplicate)\n\ttxAnnounceUnderpricedMeter.Mark(underpriced)\n\n\t// If anything's left to announce, push it into the internal loop\n\tif len(unknowns) == 0 {\n\t\treturn nil\n\t}\n\tannounce := &txAnnounce{\n\t\torigin: peer,\n\t\thashes: unknowns,\n\t}\n\tselect {\n\tcase f.notify <- announce:\n\t\treturn nil\n\tcase <-f.quit:\n\t\treturn errTerminated\n\t}\n}\n\n// Enqueue imports a batch of received transaction into the transaction pool\n// and the fetcher. This method may be called by both transaction broadcasts and\n// direct request replies. 
The differentiation is important so the fetcher can\n// re-shedule missing transactions as soon as possible.\nfunc (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error {\n\t// Keep track of all the propagated transactions\n\tif direct {\n\t\ttxReplyInMeter.Mark(int64(len(txs)))\n\t} else {\n\t\ttxBroadcastInMeter.Mark(int64(len(txs)))\n\t}\n\t// Push all the transactions into the pool, tracking underpriced ones to avoid\n\t// re-requesting them and dropping the peer in case of malicious transfers.\n\tvar (\n\t\tadded       = make([]common.Hash, 0, len(txs))\n\t\tduplicate   int64\n\t\tunderpriced int64\n\t\totherreject int64\n\t)\n\terrs := f.addTxs(txs)\n\tfor i, err := range errs {\n\t\tif err != nil {\n\t\t\t// Track the transaction hash if the price is too low for us.\n\t\t\t// Avoid re-request this transaction when we receive another\n\t\t\t// announcement.\n\t\t\tif err == core.ErrUnderpriced || err == core.ErrReplaceUnderpriced {\n\t\t\t\tfor f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {\n\t\t\t\t\tf.underpriced.Pop()\n\t\t\t\t}\n\t\t\t\tf.underpriced.Add(txs[i].Hash())\n\t\t\t}\n\t\t\t// Track a few interesting failure types\n\t\t\tswitch err {\n\t\t\tcase nil: // Noop, but need to handle to not count these\n\n\t\t\tcase core.ErrAlreadyKnown:\n\t\t\t\tduplicate++\n\n\t\t\tcase core.ErrUnderpriced, core.ErrReplaceUnderpriced:\n\t\t\t\tunderpriced++\n\n\t\t\tdefault:\n\t\t\t\totherreject++\n\t\t\t}\n\t\t}\n\t\tadded = append(added, txs[i].Hash())\n\t}\n\tif direct {\n\t\ttxReplyKnownMeter.Mark(duplicate)\n\t\ttxReplyUnderpricedMeter.Mark(underpriced)\n\t\ttxReplyOtherRejectMeter.Mark(otherreject)\n\t} else {\n\t\ttxBroadcastKnownMeter.Mark(duplicate)\n\t\ttxBroadcastUnderpricedMeter.Mark(underpriced)\n\t\ttxBroadcastOtherRejectMeter.Mark(otherreject)\n\t}\n\tselect {\n\tcase f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:\n\t\treturn nil\n\tcase <-f.quit:\n\t\treturn errTerminated\n\t}\n}\n\n// Drop should be called when a peer disconnects. It cleans up all the internal\n// data structures of the given node.\nfunc (f *TxFetcher) Drop(peer string) error {\n\tselect {\n\tcase f.drop <- &txDrop{peer: peer}:\n\t\treturn nil\n\tcase <-f.quit:\n\t\treturn errTerminated\n\t}\n}\n\n// Start boots up the announcement based synchroniser, accepting and processing\n// hash notifications and block fetches until termination requested.\nfunc (f *TxFetcher) Start() {\n\tgo f.loop()\n}\n\n// Stop terminates the announcement based synchroniser, canceling all pending\n// operations.\nfunc (f *TxFetcher) Stop() {\n\tclose(f.quit)\n}\n\nfunc (f *TxFetcher) loop() {\n\tvar (\n\t\twaitTimer    = new(mclock.Timer)\n\t\ttimeoutTimer = new(mclock.Timer)\n\n\t\twaitTrigger    = make(chan struct{}, 1)\n\t\ttimeoutTrigger = make(chan struct{}, 1)\n\t)\n\tfor {\n\t\tselect {\n\t\tcase ann := <-f.notify:\n\t\t\t// Drop part of the new announcements if there are too many accumulated.\n\t\t\t// Note, we could but do not filter already known transactions here as\n\t\t\t// the probability of something arriving between this call and the pre-\n\t\t\t// filter outside is essentially zero.\n\t\t\tused := len(f.waitslots[ann.origin]) + len(f.announces[ann.origin])\n\t\t\tif used >= maxTxAnnounces {\n\t\t\t\t// This can happen if a set of transactions are requested but not\n\t\t\t\t// all fulfilled, so the remainder are rescheduled without the cap\n\t\t\t\t// check. 
Should be fine as the limit is in the thousands and the\n\t\t\t\t// request size in the hundreds.\n\t\t\t\ttxAnnounceDOSMeter.Mark(int64(len(ann.hashes)))\n\t\t\t\tbreak\n\t\t\t}\n\t\t\twant := used + len(ann.hashes)\n\t\t\tif want > maxTxAnnounces {\n\t\t\t\ttxAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))\n\t\t\t\tann.hashes = ann.hashes[:len(ann.hashes)-(want-maxTxAnnounces)]\n\t\t\t}\n\t\t\t// All is well, schedule the remainder of the transactions\n\t\t\tidleWait := len(f.waittime) == 0\n\t\t\t_, oldPeer := f.announces[ann.origin]\n\n\t\t\tfor _, hash := range ann.hashes {\n\t\t\t\t// If the transaction is already downloading, add it to the list\n\t\t\t\t// of possible alternates (in case the current retrieval fails) and\n\t\t\t\t// also account it for the peer.\n\t\t\t\tif f.alternates[hash] != nil {\n\t\t\t\t\tf.alternates[hash][ann.origin] = struct{}{}\n\n\t\t\t\t\t// Stage 2 and 3 share the set of origins per tx\n\t\t\t\t\tif announces := f.announces[ann.origin]; announces != nil {\n\t\t\t\t\t\tannounces[hash] = struct{}{}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tf.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// If the transaction is not downloading, but is already queued\n\t\t\t\t// from a different peer, track it for the new peer too.\n\t\t\t\tif f.announced[hash] != nil {\n\t\t\t\t\tf.announced[hash][ann.origin] = struct{}{}\n\n\t\t\t\t\t// Stage 2 and 3 share the set of origins per tx\n\t\t\t\t\tif announces := f.announces[ann.origin]; announces != nil {\n\t\t\t\t\t\tannounces[hash] = struct{}{}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tf.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// If the transaction is already known to the fetcher, but not\n\t\t\t\t// yet downloading, add the peer as an alternate origin in the\n\t\t\t\t// waiting list.\n\t\t\t\tif f.waitlist[hash] != nil {\n\t\t\t\t\tf.waitlist[hash][ann.origin] = struct{}{}\n\n\t\t\t\t\tif waitslots := f.waitslots[ann.origin]; waitslots != nil {\n\t\t\t\t\t\twaitslots[hash] = struct{}{}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tf.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// Transaction unknown to the fetcher, insert it into the waiting list\n\t\t\t\tf.waitlist[hash] = map[string]struct{}{ann.origin: {}}\n\t\t\t\tf.waittime[hash] = f.clock.Now()\n\n\t\t\t\tif waitslots := f.waitslots[ann.origin]; waitslots != nil {\n\t\t\t\t\twaitslots[hash] = struct{}{}\n\t\t\t\t} else {\n\t\t\t\t\tf.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If a new item was added to the waitlist, schedule it into the fetcher\n\t\t\tif idleWait && len(f.waittime) > 0 {\n\t\t\t\tf.rescheduleWait(waitTimer, waitTrigger)\n\t\t\t}\n\t\t\t// If this peer is new and announced something already queued, maybe\n\t\t\t// request transactions from them\n\t\t\tif !oldPeer && len(f.announces[ann.origin]) > 0 {\n\t\t\t\tf.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: {}})\n\t\t\t}\n\n\t\tcase <-waitTrigger:\n\t\t\t// At least one transaction's waiting time ran out, push all expired\n\t\t\t// ones into the retrieval queues\n\t\t\tactives := make(map[string]struct{})\n\t\t\tfor hash, instance := range f.waittime {\n\t\t\t\tif time.Duration(f.clock.Now()-instance)+txGatherSlack > txArriveTimeout {\n\t\t\t\t\t// Transaction expired without propagation, schedule for retrieval\n\t\t\t\t\tif f.announced[hash] != nil 
{\n\t\t\t\t\t\tpanic(\"announce tracker already contains waitlist item\")\n\t\t\t\t\t}\n\t\t\t\t\tf.announced[hash] = f.waitlist[hash]\n\t\t\t\t\tfor peer := range f.waitlist[hash] {\n\t\t\t\t\t\tif announces := f.announces[peer]; announces != nil {\n\t\t\t\t\t\t\tannounces[hash] = struct{}{}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tf.announces[peer] = map[common.Hash]struct{}{hash: {}}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdelete(f.waitslots[peer], hash)\n\t\t\t\t\t\tif len(f.waitslots[peer]) == 0 {\n\t\t\t\t\t\t\tdelete(f.waitslots, peer)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tactives[peer] = struct{}{}\n\t\t\t\t\t}\n\t\t\t\t\tdelete(f.waittime, hash)\n\t\t\t\t\tdelete(f.waitlist, hash)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If transactions are still waiting for propagation, reschedule the wait timer\n\t\t\tif len(f.waittime) > 0 {\n\t\t\t\tf.rescheduleWait(waitTimer, waitTrigger)\n\t\t\t}\n\t\t\t// If any peers became active and are idle, request transactions from them\n\t\t\tif len(actives) > 0 {\n\t\t\t\tf.scheduleFetches(timeoutTimer, timeoutTrigger, actives)\n\t\t\t}\n\n\t\tcase <-timeoutTrigger:\n\t\t\t// Clean up any expired retrievals and avoid re-requesting them from the\n\t\t\t// same peer (either overloaded or malicious, useless in both cases). We\n\t\t\t// could also penalize (Drop), but there's nothing to gain, and it could\n\t\t\t// possibly further increase the load on it.\n\t\t\tfor peer, req := range f.requests {\n\t\t\t\tif time.Duration(f.clock.Now()-req.time)+txGatherSlack > txFetchTimeout {\n\t\t\t\t\ttxRequestTimeoutMeter.Mark(int64(len(req.hashes)))\n\n\t\t\t\t\t// Reschedule all the not-yet-delivered fetches to alternate peers\n\t\t\t\t\tfor _, hash := range req.hashes {\n\t\t\t\t\t\t// Skip rescheduling hashes already delivered by someone else\n\t\t\t\t\t\tif req.stolen != nil {\n\t\t\t\t\t\t\tif _, ok := req.stolen[hash]; ok {\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// Move the delivery back from fetching to queued\n\t\t\t\t\t\tif _, ok := f.announced[hash]; ok {\n\t\t\t\t\t\t\tpanic(\"announced tracker already contains alternate item\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif f.alternates[hash] != nil { // nil if tx was broadcast during fetch\n\t\t\t\t\t\t\tf.announced[hash] = f.alternates[hash]\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdelete(f.announced[hash], peer)\n\t\t\t\t\t\tif len(f.announced[hash]) == 0 {\n\t\t\t\t\t\t\tdelete(f.announced, hash)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tdelete(f.announces[peer], hash)\n\t\t\t\t\t\tdelete(f.alternates, hash)\n\t\t\t\t\t\tdelete(f.fetching, hash)\n\t\t\t\t\t}\n\t\t\t\t\tif len(f.announces[peer]) == 0 {\n\t\t\t\t\t\tdelete(f.announces, peer)\n\t\t\t\t\t}\n\t\t\t\t\t// Keep track of the request as dangling, but never expire\n\t\t\t\t\tf.requests[peer].hashes = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Schedule a new transaction retrieval\n\t\t\tf.scheduleFetches(timeoutTimer, timeoutTrigger, nil)\n\n\t\t\t// No idea if we scheduled something or not, trigger the timer if needed\n\t\t\t// TODO(karalabe): this is kind of lame, can't we dump it into scheduleFetches somehow?\n\t\t\tf.rescheduleTimeout(timeoutTimer, timeoutTrigger)\n\n\t\tcase delivery := <-f.cleanup:\n\t\t\t// Independent of whether the delivery was direct or broadcast, remove all\n\t\t\t// traces of the hash from internal trackers\n\t\t\tfor _, hash := range delivery.hashes {\n\t\t\t\tif _, ok := f.waitlist[hash]; ok {\n\t\t\t\t\tfor peer, txset := range f.waitslots {\n\t\t\t\t\t\tdelete(txset, hash)\n\t\t\t\t\t\tif len(txset) == 0 {\n\t\t\t\t\t\t\tdelete(f.waitslots, peer)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tdelete(f.waitlist, hash)\n\t\t\t\t\tdelete(f.waittime, hash)\n\t\t\t\t} else {\n\t\t\t\t\tfor peer, txset := range f.announces {\n\t\t\t\t\t\tdelete(txset, hash)\n\t\t\t\t\t\tif len(txset) == 0 {\n\t\t\t\t\t\t\tdelete(f.announces, peer)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tdelete(f.announced, hash)\n\t\t\t\t\tdelete(f.alternates, hash)\n\n\t\t\t\t\t// If a transaction currently being fetched from a different\n\t\t\t\t\t// origin was delivered (delivery stolen), mark it so the\n\t\t\t\t\t// actual delivery won't double schedule it.\n\t\t\t\t\tif origin, ok := f.fetching[hash]; ok && (origin != delivery.origin || !delivery.direct) {\n\t\t\t\t\t\tstolen := f.requests[origin].stolen\n\t\t\t\t\t\tif stolen == nil {\n\t\t\t\t\t\t\tf.requests[origin].stolen = make(map[common.Hash]struct{})\n\t\t\t\t\t\t\tstolen = f.requests[origin].stolen\n\t\t\t\t\t\t}\n\t\t\t\t\t\tstolen[hash] = struct{}{}\n\t\t\t\t\t}\n\t\t\t\t\tdelete(f.fetching, hash)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// In case of a direct delivery, also reschedule anything missing\n\t\t\t// from the original query\n\t\t\tif delivery.direct {\n\t\t\t\t// Mark the request successful (independent of individual status)\n\t\t\t\ttxRequestDoneMeter.Mark(int64(len(delivery.hashes)))\n\n\t\t\t\t// Make sure something was pending, nuke it\n\t\t\t\treq := f.requests[delivery.origin]\n\t\t\t\tif req == nil {\n\t\t\t\t\tlog.Warn(\"Unexpected transaction delivery\", \"peer\", delivery.origin)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tdelete(f.requests, delivery.origin)\n\n\t\t\t\t// Anything not delivered should be re-scheduled (with or without\n\t\t\t\t// this peer, depending on the response cutoff)\n\t\t\t\tdelivered := make(map[common.Hash]struct{})\n\t\t\t\tfor _, hash := range delivery.hashes {\n\t\t\t\t\tdelivered[hash] = struct{}{}\n\t\t\t\t}\n\t\t\t\tcutoff := len(req.hashes) // If nothing is delivered, assume everything is missing, don't retry!!!\n\t\t\t\tfor i, hash := range req.hashes {\n\t\t\t\t\tif _, ok := delivered[hash]; ok {\n\t\t\t\t\t\tcutoff = i\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Reschedule missing hashes from alternates, not-fulfilled from alt+self\n\t\t\t\tfor i, hash := range req.hashes {\n\t\t\t\t\t// Skip rescheduling hashes already delivered by someone else\n\t\t\t\t\tif req.stolen != nil {\n\t\t\t\t\t\tif _, ok := req.stolen[hash]; ok {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif _, ok := delivered[hash]; !ok {\n\t\t\t\t\t\tif i < cutoff {\n\t\t\t\t\t\t\tdelete(f.alternates[hash], delivery.origin)\n\t\t\t\t\t\t\tdelete(f.announces[delivery.origin], hash)\n\t\t\t\t\t\t\tif len(f.announces[delivery.origin]) == 0 {\n\t\t\t\t\t\t\t\tdelete(f.announces, delivery.origin)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif len(f.alternates[hash]) > 0 {\n\t\t\t\t\t\t\tif _, ok := f.announced[hash]; ok {\n\t\t\t\t\t\t\t\tpanic(fmt.Sprintf(\"announced tracker already contains alternate item: %v\", f.announced[hash]))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tf.announced[hash] = f.alternates[hash]\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tdelete(f.alternates, hash)\n\t\t\t\t\tdelete(f.fetching, hash)\n\t\t\t\t}\n\t\t\t\t// Something was delivered, try to reschedule requests\n\t\t\t\tf.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too\n\t\t\t}\n\n\t\tcase drop := <-f.drop:\n\t\t\t// A peer was dropped, remove all traces of it\n\t\t\tif _, ok := f.waitslots[drop.peer]; ok {\n\t\t\t\tfor hash := range f.waitslots[drop.peer] 
{\n\t\t\t\t\tdelete(f.waitlist[hash], drop.peer)\n\t\t\t\t\tif len(f.waitlist[hash]) == 0 {\n\t\t\t\t\t\tdelete(f.waitlist, hash)\n\t\t\t\t\t\tdelete(f.waittime, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdelete(f.waitslots, drop.peer)\n\t\t\t\tif len(f.waitlist) > 0 {\n\t\t\t\t\tf.rescheduleWait(waitTimer, waitTrigger)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Clean up any active requests\n\t\t\tvar request *txRequest\n\t\t\tif request = f.requests[drop.peer]; request != nil {\n\t\t\t\tfor _, hash := range request.hashes {\n\t\t\t\t\t// Skip rescheduling hashes already delivered by someone else\n\t\t\t\t\tif request.stolen != nil {\n\t\t\t\t\t\tif _, ok := request.stolen[hash]; ok {\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t// Undelivered hash, reschedule if there's an alternative origin available\n\t\t\t\t\tdelete(f.alternates[hash], drop.peer)\n\t\t\t\t\tif len(f.alternates[hash]) == 0 {\n\t\t\t\t\t\tdelete(f.alternates, hash)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tf.announced[hash] = f.alternates[hash]\n\t\t\t\t\t\tdelete(f.alternates, hash)\n\t\t\t\t\t}\n\t\t\t\t\tdelete(f.fetching, hash)\n\t\t\t\t}\n\t\t\t\tdelete(f.requests, drop.peer)\n\t\t\t}\n\t\t\t// Clean up general announcement tracking\n\t\t\tif _, ok := f.announces[drop.peer]; ok {\n\t\t\t\tfor hash := range f.announces[drop.peer] {\n\t\t\t\t\tdelete(f.announced[hash], drop.peer)\n\t\t\t\t\tif len(f.announced[hash]) == 0 {\n\t\t\t\t\t\tdelete(f.announced, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdelete(f.announces, drop.peer)\n\t\t\t}\n\t\t\t// If a request was cancelled, check if anything needs to be rescheduled\n\t\t\tif request != nil {\n\t\t\t\tf.scheduleFetches(timeoutTimer, timeoutTrigger, nil)\n\t\t\t\tf.rescheduleTimeout(timeoutTimer, timeoutTrigger)\n\t\t\t}\n\n\t\tcase <-f.quit:\n\t\t\treturn\n\t\t}\n\t\t// No idea what happened, but bump some sanity metrics\n\t\ttxFetcherWaitingPeers.Update(int64(len(f.waitslots)))\n\t\ttxFetcherWaitingHashes.Update(int64(len(f.waitlist)))\n\t\ttxFetcherQueueingPeers.Update(int64(len(f.announces) - len(f.requests)))\n\t\ttxFetcherQueueingHashes.Update(int64(len(f.announced)))\n\t\ttxFetcherFetchingPeers.Update(int64(len(f.requests)))\n\t\ttxFetcherFetchingHashes.Update(int64(len(f.fetching)))\n\n\t\t// Loop did something, ping the step notifier if needed (tests)\n\t\tif f.step != nil {\n\t\t\tf.step <- struct{}{}\n\t\t}\n\t}\n}\n\n// rescheduleWait iterates over all the transactions currently in the waitlist\n// and schedules the movement into the fetcher for the earliest.\n//\n// The method has a granularity of 'gatherSlack', since there's not much point in\n// spinning over all the transactions just to maybe find one that should trigger\n// a few ms earlier.\nfunc (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) {\n\tif *timer != nil {\n\t\t(*timer).Stop()\n\t}\n\tnow := f.clock.Now()\n\n\tearliest := now\n\tfor _, instance := range f.waittime {\n\t\tif earliest > instance {\n\t\t\tearliest = instance\n\t\t\tif txArriveTimeout-time.Duration(now-earliest) < gatherSlack {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\t*timer = f.clock.AfterFunc(txArriveTimeout-time.Duration(now-earliest), func() {\n\t\ttrigger <- struct{}{}\n\t})\n}\n\n// rescheduleTimeout iterates over all the transactions currently in flight and\n// schedules a cleanup run when the first would trigger.\n//\n// The method has a granularity of 'gatherSlack', since there's not much point in\n// spinning over all the transactions just to maybe find one that should trigger\n// a few ms earlier.\n//\n// 
This method is a bit \"flaky\" \"by design\". In theory the timeout timer only ever\n// should be rescheduled if some request is pending. In practice, a timeout will\n// cause the timer to be rescheduled every 5 secs (until the peer comes through or\n// disconnects). This is a limitation of the fetcher code because we don't track\n// pending requests and timed out requests separately. Without double tracking, if\n// we simply didn't reschedule the timer on all-timeout then the timer would never\n// be set again since len(requests) > 0 => something's running.\nfunc (f *TxFetcher) rescheduleTimeout(timer *mclock.Timer, trigger chan struct{}) {\n\tif *timer != nil {\n\t\t(*timer).Stop()\n\t}\n\tnow := f.clock.Now()\n\n\tearliest := now\n\tfor _, req := range f.requests {\n\t\t// If this request already timed out, skip it altogether\n\t\tif req.hashes == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif earliest > req.time {\n\t\t\tearliest = req.time\n\t\t\tif txFetchTimeout-time.Duration(now-earliest) < gatherSlack {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\t*timer = f.clock.AfterFunc(txFetchTimeout-time.Duration(now-earliest), func() {\n\t\ttrigger <- struct{}{}\n\t})\n}\n\n
// scheduleFetches starts a batch of retrievals for all available idle peers.\nfunc (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, whitelist map[string]struct{}) {\n\t// Gather the set of peers we want to retrieve from (default to all)\n\tactives := whitelist\n\tif actives == nil {\n\t\tactives = make(map[string]struct{})\n\t\tfor peer := range f.announces {\n\t\t\tactives[peer] = struct{}{}\n\t\t}\n\t}\n\tif len(actives) == 0 {\n\t\treturn\n\t}\n\t// For each active peer, try to schedule some transaction fetches\n\tidle := len(f.requests) == 0\n\n\tf.forEachPeer(actives, func(peer string) {\n\t\tif f.requests[peer] != nil {\n\t\t\treturn // continue in the for-each\n\t\t}\n\t\tif len(f.announces[peer]) == 0 {\n\t\t\treturn // continue in the for-each\n\t\t}\n\t\thashes := make([]common.Hash, 0, maxTxRetrievals)\n\t\tf.forEachHash(f.announces[peer], func(hash common.Hash) bool {\n\t\t\tif _, ok := f.fetching[hash]; !ok {\n\t\t\t\t// Mark the hash as fetching and stash away possible alternates\n\t\t\t\tf.fetching[hash] = peer\n\n\t\t\t\tif _, ok := f.alternates[hash]; ok {\n\t\t\t\t\tpanic(fmt.Sprintf(\"alternate tracker already contains fetching item: %v\", f.alternates[hash]))\n\t\t\t\t}\n\t\t\t\tf.alternates[hash] = f.announced[hash]\n\t\t\t\tdelete(f.announced, hash)\n\n\t\t\t\t// Accumulate the hash and stop if the limit was reached\n\t\t\t\thashes = append(hashes, hash)\n\t\t\t\tif len(hashes) >= maxTxRetrievals {\n\t\t\t\t\treturn false // break in the for-each\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true // continue in the for-each\n\t\t})\n\t\t// If any hashes were allocated, request them from the peer\n\t\tif len(hashes) > 0 {\n\t\t\tf.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()}\n\t\t\ttxRequestOutMeter.Mark(int64(len(hashes)))\n\n\t\t\tgo func(peer string, hashes []common.Hash) {\n\t\t\t\t// Try to fetch the transactions, but in case of a request\n\t\t\t\t// failure (e.g. peer disconnected), reschedule the hashes.\n\t\t\t\tif err := f.fetchTxs(peer, hashes); err != nil {\n\t\t\t\t\ttxRequestFailMeter.Mark(int64(len(hashes)))\n\t\t\t\t\tf.Drop(peer)\n\t\t\t\t}\n\t\t\t}(peer, hashes)\n\t\t}\n\t})\n\t// If a new request was fired, schedule a timeout timer\n\tif idle && len(f.requests) > 0 {\n\t\tf.rescheduleTimeout(timer, timeout)\n\t}\n}\n\n
// forEachPeer does a range loop over a map of peers in production, but during\n// testing it does a deterministic sorted (and randomly rotated) iteration to\n// allow reproducing issues.\nfunc (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) {\n\t// If we're running production, use whatever Go's map gives us\n\tif f.rand == nil {\n\t\tfor peer := range peers {\n\t\t\tdo(peer)\n\t\t}\n\t\treturn\n\t}\n\t// We're running the test suite, make iteration deterministic\n\tlist := make([]string, 0, len(peers))\n\tfor peer := range peers {\n\t\tlist = append(list, peer)\n\t}\n\tsort.Strings(list)\n\trotateStrings(list, f.rand.Intn(len(list)))\n\tfor _, peer := range list {\n\t\tdo(peer)\n\t}\n}\n\n
// forEachHash does a range loop over a map of hashes in production, but during\n// testing it does a deterministic sorted (and randomly rotated) iteration to\n// allow reproducing issues.\nfunc (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) {\n\t// If we're running production, use whatever Go's map gives us\n\tif f.rand == nil {\n\t\tfor hash := range hashes {\n\t\t\tif !do(hash) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\t// We're running the test suite, make iteration deterministic\n\tlist := make([]common.Hash, 0, len(hashes))\n\tfor hash := range hashes {\n\t\tlist = append(list, hash)\n\t}\n\tsortHashes(list)\n\trotateHashes(list, f.rand.Intn(len(list)))\n\tfor _, hash := range list {\n\t\tif !do(hash) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n
// rotateStrings rotates the contents of a slice by n steps. This method is only\n// used in tests to simulate random map iteration but keep it deterministic.\nfunc rotateStrings(slice []string, n int) {\n\torig := make([]string, len(slice))\n\tcopy(orig, slice)\n\n\tfor i := 0; i < len(orig); i++ {\n\t\tslice[i] = orig[(i+n)%len(orig)]\n\t}\n}\n\n// sortHashes sorts a slice of hashes. This method is only used in tests in order\n// to simulate random map iteration but keep it deterministic.\nfunc sortHashes(slice []common.Hash) {\n\tfor i := 0; i < len(slice); i++ {\n\t\tfor j := i + 1; j < len(slice); j++ {\n\t\t\tif bytes.Compare(slice[i][:], slice[j][:]) > 0 {\n\t\t\t\tslice[i], slice[j] = slice[j], slice[i]\n\t\t\t}\n\t\t}\n\t}\n}\n\n// rotateHashes rotates the contents of a slice by n steps. This method is only\n// used in tests to simulate random map iteration but keep it deterministic.\nfunc rotateHashes(slice []common.Hash, n int) {\n\torig := make([]common.Hash, len(slice))\n\tcopy(orig, slice)\n\n\tfor i := 0; i < len(orig); i++ {\n\t\tslice[i] = orig[(i+n)%len(orig)]\n\t}\n}\n"
  },
  {
    "path": "eth/fetcher/tx_fetcher_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage fetcher\n\nimport (\n\t\"errors\"\n\t\"math/big\"\n\t\"math/rand\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n)\n\nvar (\n\t// testTxs is a set of transactions to use during testing that have meaningful hashes.\n\ttestTxs = []*types.Transaction{\n\t\ttypes.NewTransaction(5577006791947779410, common.Address{0x0f}, new(big.Int), 0, new(big.Int), nil),\n\t\ttypes.NewTransaction(15352856648520921629, common.Address{0xbb}, new(big.Int), 0, new(big.Int), nil),\n\t\ttypes.NewTransaction(3916589616287113937, common.Address{0x86}, new(big.Int), 0, new(big.Int), nil),\n\t\ttypes.NewTransaction(9828766684487745566, common.Address{0xac}, new(big.Int), 0, new(big.Int), nil),\n\t}\n\t// testTxsHashes is the hashes of the test transactions above\n\ttestTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}\n)\n\ntype doTxNotify struct {\n\tpeer   string\n\thashes []common.Hash\n}\ntype doTxEnqueue struct {\n\tpeer   string\n\ttxs    []*types.Transaction\n\tdirect bool\n}\ntype doWait struct {\n\ttime time.Duration\n\tstep bool\n}\ntype doDrop string\ntype doFunc func()\n\ntype isWaiting map[string][]common.Hash\ntype isScheduled struct {\n\ttracking map[string][]common.Hash\n\tfetching map[string][]common.Hash\n\tdangling map[string][]common.Hash\n}\ntype isUnderpriced int\n\n// txFetcherTest represents a test scenario that can be executed by the test\n// runner.\ntype txFetcherTest struct {\n\tinit  func() *TxFetcher\n\tsteps []interface{}\n}\n\n// Tests that transaction announcements are added to a waitlist, and none\n// of them are scheduled for retrieval until the wait expires.\nfunc TestTransactionFetcherWaiting(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tnil,\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Initial announcement to get something into the waitlist\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x01}, {0x02}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t}),\n\t\t\t// Announce from a new peer to check that no overwrite happens\n\t\t\tdoTxNotify{peer: \"B\", hashes: []common.Hash{{0x03}, {0x04}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t\"B\": {{0x03}, {0x04}},\n\t\t\t}),\n\t\t\t// Announce clashing hashes but unique new peer\n\t\t\tdoTxNotify{peer: 
\"C\", hashes: []common.Hash{{0x01}, {0x04}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t\"B\": {{0x03}, {0x04}},\n\t\t\t\t\"C\": {{0x01}, {0x04}},\n\t\t\t}),\n\t\t\t// Announce existing and clashing hashes from existing peer\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x01}, {0x02}, {0x03}, {0x05}},\n\t\t\t\t\"B\": {{0x03}, {0x04}},\n\t\t\t\t\"C\": {{0x01}, {0x04}},\n\t\t\t}),\n\t\t\tisScheduled{tracking: nil, fetching: nil},\n\n\t\t\t// Wait for the arrival timeout which should move all expired items\n\t\t\t// from the wait list to the scheduler\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}, {0x03}, {0x05}},\n\t\t\t\t\t\"B\": {{0x03}, {0x04}},\n\t\t\t\t\t\"C\": {{0x01}, {0x04}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{ // Depends on deterministic test randomizer\n\t\t\t\t\t\"A\": {{0x02}, {0x03}, {0x05}},\n\t\t\t\t\t\"C\": {{0x01}, {0x04}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Queue up a non-fetchable transaction and then trigger it with a new\n\t\t\t// peer (weird case to test 1 line in the fetcher)\n\t\t\tdoTxNotify{peer: \"C\", hashes: []common.Hash{{0x06}, {0x07}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"C\": {{0x06}, {0x07}},\n\t\t\t}),\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}, {0x03}, {0x05}},\n\t\t\t\t\t\"B\": {{0x03}, {0x04}},\n\t\t\t\t\t\"C\": {{0x01}, {0x04}, {0x06}, {0x07}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x02}, {0x03}, {0x05}},\n\t\t\t\t\t\"C\": {{0x01}, {0x04}},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdoTxNotify{peer: \"D\", hashes: []common.Hash{{0x06}, {0x07}}},\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}, {0x03}, {0x05}},\n\t\t\t\t\t\"B\": {{0x03}, {0x04}},\n\t\t\t\t\t\"C\": {{0x01}, {0x04}, {0x06}, {0x07}},\n\t\t\t\t\t\"D\": {{0x06}, {0x07}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x02}, {0x03}, {0x05}},\n\t\t\t\t\t\"C\": {{0x01}, {0x04}},\n\t\t\t\t\t\"D\": {{0x06}, {0x07}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n\n// Tests that transaction announcements skip the waiting list if they are\n// already scheduled.\nfunc TestTransactionFetcherSkipWaiting(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tnil,\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Push an initial announcement through to the scheduled stage\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x01}, {0x02}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t}),\n\t\t\tisScheduled{tracking: nil, fetching: nil},\n\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Announce overlaps from the same peer, ensure the new ones end up\n\t\t\t// in stage one, and clashing ones don't get double 
tracked\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x02}, {0x03}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x03}},\n\t\t\t}),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Announce overlaps from a new peer, ensure new transactions end up\n\t\t\t// in stage one and clashing ones get tracked for the new peer\n\t\t\tdoTxNotify{peer: \"B\", hashes: []common.Hash{{0x02}, {0x03}, {0x04}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x03}},\n\t\t\t\t\"B\": {{0x03}, {0x04}},\n\t\t\t}),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t\t\"B\": {{0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n\n// Tests that only a single transaction request gets scheduled to a peer\n// and subsequent announces block or get allotted to someone else.\nfunc TestTransactionFetcherSingletonRequesting(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tnil,\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Push an initial announcement through to the scheduled stage\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x01}, {0x02}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t}),\n\t\t\tisScheduled{tracking: nil, fetching: nil},\n\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Announce a new set of transactions from the same peer and ensure\n\t\t\t// they do not start fetching since the peer is already busy\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x03}, {0x04}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x03}, {0x04}},\n\t\t\t}),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}, {0x03}, {0x04}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Announce a duplicate set of transactions from a new peer and ensure\n\t\t\t// uniquely new ones start downloading, even if clashing.\n\t\t\tdoTxNotify{peer: \"B\", hashes: []common.Hash{{0x02}, {0x03}, {0x05}, {0x06}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"B\": {{0x05}, {0x06}},\n\t\t\t}),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}, {0x03}, {0x04}},\n\t\t\t\t\t\"B\": {{0x02}, {0x03}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t\t\"B\": {{0x03}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n\n// Tests that if a transaction retrieval fails, all the 
transactions get\n// instantly scheduled back to someone else or the announcements dropped\n// if no alternate source is available.\nfunc TestTransactionFetcherFailedRescheduling(t *testing.T) {\n\t// Create a channel to control when tx requests can fail\n\tproceed := make(chan struct{})\n\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tnil,\n\t\t\t\tfunc(origin string, hashes []common.Hash) error {\n\t\t\t\t\t<-proceed\n\t\t\t\t\treturn errors.New(\"peer disconnected\")\n\t\t\t\t},\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Push an initial announcement through to the scheduled stage\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x01}, {0x02}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t}),\n\t\t\tisScheduled{tracking: nil, fetching: nil},\n\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// While the original peer is stuck in the request, push in a second\n\t\t\t// data source.\n\t\t\tdoTxNotify{peer: \"B\", hashes: []common.Hash{{0x02}}},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t\t\"B\": {{0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Wait until the original request fails and check that transactions\n\t\t\t// are either rescheduled or dropped\n\t\t\tdoFunc(func() {\n\t\t\t\tproceed <- struct{}{} // Allow peer A to return the failure\n\t\t\t}),\n\t\t\tdoWait{time: 0, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"B\": {{0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"B\": {{0x02}},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdoFunc(func() {\n\t\t\t\tproceed <- struct{}{} // Allow peer B to return the failure\n\t\t\t}),\n\t\t\tdoWait{time: 0, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{nil, nil, nil},\n\t\t},\n\t})\n}\n\n// Tests that if a transaction retrieval succeeds, all alternate origins\n// are cleaned up.\nfunc TestTransactionFetcherCleanup(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Push an initial announcement through to the scheduled stage\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t}),\n\t\t\tisScheduled{tracking: nil, fetching: nil},\n\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Request should be delivered\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[0]}, 
direct: true},\n\t\t\tisScheduled{nil, nil, nil},\n\t\t},\n\t})\n}\n\n// Tests that if a transaction retrieval succeeds, but the response is empty (no\n// transactions available), then all are nuked instead of being rescheduled (yes,\n// this was a bug).\nfunc TestTransactionFetcherCleanupEmpty(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Push an initial announcement through to the scheduled stage\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t}),\n\t\t\tisScheduled{tracking: nil, fetching: nil},\n\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Deliver an empty response and ensure the transaction is cleared, not rescheduled\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{}, direct: true},\n\t\t\tisScheduled{nil, nil, nil},\n\t\t},\n\t})\n}\n\n// Tests that non-returned transactions are either re-scheduled from a\n// different peer, or from the same peer if they are after the cutoff point.\nfunc TestTransactionFetcherMissingRescheduling(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Push an initial announcement through to the scheduled stage\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},\n\t\t\t}),\n\t\t\tisScheduled{tracking: nil, fetching: nil},\n\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Deliver the middle transaction requested, the one before which\n\t\t\t// should be dropped and the one after re-requested.\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[0]}, direct: true}, // This depends on the deterministic random\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[2]},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[2]},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n\n// Tests that out of two transactions, if one is missing and the last is\n// delivered, the peer gets properly cleaned out from the internal state.\nfunc TestTransactionFetcherMissingCleanup(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() 
*TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Push an initial announcement through to the scheduled stage\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {testTxsHashes[0], testTxsHashes[1]},\n\t\t\t}),\n\t\t\tisScheduled{tracking: nil, fetching: nil},\n\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0], testTxsHashes[1]},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0], testTxsHashes[1]},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Deliver the last transaction requested; the missing one should be\n\t\t\t// dropped, cleaning the peer out of the internal state.\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[1]}, direct: true}, // This depends on the deterministic random\n\t\t\tisScheduled{nil, nil, nil},\n\t\t},\n\t})\n}\n\n// Tests that transaction broadcasts properly clean up announcements.\nfunc TestTransactionFetcherBroadcasts(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Set up three transactions to be in different states: waiting, queued and fetching\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[1]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[2]}},\n\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {testTxsHashes[2]},\n\t\t\t}),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0], testTxsHashes[1]},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Broadcast all the transactions and ensure everything gets cleaned\n\t\t\t// up, but the dangling request is left alone to avoid doing multiple\n\t\t\t// concurrent requests.\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: false},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: nil,\n\t\t\t\tfetching: nil,\n\t\t\t\tdangling: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Deliver the requested hashes\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: true},\n\t\t\tisScheduled{nil, nil, nil},\n\t\t},\n\t})\n}\n\n// Tests that the waiting list timers properly reset and reschedule.\nfunc TestTransactionFetcherWaitTimerResets(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tnil,\n\t\t\t\tfunc(string, []common.Hash) error { return nil 
},\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x01}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x01}},\n\t\t\t}),\n\t\t\tisScheduled{nil, nil, nil},\n\t\t\tdoWait{time: txArriveTimeout / 2, step: false},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x01}},\n\t\t\t}),\n\t\t\tisScheduled{nil, nil, nil},\n\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x02}}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t}),\n\t\t\tisScheduled{nil, nil, nil},\n\t\t\tdoWait{time: txArriveTimeout / 2, step: true},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x02}},\n\t\t\t}),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\tdoWait{time: txArriveTimeout / 2, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n\n// Tests that if a transaction request is not replied to, it will time\n// out and be re-scheduled for someone else.\nfunc TestTransactionFetcherTimeoutRescheduling(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Push an initial announcement through to the scheduled stage\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t}),\n\t\t\tisScheduled{tracking: nil, fetching: nil},\n\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Wait until the delivery times out, everything should be cleaned up\n\t\t\tdoWait{time: txFetchTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: nil,\n\t\t\t\tfetching: nil,\n\t\t\t\tdangling: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Ensure that followup announcements don't get scheduled\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[1]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[1]},\n\t\t\t\t},\n\t\t\t\tfetching: nil,\n\t\t\t\tdangling: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// If the dangling request arrives a bit later, do not choke\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[0]}, direct: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[1]},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[1]},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n\n// Tests that the fetching timeout timers properly reset and 
reschedule.\nfunc TestTransactionFetcherTimeoutTimerResets(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tnil,\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x01}}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxNotify{peer: \"B\", hashes: []common.Hash{{0x02}}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}},\n\t\t\t\t\t\"B\": {{0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}},\n\t\t\t\t\t\"B\": {{0x02}},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdoWait{time: txFetchTimeout - txArriveTimeout, step: true},\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"B\": {{0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"B\": {{0x02}},\n\t\t\t\t},\n\t\t\t\tdangling: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisScheduled{\n\t\t\t\ttracking: nil,\n\t\t\t\tfetching: nil,\n\t\t\t\tdangling: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {},\n\t\t\t\t\t\"B\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n\n// Tests that if thousands of transactions are announced, only a small\n// number of them will be requested at a time.\nfunc TestTransactionFetcherRateLimiting(t *testing.T) {\n\t// Create a slew of transactions to announce\n\tvar hashes []common.Hash\n\tfor i := 0; i < maxTxAnnounces; i++ {\n\t\thashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)})\n\t}\n\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tnil,\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Announce all the transactions, wait a bit and ensure only a small\n\t\t\t// percentage gets requested\n\t\t\tdoTxNotify{peer: \"A\", hashes: hashes},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": hashes,\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": hashes[1643 : 1643+maxTxRetrievals],\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n\n// Tests that the number of transactions a peer is allowed to announce and/or\n// request at the same time is hard capped.\nfunc TestTransactionFetcherDoSProtection(t *testing.T) {\n\t// Create a slew of transactions to announce\n\tvar hashesA []common.Hash\n\tfor i := 0; i < maxTxAnnounces+1; i++ {\n\t\thashesA = append(hashesA, common.Hash{0x01, byte(i / 256), byte(i % 256)})\n\t}\n\tvar hashesB []common.Hash\n\tfor i := 0; i < maxTxAnnounces+1; i++ {\n\t\thashesB = append(hashesB, common.Hash{0x02, byte(i / 256), byte(i % 256)})\n\t}\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tnil,\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Announce half of the transactions and wait for them to be scheduled\n\t\t\tdoTxNotify{peer: \"A\", hashes: 
hashesA[:maxTxAnnounces/2]},\n\t\t\tdoTxNotify{peer: \"B\", hashes: hashesB[:maxTxAnnounces/2-1]},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\n\t\t\t// Announce the second half and keep them in the wait list\n\t\t\tdoTxNotify{peer: \"A\", hashes: hashesA[maxTxAnnounces/2 : maxTxAnnounces]},\n\t\t\tdoTxNotify{peer: \"B\", hashes: hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1]},\n\n\t\t\t// Ensure the hashes are split half and half\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": hashesA[maxTxAnnounces/2 : maxTxAnnounces],\n\t\t\t\t\"B\": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1],\n\t\t\t}),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": hashesA[:maxTxAnnounces/2],\n\t\t\t\t\t\"B\": hashesB[:maxTxAnnounces/2-1],\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": hashesA[1643 : 1643+maxTxRetrievals],\n\t\t\t\t\t\"B\": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Ensure that adding even one more hash results in dropping the hash\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{hashesA[maxTxAnnounces]}},\n\t\t\tdoTxNotify{peer: \"B\", hashes: hashesB[maxTxAnnounces-1 : maxTxAnnounces+1]},\n\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": hashesA[maxTxAnnounces/2 : maxTxAnnounces],\n\t\t\t\t\"B\": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces],\n\t\t\t}),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": hashesA[:maxTxAnnounces/2],\n\t\t\t\t\t\"B\": hashesB[:maxTxAnnounces/2-1],\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": hashesA[1643 : 1643+maxTxRetrievals],\n\t\t\t\t\t\"B\": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n\n// Tests that underpriced transactions don't get rescheduled after being rejected.\nfunc TestTransactionFetcherUnderpricedDedup(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\terrs := make([]error, len(txs))\n\t\t\t\t\tfor i := 0; i < len(errs); i++ {\n\t\t\t\t\t\tif i%2 == 0 {\n\t\t\t\t\t\t\terrs[i] = core.ErrUnderpriced\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\terrs[i] = core.ErrReplaceUnderpriced\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn errs\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Deliver a transaction through the fetcher, but reject as underpriced\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[0], testTxs[1]}, direct: true},\n\t\t\tisScheduled{nil, nil, nil},\n\n\t\t\t// Try to announce the transaction again, ensure it's not scheduled back\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}}, // [2] is needed to force a step in the fetcher\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {testTxsHashes[2]},\n\t\t\t}),\n\t\t\tisScheduled{nil, nil, nil},\n\t\t},\n\t})\n}\n\n// Tests that underpriced transactions don't get rescheduled after being rejected,\n// but at the same time there's a hard cap on the number of 
transactions that are\n// tracked.\nfunc TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {\n\t// Temporarily disable fetch timeouts as they massively mess up the simulated clock\n\tdefer func(timeout time.Duration) { txFetchTimeout = timeout }(txFetchTimeout)\n\ttxFetchTimeout = 24 * time.Hour\n\n\t// Create a slew of transactions to max out the underpriced set\n\tvar txs []*types.Transaction\n\tfor i := 0; i < maxTxUnderpricedSetSize+1; i++ {\n\t\ttxs = append(txs, types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil))\n\t}\n\thashes := make([]common.Hash, len(txs))\n\tfor i, tx := range txs {\n\t\thashes[i] = tx.Hash()\n\t}\n\t// Generate a set of steps to announce and deliver the entire set of transactions\n\tvar steps []interface{}\n\tfor i := 0; i < maxTxUnderpricedSetSize/maxTxRetrievals; i++ {\n\t\tsteps = append(steps, doTxNotify{peer: \"A\", hashes: hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals]})\n\t\tsteps = append(steps, isWaiting(map[string][]common.Hash{\n\t\t\t\"A\": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],\n\t\t}))\n\t\tsteps = append(steps, doWait{time: txArriveTimeout, step: true})\n\t\tsteps = append(steps, isScheduled{\n\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\"A\": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],\n\t\t\t},\n\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\"A\": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],\n\t\t\t},\n\t\t})\n\t\tsteps = append(steps, doTxEnqueue{peer: \"A\", txs: txs[i*maxTxRetrievals : (i+1)*maxTxRetrievals], direct: true})\n\t\tsteps = append(steps, isWaiting(nil))\n\t\tsteps = append(steps, isScheduled{nil, nil, nil})\n\t\tsteps = append(steps, isUnderpriced((i+1)*maxTxRetrievals))\n\t}\n\ttestTransactionFetcher(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\terrs := make([]error, len(txs))\n\t\t\t\t\tfor i := 0; i < len(errs); i++ {\n\t\t\t\t\t\terrs[i] = core.ErrUnderpriced\n\t\t\t\t\t}\n\t\t\t\t\treturn errs\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: append(steps, []interface{}{\n\t\t\t// The preparation of the test has already been done in `steps`, add the last check\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{hashes[maxTxUnderpricedSetSize]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{txs[maxTxUnderpricedSetSize]}, direct: true},\n\t\t\tisUnderpriced(maxTxUnderpricedSetSize),\n\t\t}...),\n\t})\n}\n\n// Tests that unexpected deliveries don't corrupt the internal state.\nfunc TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Deliver something out of the blue\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{nil, nil, nil},\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[0]}, direct: false},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{nil, nil, nil},\n\n\t\t\t// Set up a few hashes into various stages\n\t\t\tdoTxNotify{peer: \"A\", hashes: 
[]common.Hash{testTxsHashes[0]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[1]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[2]}},\n\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {testTxsHashes[2]},\n\t\t\t}),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0], testTxsHashes[1]},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Deliver everything and more out of the blue\n\t\t\tdoTxEnqueue{peer: \"B\", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2], testTxs[3]}, direct: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: nil,\n\t\t\t\tfetching: nil,\n\t\t\t\tdangling: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n\n// Tests that dropping a peer cleans out all internal data structures in all the\n// live or dangling stages.\nfunc TestTransactionFetcherDrop(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Set up a few hashes into various stages\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x01}}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x02}}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x03}}},\n\n\t\t\tisWaiting(map[string][]common.Hash{\n\t\t\t\t\"A\": {{0x03}},\n\t\t\t}),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}, {0x02}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Drop the peer and ensure everything's cleaned out\n\t\t\tdoDrop(\"A\"),\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{nil, nil, nil},\n\n\t\t\t// Push the node into a dangling (timeout) state\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {testTxsHashes[0]},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdoWait{time: txFetchTimeout, step: true},\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: nil,\n\t\t\t\tfetching: nil,\n\t\t\t\tdangling: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Drop the peer and ensure everything's cleaned out\n\t\t\tdoDrop(\"A\"),\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{nil, nil, nil},\n\t\t},\n\t})\n}\n\n// Tests that dropping a peer instantly reschedules failed announcements to any\n// available peer.\nfunc TestTransactionFetcherDropRescheduling(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, 
len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Set up a few hashes into various stages\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x01}}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxNotify{peer: \"B\", hashes: []common.Hash{{0x01}}},\n\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}},\n\t\t\t\t\t\"B\": {{0x01}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"A\": {{0x01}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Drop the peer and ensure everything's cleaned out\n\t\t\tdoDrop(\"A\"),\n\t\t\tisWaiting(nil),\n\t\t\tisScheduled{\n\t\t\t\ttracking: map[string][]common.Hash{\n\t\t\t\t\t\"B\": {{0x01}},\n\t\t\t\t},\n\t\t\t\tfetching: map[string][]common.Hash{\n\t\t\t\t\t\"B\": {{0x01}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n\n// This test reproduces a crash caught by the fuzzer. The root cause was a\n// dangling transaction timing out and clashing on readd with a concurrently\n// announced one.\nfunc TestTransactionFetcherFuzzCrash01(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Get a transaction into fetching mode and make it dangling with a broadcast\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[0]}},\n\n\t\t\t// Notify the dangling transaction once more and crash via a timeout\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tdoWait{time: txFetchTimeout, step: true},\n\t\t},\n\t})\n}\n\n// This test reproduces a crash caught by the fuzzer. The root cause was a\n// dangling transaction getting peer-dropped and clashing on readd with a\n// concurrently announced one.\nfunc TestTransactionFetcherFuzzCrash02(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Get a transaction into fetching mode and make it dangling with a broadcast\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[0]}},\n\n\t\t\t// Notify the dangling transaction once more, re-fetch, and crash via a drop and timeout\n\t\t\tdoTxNotify{peer: \"B\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoDrop(\"A\"),\n\t\t\tdoWait{time: txFetchTimeout, step: true},\n\t\t},\n\t})\n}\n\n// This test reproduces a crash caught by the fuzzer. 
The root cause was a\n// dangling transaction getting rescheduled via a partial delivery, clashing\n// with a concurrent notify.\nfunc TestTransactionFetcherFuzzCrash03(t *testing.T) {\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error { return nil },\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Get a transaction into fetching mode and make it dangling with a broadcast\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},\n\t\t\tdoWait{time: txFetchTimeout, step: true},\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[0], testTxs[1]}},\n\n\t\t\t// Notify the dangling transaction once more, partially deliver, clash&crash with a timeout\n\t\t\tdoTxNotify{peer: \"B\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[1]}, direct: true},\n\t\t\tdoWait{time: txFetchTimeout, step: true},\n\t\t},\n\t})\n}\n\n// This test reproduces a crash caught by the fuzzer. The root cause was a\n// dangling transaction getting rescheduled via a disconnect, clashing with\n// a concurrent notify.\nfunc TestTransactionFetcherFuzzCrash04(t *testing.T) {\n\t// Create a channel to control when tx requests can fail\n\tproceed := make(chan struct{})\n\n\ttestTransactionFetcherParallel(t, txFetcherTest{\n\t\tinit: func() *TxFetcher {\n\t\t\treturn NewTxFetcher(\n\t\t\t\tfunc(common.Hash) bool { return false },\n\t\t\t\tfunc(txs []*types.Transaction) []error {\n\t\t\t\t\treturn make([]error, len(txs))\n\t\t\t\t},\n\t\t\t\tfunc(string, []common.Hash) error {\n\t\t\t\t\t<-proceed\n\t\t\t\t\treturn errors.New(\"peer disconnected\")\n\t\t\t\t},\n\t\t\t)\n\t\t},\n\t\tsteps: []interface{}{\n\t\t\t// Get a transaction into fetching mode and make it dangling with a broadcast\n\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoTxEnqueue{peer: \"A\", txs: []*types.Transaction{testTxs[0]}},\n\n\t\t\t// Notify the dangling transaction once more, re-fetch, and crash via an in-flight disconnect\n\t\t\tdoTxNotify{peer: \"B\", hashes: []common.Hash{testTxsHashes[0]}},\n\t\t\tdoWait{time: txArriveTimeout, step: true},\n\t\t\tdoFunc(func() {\n\t\t\t\tproceed <- struct{}{} // Allow peer A to return the failure\n\t\t\t}),\n\t\t\tdoWait{time: 0, step: true},\n\t\t\tdoWait{time: txFetchTimeout, step: true},\n\t\t},\n\t})\n}\n\nfunc testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) {\n\tt.Parallel()\n\ttestTransactionFetcher(t, tt)\n}\n\nfunc testTransactionFetcher(t *testing.T, tt txFetcherTest) {\n\t// Create a fetcher and hook into its simulated fields\n\tclock := new(mclock.Simulated)\n\twait := make(chan struct{})\n\n\tfetcher := tt.init()\n\tfetcher.clock = clock\n\tfetcher.step = wait\n\tfetcher.rand = rand.New(rand.NewSource(0x3a29))\n\n\tfetcher.Start()\n\tdefer fetcher.Stop()\n\n\t// Crunch through all the test steps and execute them\n\tfor i, step := range tt.steps {\n\t\tswitch step := step.(type) {\n\t\tcase doTxNotify:\n\t\t\tif err := fetcher.Notify(step.peer, step.hashes); err != nil {\n\t\t\t\tt.Errorf(\"step %d: %v\", i, err)\n\t\t\t}\n\t\t\t<-wait // Fetcher needs to process this, wait until it's 
done\n\t\t\tselect {\n\t\t\tcase <-wait:\n\t\t\t\tpanic(\"wtf\")\n\t\t\tcase <-time.After(time.Millisecond):\n\t\t\t}\n\n\t\tcase doTxEnqueue:\n\t\t\tif err := fetcher.Enqueue(step.peer, step.txs, step.direct); err != nil {\n\t\t\t\tt.Errorf(\"step %d: %v\", i, err)\n\t\t\t}\n\t\t\t<-wait // Fetcher needs to process this, wait until it's done\n\n\t\tcase doWait:\n\t\t\tclock.Run(step.time)\n\t\t\tif step.step {\n\t\t\t\t<-wait // Fetcher supposed to do something, wait until it's done\n\t\t\t}\n\n\t\tcase doDrop:\n\t\t\tif err := fetcher.Drop(string(step)); err != nil {\n\t\t\t\tt.Errorf(\"step %d: %v\", i, err)\n\t\t\t}\n\t\t\t<-wait // Fetcher needs to process this, wait until it's done\n\n\t\tcase doFunc:\n\t\t\tstep()\n\n\t\tcase isWaiting:\n\t\t\t// We need to check that the waiting list (stage 1) internals\n\t\t\t// match with the expected set. Check the peer->hash mappings\n\t\t\t// first.\n\t\t\tfor peer, hashes := range step {\n\t\t\t\twaiting := fetcher.waitslots[peer]\n\t\t\t\tif waiting == nil {\n\t\t\t\t\tt.Errorf(\"step %d: peer %s missing from waitslots\", i, peer)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, hash := range hashes {\n\t\t\t\t\tif _, ok := waiting[hash]; !ok {\n\t\t\t\t\t\tt.Errorf(\"step %d, peer %s: hash %x missing from waitslots\", i, peer, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor hash := range waiting {\n\t\t\t\t\tif !containsHash(hashes, hash) {\n\t\t\t\t\t\tt.Errorf(\"step %d, peer %s: hash %x extra in waitslots\", i, peer, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor peer := range fetcher.waitslots {\n\t\t\t\tif _, ok := step[peer]; !ok {\n\t\t\t\t\tt.Errorf(\"step %d: peer %s extra in waitslots\", i, peer)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Peer->hash sets correct, check the hash->peer and timeout sets\n\t\t\tfor peer, hashes := range step {\n\t\t\t\tfor _, hash := range hashes {\n\t\t\t\t\tif _, ok := fetcher.waitlist[hash][peer]; !ok {\n\t\t\t\t\t\tt.Errorf(\"step %d, hash %x: peer %s missing from waitlist\", i, hash, peer)\n\t\t\t\t\t}\n\t\t\t\t\tif _, ok := fetcher.waittime[hash]; !ok {\n\t\t\t\t\t\tt.Errorf(\"step %d: hash %x missing from waittime\", i, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor hash, peers := range fetcher.waitlist {\n\t\t\t\tif len(peers) == 0 {\n\t\t\t\t\tt.Errorf(\"step %d, hash %x: empty peerset in waitlist\", i, hash)\n\t\t\t\t}\n\t\t\t\tfor peer := range peers {\n\t\t\t\t\tif !containsHash(step[peer], hash) {\n\t\t\t\t\t\tt.Errorf(\"step %d, hash %x: peer %s extra in waitlist\", i, hash, peer)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor hash := range fetcher.waittime {\n\t\t\t\tvar found bool\n\t\t\t\tfor _, hashes := range step {\n\t\t\t\t\tif containsHash(hashes, hash) {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !found {\n\t\t\t\t\tt.Errorf(\"step %d,: hash %x extra in waittime\", i, hash)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase isScheduled:\n\t\t\t// Check that all scheduled announces are accounted for and no\n\t\t\t// extra ones are present.\n\t\t\tfor peer, hashes := range step.tracking {\n\t\t\t\tscheduled := fetcher.announces[peer]\n\t\t\t\tif scheduled == nil {\n\t\t\t\t\tt.Errorf(\"step %d: peer %s missing from announces\", i, peer)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, hash := range hashes {\n\t\t\t\t\tif _, ok := scheduled[hash]; !ok {\n\t\t\t\t\t\tt.Errorf(\"step %d, peer %s: hash %x missing from announces\", i, peer, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor hash := range scheduled {\n\t\t\t\t\tif !containsHash(hashes, hash) {\n\t\t\t\t\t\tt.Errorf(\"step %d, 
peer %s: hash %x extra in announces\", i, peer, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor peer := range fetcher.announces {\n\t\t\t\tif _, ok := step.tracking[peer]; !ok {\n\t\t\t\t\tt.Errorf(\"step %d: peer %s extra in announces\", i, peer)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Check that all announces required to be fetching are in the\n\t\t\t// appropriate sets\n\t\t\tfor peer, hashes := range step.fetching {\n\t\t\t\trequest := fetcher.requests[peer]\n\t\t\t\tif request == nil {\n\t\t\t\t\tt.Errorf(\"step %d: peer %s missing from requests\", i, peer)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, hash := range hashes {\n\t\t\t\t\tif !containsHash(request.hashes, hash) {\n\t\t\t\t\t\tt.Errorf(\"step %d, peer %s: hash %x missing from requests\", i, peer, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, hash := range request.hashes {\n\t\t\t\t\tif !containsHash(hashes, hash) {\n\t\t\t\t\t\tt.Errorf(\"step %d, peer %s: hash %x extra in requests\", i, peer, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor peer := range fetcher.requests {\n\t\t\t\tif _, ok := step.fetching[peer]; !ok {\n\t\t\t\t\tif _, ok := step.dangling[peer]; !ok {\n\t\t\t\t\t\tt.Errorf(\"step %d: peer %s extra in requests\", i, peer)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor peer, hashes := range step.fetching {\n\t\t\t\tfor _, hash := range hashes {\n\t\t\t\t\tif _, ok := fetcher.fetching[hash]; !ok {\n\t\t\t\t\t\tt.Errorf(\"step %d, peer %s: hash %x missing from fetching\", i, peer, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor hash := range fetcher.fetching {\n\t\t\t\tvar found bool\n\t\t\t\tfor _, req := range fetcher.requests {\n\t\t\t\t\tif containsHash(req.hashes, hash) {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !found {\n\t\t\t\t\tt.Errorf(\"step %d: hash %x extra in fetching\", i, hash)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, hashes := range step.fetching {\n\t\t\t\tfor _, hash := range hashes {\n\t\t\t\t\talternates := fetcher.alternates[hash]\n\t\t\t\t\tif alternates == nil {\n\t\t\t\t\t\tt.Errorf(\"step %d: hash %x missing from alternates\", i, hash)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfor peer := range alternates {\n\t\t\t\t\t\tif _, ok := fetcher.announces[peer]; !ok {\n\t\t\t\t\t\t\tt.Errorf(\"step %d: peer %s extra in alternates\", i, peer)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif _, ok := fetcher.announces[peer][hash]; !ok {\n\t\t\t\t\t\t\tt.Errorf(\"step %d, peer %s: hash %x extra in alternates\", i, peer, hash)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfor p := range fetcher.announced[hash] {\n\t\t\t\t\t\tif _, ok := alternates[p]; !ok {\n\t\t\t\t\t\t\tt.Errorf(\"step %d, hash %x: peer %s missing from alternates\", i, hash, p)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor peer, hashes := range step.dangling {\n\t\t\t\trequest := fetcher.requests[peer]\n\t\t\t\tif request == nil {\n\t\t\t\t\tt.Errorf(\"step %d: peer %s missing from requests\", i, peer)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, hash := range hashes {\n\t\t\t\t\tif !containsHash(request.hashes, hash) {\n\t\t\t\t\t\tt.Errorf(\"step %d, peer %s: hash %x missing from requests\", i, peer, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, hash := range request.hashes {\n\t\t\t\t\tif !containsHash(hashes, hash) {\n\t\t\t\t\t\tt.Errorf(\"step %d, peer %s: hash %x extra in requests\", i, peer, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Check that all transaction announces that are scheduled for\n\t\t\t// retrieval but not 
actively being downloaded are tracked only\n\t\t\t// in the stage 2 `announced` map.\n\t\t\tvar queued []common.Hash\n\t\t\tfor _, hashes := range step.tracking {\n\t\t\t\tfor _, hash := range hashes {\n\t\t\t\t\tvar found bool\n\t\t\t\t\tfor _, hs := range step.fetching {\n\t\t\t\t\t\tif containsHash(hs, hash) {\n\t\t\t\t\t\t\tfound = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif !found {\n\t\t\t\t\t\tqueued = append(queued, hash)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, hash := range queued {\n\t\t\t\tif _, ok := fetcher.announced[hash]; !ok {\n\t\t\t\t\tt.Errorf(\"step %d: hash %x missing from announced\", i, hash)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor hash := range fetcher.announced {\n\t\t\t\tif !containsHash(queued, hash) {\n\t\t\t\t\tt.Errorf(\"step %d: hash %x extra in announced\", i, hash)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase isUnderpriced:\n\t\t\tif fetcher.underpriced.Cardinality() != int(step) {\n\t\t\t\tt.Errorf(\"step %d: underpriced set size mismatch: have %d, want %d\", i, fetcher.underpriced.Cardinality(), step)\n\t\t\t}\n\n\t\tdefault:\n\t\t\tt.Fatalf(\"step %d: unknown step type %T\", i, step)\n\t\t}\n\t\t// After every step, cross validate the internal uniqueness invariants\n\t\t// between stage one and stage two.\n\t\tfor hash := range fetcher.waittime {\n\t\t\tif _, ok := fetcher.announced[hash]; ok {\n\t\t\t\tt.Errorf(\"step %d: hash %x present in both stage 1 and 2\", i, hash)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// containsHash returns whether a hash is contained within a hash slice.\nfunc containsHash(slice []common.Hash, hash common.Hash) bool {\n\tfor _, have := range slice {\n\t\tif have == hash {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n
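\n// Example (editor's illustrative sketch, not part of the original sources): a\n// minimal scenario for the step runner above announces a single hash and then\n// asserts it sits in the waitlist; all concrete values are hypothetical:\n//\n//\ttestTransactionFetcher(t, txFetcherTest{\n//\t\tinit: func() *TxFetcher {\n//\t\t\treturn NewTxFetcher(\n//\t\t\t\tfunc(common.Hash) bool { return false },\n//\t\t\t\tnil,\n//\t\t\t\tfunc(string, []common.Hash) error { return nil },\n//\t\t\t)\n//\t\t},\n//\t\tsteps: []interface{}{\n//\t\t\tdoTxNotify{peer: \"A\", hashes: []common.Hash{{0x01}}},\n//\t\t\tisWaiting(map[string][]common.Hash{\"A\": {{0x01}}}),\n//\t\t},\n//\t})\n"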
  },
  {
    "path": "eth/filters/api.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage filters\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\n// filter is a helper struct that holds meta information over the filter type\n// and associated subscription in the event system.\ntype filter struct {\n\ttyp      Type\n\tdeadline *time.Timer // filter is inactiv when deadline triggers\n\thashes   []common.Hash\n\tcrit     FilterCriteria\n\tlogs     []*types.Log\n\ts        *Subscription // associated subscription in event system\n}\n\n// PublicFilterAPI offers support to create and manage filters. This will allow external clients to retrieve various\n// information related to the Ethereum protocol such als blocks, transactions and logs.\ntype PublicFilterAPI struct {\n\tbackend   Backend\n\tmux       *event.TypeMux\n\tquit      chan struct{}\n\tchainDb   ethdb.Database\n\tevents    *EventSystem\n\tfiltersMu sync.Mutex\n\tfilters   map[rpc.ID]*filter\n\ttimeout   time.Duration\n}\n\n// NewPublicFilterAPI returns a new PublicFilterAPI instance.\nfunc NewPublicFilterAPI(backend Backend, lightMode bool, timeout time.Duration) *PublicFilterAPI {\n\tapi := &PublicFilterAPI{\n\t\tbackend: backend,\n\t\tchainDb: backend.ChainDb(),\n\t\tevents:  NewEventSystem(backend, lightMode),\n\t\tfilters: make(map[rpc.ID]*filter),\n\t\ttimeout: timeout,\n\t}\n\tgo api.timeoutLoop(timeout)\n\n\treturn api\n}\n\n// timeoutLoop runs every 5 minutes and deletes filters that have not been recently used.\n// Tt is started when the api is created.\nfunc (api *PublicFilterAPI) timeoutLoop(timeout time.Duration) {\n\tvar toUninstall []*Subscription\n\tticker := time.NewTicker(timeout)\n\tdefer ticker.Stop()\n\tfor {\n\t\t<-ticker.C\n\t\tapi.filtersMu.Lock()\n\t\tfor id, f := range api.filters {\n\t\t\tselect {\n\t\t\tcase <-f.deadline.C:\n\t\t\t\ttoUninstall = append(toUninstall, f.s)\n\t\t\t\tdelete(api.filters, id)\n\t\t\tdefault:\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tapi.filtersMu.Unlock()\n\n\t\t// Unsubscribes are processed outside the lock to avoid the following scenario:\n\t\t// event loop attempts broadcasting events to still active filters while\n\t\t// Unsubscribe is waiting for it to process the uninstall request.\n\t\tfor _, s := range toUninstall {\n\t\t\ts.Unsubscribe()\n\t\t}\n\t\ttoUninstall = nil\n\t}\n}\n\n// NewPendingTransactionFilter creates a filter that fetches pending transaction 
hashes\n// as transactions enter the pending state.\n//\n// It is part of the filter package because this filter can be used through the\n// `eth_getFilterChanges` polling method that is also used for log filters.\n//\n// https://eth.wiki/json-rpc/API#eth_newpendingtransactionfilter\nfunc (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID {\n\tvar (\n\t\tpendingTxs   = make(chan []common.Hash)\n\t\tpendingTxSub = api.events.SubscribePendingTxs(pendingTxs)\n\t)\n\n\tapi.filtersMu.Lock()\n\tapi.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: pendingTxSub}\n\tapi.filtersMu.Unlock()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ph := <-pendingTxs:\n\t\t\t\tapi.filtersMu.Lock()\n\t\t\t\tif f, found := api.filters[pendingTxSub.ID]; found {\n\t\t\t\t\tf.hashes = append(f.hashes, ph...)\n\t\t\t\t}\n\t\t\t\tapi.filtersMu.Unlock()\n\t\t\tcase <-pendingTxSub.Err():\n\t\t\t\tapi.filtersMu.Lock()\n\t\t\t\tdelete(api.filters, pendingTxSub.ID)\n\t\t\t\tapi.filtersMu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn pendingTxSub.ID\n}\n\n// NewPendingTransactions creates a subscription that is triggered each time a transaction\n// enters the transaction pool and is signed by one of the accounts this node manages.\nfunc (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Subscription, error) {\n\tnotifier, supported := rpc.NotifierFromContext(ctx)\n\tif !supported {\n\t\treturn &rpc.Subscription{}, rpc.ErrNotificationsUnsupported\n\t}\n\n\trpcSub := notifier.CreateSubscription()\n\n\tgo func() {\n\t\ttxHashes := make(chan []common.Hash, 128)\n\t\tpendingTxSub := api.events.SubscribePendingTxs(txHashes)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase hashes := <-txHashes:\n\t\t\t\t// To keep the original behaviour, send a single tx hash in one notification.\n\t\t\t\t// TODO(rjl493456442) Send a batch of tx hashes in one notification\n\t\t\t\tfor _, h := range hashes {\n\t\t\t\t\tnotifier.Notify(rpcSub.ID, h)\n\t\t\t\t}\n\t\t\tcase <-rpcSub.Err():\n\t\t\t\tpendingTxSub.Unsubscribe()\n\t\t\t\treturn\n\t\t\tcase <-notifier.Closed():\n\t\t\t\tpendingTxSub.Unsubscribe()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn rpcSub, nil\n}\n\n// NewBlockFilter creates a filter that fetches blocks that are imported into the chain.\n// It is part of the filter package since polling goes with eth_getFilterChanges.\n//\n// https://eth.wiki/json-rpc/API#eth_newblockfilter\nfunc (api *PublicFilterAPI) NewBlockFilter() rpc.ID {\n\tvar (\n\t\theaders   = make(chan *types.Header)\n\t\theaderSub = api.events.SubscribeNewHeads(headers)\n\t)\n\n\tapi.filtersMu.Lock()\n\tapi.filters[headerSub.ID] = &filter{typ: BlocksSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: headerSub}\n\tapi.filtersMu.Unlock()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase h := <-headers:\n\t\t\t\tapi.filtersMu.Lock()\n\t\t\t\tif f, found := api.filters[headerSub.ID]; found {\n\t\t\t\t\tf.hashes = append(f.hashes, h.Hash())\n\t\t\t\t}\n\t\t\t\tapi.filtersMu.Unlock()\n\t\t\tcase <-headerSub.Err():\n\t\t\t\tapi.filtersMu.Lock()\n\t\t\t\tdelete(api.filters, headerSub.ID)\n\t\t\t\tapi.filtersMu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn headerSub.ID\n}\n\n// NewHeads sends a notification each time a new (header) block is appended to the chain.\nfunc (api *PublicFilterAPI) NewHeads(ctx context.Context) (*rpc.Subscription, error) {\n\tnotifier, 
supported := rpc.NotifierFromContext(ctx)\n\tif !supported {\n\t\treturn &rpc.Subscription{}, rpc.ErrNotificationsUnsupported\n\t}\n\n\trpcSub := notifier.CreateSubscription()\n\n\tgo func() {\n\t\theaders := make(chan *types.Header)\n\t\theadersSub := api.events.SubscribeNewHeads(headers)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase h := <-headers:\n\t\t\t\tnotifier.Notify(rpcSub.ID, h)\n\t\t\tcase <-rpcSub.Err():\n\t\t\t\theadersSub.Unsubscribe()\n\t\t\t\treturn\n\t\t\tcase <-notifier.Closed():\n\t\t\t\theadersSub.Unsubscribe()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn rpcSub, nil\n}\n\n// Logs creates a subscription that fires for all new logs that match the given filter criteria.\nfunc (api *PublicFilterAPI) Logs(ctx context.Context, crit FilterCriteria) (*rpc.Subscription, error) {\n\tnotifier, supported := rpc.NotifierFromContext(ctx)\n\tif !supported {\n\t\treturn &rpc.Subscription{}, rpc.ErrNotificationsUnsupported\n\t}\n\n\tvar (\n\t\trpcSub      = notifier.CreateSubscription()\n\t\tmatchedLogs = make(chan []*types.Log)\n\t)\n\n\tlogsSub, err := api.events.SubscribeLogs(ethereum.FilterQuery(crit), matchedLogs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo func() {\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase logs := <-matchedLogs:\n\t\t\t\tfor _, log := range logs {\n\t\t\t\t\tnotifier.Notify(rpcSub.ID, &log)\n\t\t\t\t}\n\t\t\tcase <-rpcSub.Err(): // client sent an unsubscribe request\n\t\t\t\tlogsSub.Unsubscribe()\n\t\t\t\treturn\n\t\t\tcase <-notifier.Closed(): // connection dropped\n\t\t\t\tlogsSub.Unsubscribe()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn rpcSub, nil\n}\n\n// FilterCriteria represents a request to create a new filter.\n// Same as ethereum.FilterQuery but with UnmarshalJSON() method.\ntype FilterCriteria ethereum.FilterQuery\n\n// NewFilter creates a new filter and returns the filter id. It can be\n// used to retrieve logs when the state changes. 
This method cannot be\n// used to fetch logs that are already stored in the state.\n//\n// Default criteria for the from and to block are \"latest\".\n// Using \"latest\" as block number will return logs for mined blocks.\n// Using \"pending\" as block number returns logs for not yet mined (pending) blocks.\n// In case logs are removed (chain reorg) previously returned logs are returned\n// again but with the removed property set to true.\n//\n// In case \"fromBlock\" > \"toBlock\" an error is returned.\n//\n// https://eth.wiki/json-rpc/API#eth_newfilter\nfunc (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) {\n\tlogs := make(chan []*types.Log)\n\tlogsSub, err := api.events.SubscribeLogs(ethereum.FilterQuery(crit), logs)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tapi.filtersMu.Lock()\n\tapi.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(api.timeout), logs: make([]*types.Log, 0), s: logsSub}\n\tapi.filtersMu.Unlock()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase l := <-logs:\n\t\t\t\tapi.filtersMu.Lock()\n\t\t\t\tif f, found := api.filters[logsSub.ID]; found {\n\t\t\t\t\tf.logs = append(f.logs, l...)\n\t\t\t\t}\n\t\t\t\tapi.filtersMu.Unlock()\n\t\t\tcase <-logsSub.Err():\n\t\t\t\tapi.filtersMu.Lock()\n\t\t\t\tdelete(api.filters, logsSub.ID)\n\t\t\t\tapi.filtersMu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn logsSub.ID, nil\n}\n\n// GetLogs returns logs matching the given argument that are stored within the state.\n//\n// https://eth.wiki/json-rpc/API#eth_getlogs\nfunc (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) {\n\tvar filter *Filter\n\tif crit.BlockHash != nil {\n\t\t// Block filter requested, construct a single-shot filter\n\t\tfilter = NewBlockFilter(api.backend, *crit.BlockHash, crit.Addresses, crit.Topics)\n\t} else {\n\t\t// Convert the RPC block numbers into internal representations\n\t\tbegin := rpc.LatestBlockNumber.Int64()\n\t\tif crit.FromBlock != nil {\n\t\t\tbegin = crit.FromBlock.Int64()\n\t\t}\n\t\tend := rpc.LatestBlockNumber.Int64()\n\t\tif crit.ToBlock != nil {\n\t\t\tend = crit.ToBlock.Int64()\n\t\t}\n\t\t// Construct the range filter\n\t\tfilter = NewRangeFilter(api.backend, begin, end, crit.Addresses, crit.Topics)\n\t}\n\t// Run the filter and return all the logs\n\tlogs, err := filter.Logs(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn returnLogs(logs), err\n}\n\n// UninstallFilter removes the filter with the given filter id.\n//\n// https://eth.wiki/json-rpc/API#eth_uninstallfilter\nfunc (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool {\n\tapi.filtersMu.Lock()\n\tf, found := api.filters[id]\n\tif found {\n\t\tdelete(api.filters, id)\n\t}\n\tapi.filtersMu.Unlock()\n\tif found {\n\t\tf.s.Unsubscribe()\n\t}\n\n\treturn found\n}\n\n// GetFilterLogs returns the logs for the filter with the given id.\n// If the filter could not be found, an error is returned.\n//\n// https://eth.wiki/json-rpc/API#eth_getfilterlogs\nfunc (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) {\n\tapi.filtersMu.Lock()\n\tf, found := api.filters[id]\n\tapi.filtersMu.Unlock()\n\n\tif !found || f.typ != LogsSubscription {\n\t\treturn nil, fmt.Errorf(\"filter not found\")\n\t}\n\n\tvar filter *Filter\n\tif f.crit.BlockHash != nil {\n\t\t// Block filter requested, construct a single-shot filter\n\t\tfilter = NewBlockFilter(api.backend, *f.crit.BlockHash, f.crit.Addresses, 
f.crit.Topics)\n\t} else {\n\t\t// Convert the RPC block numbers into internal representations\n\t\tbegin := rpc.LatestBlockNumber.Int64()\n\t\tif f.crit.FromBlock != nil {\n\t\t\tbegin = f.crit.FromBlock.Int64()\n\t\t}\n\t\tend := rpc.LatestBlockNumber.Int64()\n\t\tif f.crit.ToBlock != nil {\n\t\t\tend = f.crit.ToBlock.Int64()\n\t\t}\n\t\t// Construct the range filter\n\t\tfilter = NewRangeFilter(api.backend, begin, end, f.crit.Addresses, f.crit.Topics)\n\t}\n\t// Run the filter and return all the logs\n\tlogs, err := filter.Logs(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn returnLogs(logs), nil\n}\n\n// GetFilterChanges returns the logs for the filter with the given id since\n// last time it was called. This can be used for polling.\n//\n// For pending transaction and block filters the result is []common.Hash.\n// (pending)Log filters return []Log.\n//\n// https://eth.wiki/json-rpc/API#eth_getfilterchanges\nfunc (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) {\n\tapi.filtersMu.Lock()\n\tdefer api.filtersMu.Unlock()\n\n\tif f, found := api.filters[id]; found {\n\t\tif !f.deadline.Stop() {\n\t\t\t// timer expired but filter is not yet removed in timeout loop\n\t\t\t// receive timer value and reset timer\n\t\t\t<-f.deadline.C\n\t\t}\n\t\tf.deadline.Reset(api.timeout)\n\n\t\tswitch f.typ {\n\t\tcase PendingTransactionsSubscription, BlocksSubscription:\n\t\t\thashes := f.hashes\n\t\t\tf.hashes = nil\n\t\t\treturn returnHashes(hashes), nil\n\t\tcase LogsSubscription, MinedAndPendingLogsSubscription:\n\t\t\tlogs := f.logs\n\t\t\tf.logs = nil\n\t\t\treturn returnLogs(logs), nil\n\t\t}\n\t}\n\n\treturn []interface{}{}, fmt.Errorf(\"filter not found\")\n}\n\n// returnHashes is a helper that will return an empty hash array in case the given hash array is nil,\n// otherwise the given hashes array is returned.\nfunc returnHashes(hashes []common.Hash) []common.Hash {\n\tif hashes == nil {\n\t\treturn []common.Hash{}\n\t}\n\treturn hashes\n}\n\n// returnLogs is a helper that will return an empty log array in case the given logs array is nil,\n// otherwise the given logs array is returned.\nfunc returnLogs(logs []*types.Log) []*types.Log {\n\tif logs == nil {\n\t\treturn []*types.Log{}\n\t}\n\treturn logs\n}\n\n// UnmarshalJSON sets *args fields with given data.\nfunc (args *FilterCriteria) UnmarshalJSON(data []byte) error {\n\ttype input struct {\n\t\tBlockHash *common.Hash     `json:\"blockHash\"`\n\t\tFromBlock *rpc.BlockNumber `json:\"fromBlock\"`\n\t\tToBlock   *rpc.BlockNumber `json:\"toBlock\"`\n\t\tAddresses interface{}      `json:\"address\"`\n\t\tTopics    []interface{}    `json:\"topics\"`\n\t}\n\n\tvar raw input\n\tif err := json.Unmarshal(data, &raw); err != nil {\n\t\treturn err\n\t}\n\n\tif raw.BlockHash != nil {\n\t\tif raw.FromBlock != nil || raw.ToBlock != nil {\n\t\t\t// BlockHash is mutually exclusive with FromBlock/ToBlock criteria\n\t\t\treturn fmt.Errorf(\"cannot specify both BlockHash and FromBlock/ToBlock, choose one or the other\")\n\t\t}\n\t\targs.BlockHash = raw.BlockHash\n\t} else {\n\t\tif raw.FromBlock != nil {\n\t\t\targs.FromBlock = big.NewInt(raw.FromBlock.Int64())\n\t\t}\n\n\t\tif raw.ToBlock != nil {\n\t\t\targs.ToBlock = big.NewInt(raw.ToBlock.Int64())\n\t\t}\n\t}\n\n\targs.Addresses = []common.Address{}\n\n\tif raw.Addresses != nil {\n\t\t// raw.Address can contain a single address or an array of addresses\n\t\tswitch rawAddr := raw.Addresses.(type) {\n\t\tcase []interface{}:\n\t\t\tfor i, addr := range rawAddr {\n\t\t\t\tif 
strAddr, ok := addr.(string); ok {\n\t\t\t\t\taddr, err := decodeAddress(strAddr)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Errorf(\"invalid address at index %d: %v\", i, err)\n\t\t\t\t\t}\n\t\t\t\t\targs.Addresses = append(args.Addresses, addr)\n\t\t\t\t} else {\n\t\t\t\t\treturn fmt.Errorf(\"non-string address at index %d\", i)\n\t\t\t\t}\n\t\t\t}\n\t\tcase string:\n\t\t\taddr, err := decodeAddress(rawAddr)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"invalid address: %v\", err)\n\t\t\t}\n\t\t\targs.Addresses = []common.Address{addr}\n\t\tdefault:\n\t\t\treturn errors.New(\"invalid addresses in query\")\n\t\t}\n\t}\n\n\t// topics is an array consisting of strings and/or arrays of strings.\n\t// JSON null values are converted to common.Hash{} and ignored by the filter manager.\n\tif len(raw.Topics) > 0 {\n\t\targs.Topics = make([][]common.Hash, len(raw.Topics))\n\t\tfor i, t := range raw.Topics {\n\t\t\tswitch topic := t.(type) {\n\t\t\tcase nil:\n\t\t\t\t// ignore topic when matching logs\n\n\t\t\tcase string:\n\t\t\t\t// match specific topic\n\t\t\t\ttop, err := decodeTopic(topic)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\targs.Topics[i] = []common.Hash{top}\n\n\t\t\tcase []interface{}:\n\t\t\t\t// \"or\" case, e.g. [null, \"topic0\", \"topic1\"]\n\t\t\t\tfor _, rawTopic := range topic {\n\t\t\t\t\tif rawTopic == nil {\n\t\t\t\t\t\t// null component, match all\n\t\t\t\t\t\targs.Topics[i] = nil\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif topic, ok := rawTopic.(string); ok {\n\t\t\t\t\t\tparsed, err := decodeTopic(topic)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\targs.Topics[i] = append(args.Topics[i], parsed)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn fmt.Errorf(\"invalid topic(s)\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"invalid topic(s)\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc decodeAddress(s string) (common.Address, error) {\n\tb, err := hexutil.Decode(s)\n\tif err == nil && len(b) != common.AddressLength {\n\t\terr = fmt.Errorf(\"hex has invalid length %d after decoding; expected %d for address\", len(b), common.AddressLength)\n\t}\n\treturn common.BytesToAddress(b), err\n}\n\nfunc decodeTopic(s string) (common.Hash, error) {\n\tb, err := hexutil.Decode(s)\n\tif err == nil && len(b) != common.HashLength {\n\t\terr = fmt.Errorf(\"hex has invalid length %d after decoding; expected %d for topic\", len(b), common.HashLength)\n\t}\n\treturn common.BytesToHash(b), err\n}\n
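\n// exampleFilterCriteriaJSON is an illustrative sketch added for exposition and is\n// not part of the upstream API: it shows the kind of eth_newFilter argument\n// object the UnmarshalJSON method above accepts. The address and topic values\n// below are hypothetical placeholders.\nfunc exampleFilterCriteriaJSON() (FilterCriteria, error) {\n\tvar crit FilterCriteria\n\t// One address plus one exact topic; \"fromBlock\"/\"toBlock\" are omitted and\n\t// therefore stay nil, which the filter system treats as \"latest\".\n\tblob := []byte(`{\"address\": \"0x70c87d191324e6712a591f304b4eedef6ad9bb9d\", \"topics\": [\"0x3ac225168df54212a25c1c01fd35bebfea408fdac2e31ddd6f80a4bbf9a5f1ca\"]}`)\n\tif err := json.Unmarshal(blob, &crit); err != nil {\n\t\treturn FilterCriteria{}, err\n\t}\n\treturn crit, nil\n}\n"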
  },
  {
    "path": "eth/filters/api_test.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage filters\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\nfunc TestUnmarshalJSONNewFilterArgs(t *testing.T) {\n\tvar (\n\t\tfromBlock rpc.BlockNumber = 0x123435\n\t\ttoBlock   rpc.BlockNumber = 0xabcdef\n\t\taddress0                  = common.HexToAddress(\"70c87d191324e6712a591f304b4eedef6ad9bb9d\")\n\t\taddress1                  = common.HexToAddress(\"9b2055d370f73ec7d8a03e965129118dc8f5bf83\")\n\t\ttopic0                    = common.HexToHash(\"3ac225168df54212a25c1c01fd35bebfea408fdac2e31ddd6f80a4bbf9a5f1ca\")\n\t\ttopic1                    = common.HexToHash(\"9084a792d2f8b16a62b882fd56f7860c07bf5fa91dd8a2ae7e809e5180fef0b3\")\n\t\ttopic2                    = common.HexToHash(\"6ccae1c4af4152f460ff510e573399795dfab5dcf1fa60d1f33ac8fdc1e480ce\")\n\t)\n\n\t// default values\n\tvar test0 FilterCriteria\n\tif err := json.Unmarshal([]byte(\"{}\"), &test0); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif test0.FromBlock != nil {\n\t\tt.Fatalf(\"expected nil, got %d\", test0.FromBlock)\n\t}\n\tif test0.ToBlock != nil {\n\t\tt.Fatalf(\"expected nil, got %d\", test0.ToBlock)\n\t}\n\tif len(test0.Addresses) != 0 {\n\t\tt.Fatalf(\"expected 0 addresses, got %d\", len(test0.Addresses))\n\t}\n\tif len(test0.Topics) != 0 {\n\t\tt.Fatalf(\"expected 0 topics, got %d topics\", len(test0.Topics))\n\t}\n\n\t// from, to block number\n\tvar test1 FilterCriteria\n\tvector := fmt.Sprintf(`{\"fromBlock\":\"0x%x\",\"toBlock\":\"0x%x\"}`, fromBlock, toBlock)\n\tif err := json.Unmarshal([]byte(vector), &test1); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif test1.FromBlock.Int64() != fromBlock.Int64() {\n\t\tt.Fatalf(\"expected FromBlock %d, got %d\", fromBlock, test1.FromBlock)\n\t}\n\tif test1.ToBlock.Int64() != toBlock.Int64() {\n\t\tt.Fatalf(\"expected ToBlock %d, got %d\", toBlock, test1.ToBlock)\n\t}\n\n\t// single address\n\tvar test2 FilterCriteria\n\tvector = fmt.Sprintf(`{\"address\": \"%s\"}`, address0.Hex())\n\tif err := json.Unmarshal([]byte(vector), &test2); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(test2.Addresses) != 1 {\n\t\tt.Fatalf(\"expected 1 address, got %d address(es)\", len(test2.Addresses))\n\t}\n\tif test2.Addresses[0] != address0 {\n\t\tt.Fatalf(\"expected address %x, got %x\", address0, test2.Addresses[0])\n\t}\n\n\t// multiple address\n\tvar test3 FilterCriteria\n\tvector = fmt.Sprintf(`{\"address\": [\"%s\", \"%s\"]}`, address0.Hex(), address1.Hex())\n\tif err := json.Unmarshal([]byte(vector), &test3); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(test3.Addresses) != 2 {\n\t\tt.Fatalf(\"expected 2 addresses, got %d address(es)\", len(test3.Addresses))\n\t}\n\tif 
test3.Addresses[0] != address0 {\n\t\tt.Fatalf(\"expected address %x, got %x\", address0, test3.Addresses[0])\n\t}\n\tif test3.Addresses[1] != address1 {\n\t\tt.Fatalf(\"expected address %x, got %x\", address1, test3.Addresses[1])\n\t}\n\n\t// single topic\n\tvar test4 FilterCriteria\n\tvector = fmt.Sprintf(`{\"topics\": [\"%s\"]}`, topic0.Hex())\n\tif err := json.Unmarshal([]byte(vector), &test4); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(test4.Topics) != 1 {\n\t\tt.Fatalf(\"expected 1 topic, got %d\", len(test4.Topics))\n\t}\n\tif len(test4.Topics[0]) != 1 {\n\t\tt.Fatalf(\"expected len(topics[0]) to be 1, got %d\", len(test4.Topics[0]))\n\t}\n\tif test4.Topics[0][0] != topic0 {\n\t\tt.Fatalf(\"got %x, expected %x\", test4.Topics[0][0], topic0)\n\t}\n\n\t// test multiple \"AND\" topics\n\tvar test5 FilterCriteria\n\tvector = fmt.Sprintf(`{\"topics\": [\"%s\", \"%s\"]}`, topic0.Hex(), topic1.Hex())\n\tif err := json.Unmarshal([]byte(vector), &test5); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(test5.Topics) != 2 {\n\t\tt.Fatalf(\"expected 2 topics, got %d\", len(test5.Topics))\n\t}\n\tif len(test5.Topics[0]) != 1 {\n\t\tt.Fatalf(\"expected 1 topic, got %d\", len(test5.Topics[0]))\n\t}\n\tif test5.Topics[0][0] != topic0 {\n\t\tt.Fatalf(\"got %x, expected %x\", test5.Topics[0][0], topic0)\n\t}\n\tif len(test5.Topics[1]) != 1 {\n\t\tt.Fatalf(\"expected 1 topic, got %d\", len(test5.Topics[1]))\n\t}\n\tif test5.Topics[1][0] != topic1 {\n\t\tt.Fatalf(\"got %x, expected %x\", test5.Topics[1][0], topic1)\n\t}\n\n\t// test optional topic\n\tvar test6 FilterCriteria\n\tvector = fmt.Sprintf(`{\"topics\": [\"%s\", null, \"%s\"]}`, topic0.Hex(), topic2.Hex())\n\tif err := json.Unmarshal([]byte(vector), &test6); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(test6.Topics) != 3 {\n\t\tt.Fatalf(\"expected 3 topics, got %d\", len(test6.Topics))\n\t}\n\tif len(test6.Topics[0]) != 1 {\n\t\tt.Fatalf(\"expected 1 topic, got %d\", len(test6.Topics[0]))\n\t}\n\tif test6.Topics[0][0] != topic0 {\n\t\tt.Fatalf(\"got %x, expected %x\", test6.Topics[0][0], topic0)\n\t}\n\tif len(test6.Topics[1]) != 0 {\n\t\tt.Fatalf(\"expected 0 topic, got %d\", len(test6.Topics[1]))\n\t}\n\tif len(test6.Topics[2]) != 1 {\n\t\tt.Fatalf(\"expected 1 topic, got %d\", len(test6.Topics[2]))\n\t}\n\tif test6.Topics[2][0] != topic2 {\n\t\tt.Fatalf(\"got %x, expected %x\", test6.Topics[2][0], topic2)\n\t}\n\n\t// test OR topics\n\tvar test7 FilterCriteria\n\tvector = fmt.Sprintf(`{\"topics\": [[\"%s\", \"%s\"], null, [\"%s\", null]]}`, topic0.Hex(), topic1.Hex(), topic2.Hex())\n\tif err := json.Unmarshal([]byte(vector), &test7); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(test7.Topics) != 3 {\n\t\tt.Fatalf(\"expected 3 topics, got %d topics\", len(test7.Topics))\n\t}\n\tif len(test7.Topics[0]) != 2 {\n\t\tt.Fatalf(\"expected 2 topics, got %d topics\", len(test7.Topics[0]))\n\t}\n\tif test7.Topics[0][0] != topic0 || test7.Topics[0][1] != topic1 {\n\t\tt.Fatalf(\"invalid topics expected [%x,%x], got [%x,%x]\",\n\t\t\ttopic0, topic1, test7.Topics[0][0], test7.Topics[0][1],\n\t\t)\n\t}\n\tif len(test7.Topics[1]) != 0 {\n\t\tt.Fatalf(\"expected 0 topic, got %d topics\", len(test7.Topics[1]))\n\t}\n\tif len(test7.Topics[2]) != 0 {\n\t\tt.Fatalf(\"expected 0 topics, got %d topics\", len(test7.Topics[2]))\n\t}\n}\n"
  },
  {
    "path": "eth/filters/bench_test.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage filters\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/bitutil\"\n\t\"github.com/ethereum/go-ethereum/core/bloombits\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/node\"\n)\n\nfunc BenchmarkBloomBits512(b *testing.B) {\n\tbenchmarkBloomBits(b, 512)\n}\n\nfunc BenchmarkBloomBits1k(b *testing.B) {\n\tbenchmarkBloomBits(b, 1024)\n}\n\nfunc BenchmarkBloomBits2k(b *testing.B) {\n\tbenchmarkBloomBits(b, 2048)\n}\n\nfunc BenchmarkBloomBits4k(b *testing.B) {\n\tbenchmarkBloomBits(b, 4096)\n}\n\nfunc BenchmarkBloomBits8k(b *testing.B) {\n\tbenchmarkBloomBits(b, 8192)\n}\n\nfunc BenchmarkBloomBits16k(b *testing.B) {\n\tbenchmarkBloomBits(b, 16384)\n}\n\nfunc BenchmarkBloomBits32k(b *testing.B) {\n\tbenchmarkBloomBits(b, 32768)\n}\n\nconst benchFilterCnt = 2000\n\nfunc benchmarkBloomBits(b *testing.B, sectionSize uint64) {\n\tbenchDataDir := node.DefaultDataDir() + \"/geth/chaindata\"\n\tb.Log(\"Running bloombits benchmark   section size:\", sectionSize)\n\n\tdb, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, \"\", false)\n\tif err != nil {\n\t\tb.Fatalf(\"error opening database at %v: %v\", benchDataDir, err)\n\t}\n\thead := rawdb.ReadHeadBlockHash(db)\n\tif head == (common.Hash{}) {\n\t\tb.Fatalf(\"chain data not found at %v\", benchDataDir)\n\t}\n\n\tclearBloomBits(db)\n\tb.Log(\"Generating bloombits data...\")\n\theadNum := rawdb.ReadHeaderNumber(db, head)\n\tif headNum == nil || *headNum < sectionSize+512 {\n\t\tb.Fatalf(\"not enough blocks for running a benchmark\")\n\t}\n\n\tstart := time.Now()\n\tcnt := (*headNum - 512) / sectionSize\n\tvar dataSize, compSize uint64\n\tfor sectionIdx := uint64(0); sectionIdx < cnt; sectionIdx++ {\n\t\tbc, err := bloombits.NewGenerator(uint(sectionSize))\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"failed to create generator: %v\", err)\n\t\t}\n\t\tvar header *types.Header\n\t\tfor i := sectionIdx * sectionSize; i < (sectionIdx+1)*sectionSize; i++ {\n\t\t\thash := rawdb.ReadCanonicalHash(db, i)\n\t\t\theader = rawdb.ReadHeader(db, hash, i)\n\t\t\tif header == nil {\n\t\t\t\tb.Fatalf(\"Error creating bloomBits data\")\n\t\t\t}\n\t\t\tbc.AddBloom(uint(i-sectionIdx*sectionSize), header.Bloom)\n\t\t}\n\t\tsectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*sectionSize-1)\n\t\tfor i := 0; i < types.BloomBitLength; i++ {\n\t\t\tdata, err := bc.Bitset(uint(i))\n\t\t\tif err != nil {\n\t\t\t\tb.Fatalf(\"failed to retrieve bitset: %v\", err)\n\t\t\t}\n\t\t\tcomp := 
bitutil.CompressBytes(data)\n\t\t\tdataSize += uint64(len(data))\n\t\t\tcompSize += uint64(len(comp))\n\t\t\trawdb.WriteBloomBits(db, uint(i), sectionIdx, sectionHead, comp)\n\t\t}\n\t\t//if sectionIdx%50 == 0 {\n\t\t//\tb.Log(\" section\", sectionIdx, \"/\", cnt)\n\t\t//}\n\t}\n\n\td := time.Since(start)\n\tb.Log(\"Finished generating bloombits data\")\n\tb.Log(\" \", d, \"total  \", d/time.Duration(cnt*sectionSize), \"per block\")\n\tb.Log(\" data size:\", dataSize, \"  compressed size:\", compSize, \"  compression ratio:\", float64(compSize)/float64(dataSize))\n\n\tb.Log(\"Running filter benchmarks...\")\n\tstart = time.Now()\n\tvar backend *testBackend\n\n\tfor i := 0; i < benchFilterCnt; i++ {\n\t\tif i%20 == 0 {\n\t\t\tdb.Close()\n\t\t\tdb, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, \"\", false)\n\t\t\tbackend = &testBackend{db: db, sections: cnt}\n\t\t}\n\t\tvar addr common.Address\n\t\taddr[0] = byte(i)\n\t\taddr[1] = byte(i / 256)\n\t\tfilter := NewRangeFilter(backend, 0, int64(cnt*sectionSize-1), []common.Address{addr}, nil)\n\t\tif _, err := filter.Logs(context.Background()); err != nil {\n\t\t\tb.Error(\"filter.Logs error:\", err)\n\t\t}\n\t}\n\td = time.Since(start)\n\tb.Log(\"Finished running filter benchmarks\")\n\tb.Log(\" \", d, \"total  \", d/time.Duration(benchFilterCnt), \"per address\", d*time.Duration(1000000)/time.Duration(benchFilterCnt*cnt*sectionSize), \"per million blocks\")\n\tdb.Close()\n}\n\nvar bloomBitsPrefix = []byte(\"bloomBits-\")\n\nfunc clearBloomBits(db ethdb.Database) {\n\tfmt.Println(\"Clearing bloombits data...\")\n\tit := db.NewIterator(bloomBitsPrefix, nil)\n\tfor it.Next() {\n\t\tdb.Delete(it.Key())\n\t}\n\tit.Release()\n}\n\nfunc BenchmarkNoBloomBits(b *testing.B) {\n\tbenchDataDir := node.DefaultDataDir() + \"/geth/chaindata\"\n\tb.Log(\"Running benchmark without bloombits\")\n\tdb, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, \"\", false)\n\tif err != nil {\n\t\tb.Fatalf(\"error opening database at %v: %v\", benchDataDir, err)\n\t}\n\thead := rawdb.ReadHeadBlockHash(db)\n\tif head == (common.Hash{}) {\n\t\tb.Fatalf(\"chain data not found at %v\", benchDataDir)\n\t}\n\theadNum := rawdb.ReadHeaderNumber(db, head)\n\n\tclearBloomBits(db)\n\n\tb.Log(\"Running filter benchmarks...\")\n\tstart := time.Now()\n\tbackend := &testBackend{db: db}\n\tfilter := NewRangeFilter(backend, 0, int64(*headNum), []common.Address{{}}, nil)\n\tfilter.Logs(context.Background())\n\td := time.Since(start)\n\tb.Log(\"Finished running filter benchmarks\")\n\tb.Log(\" \", d, \"total  \", d*time.Duration(1000000)/time.Duration(*headNum+1), \"per million blocks\")\n\tdb.Close()\n}\n"
  },
  {
    "path": "eth/filters/filter.go",
    "content": "// Copyright 2014 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage filters\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"math/big\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/bloombits\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\ntype Backend interface {\n\tChainDb() ethdb.Database\n\tHeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)\n\tHeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error)\n\tGetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)\n\tGetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error)\n\n\tSubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription\n\tSubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription\n\tSubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription\n\tSubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription\n\tSubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription\n\n\tBloomStatus() (uint64, uint64)\n\tServiceFilter(ctx context.Context, session *bloombits.MatcherSession)\n}\n\n// Filter can be used to retrieve and filter logs.\ntype Filter struct {\n\tbackend Backend\n\n\tdb        ethdb.Database\n\taddresses []common.Address\n\ttopics    [][]common.Hash\n\n\tblock      common.Hash // Block hash if filtering a single block\n\tbegin, end int64       // Range interval if filtering multiple blocks\n\n\tmatcher *bloombits.Matcher\n}\n\n// NewRangeFilter creates a new filter which uses a bloom filter on blocks to\n// figure out whether a particular block is interesting or not.\nfunc NewRangeFilter(backend Backend, begin, end int64, addresses []common.Address, topics [][]common.Hash) *Filter {\n\t// Flatten the address and topic filter clauses into a single bloombits filter\n\t// system. 
Since the bloombits are not positional, nil topics are permitted,\n\t// which get flattened into a nil byte slice.\n\tvar filters [][][]byte\n\tif len(addresses) > 0 {\n\t\tfilter := make([][]byte, len(addresses))\n\t\tfor i, address := range addresses {\n\t\t\tfilter[i] = address.Bytes()\n\t\t}\n\t\tfilters = append(filters, filter)\n\t}\n\tfor _, topicList := range topics {\n\t\tfilter := make([][]byte, len(topicList))\n\t\tfor i, topic := range topicList {\n\t\t\tfilter[i] = topic.Bytes()\n\t\t}\n\t\tfilters = append(filters, filter)\n\t}\n\tsize, _ := backend.BloomStatus()\n\n\t// Create a generic filter and convert it into a range filter\n\tfilter := newFilter(backend, addresses, topics)\n\n\tfilter.matcher = bloombits.NewMatcher(size, filters)\n\tfilter.begin = begin\n\tfilter.end = end\n\n\treturn filter\n}\n\n// NewBlockFilter creates a new filter which directly inspects the contents of\n// a block to figure out whether it is interesting or not.\nfunc NewBlockFilter(backend Backend, block common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter {\n\t// Create a generic filter and convert it into a block filter\n\tfilter := newFilter(backend, addresses, topics)\n\tfilter.block = block\n\treturn filter\n}\n\n// newFilter creates a generic filter that can either filter based on a block hash,\n// or based on range queries. The search criteria needs to be explicitly set.\nfunc newFilter(backend Backend, addresses []common.Address, topics [][]common.Hash) *Filter {\n\treturn &Filter{\n\t\tbackend:   backend,\n\t\taddresses: addresses,\n\t\ttopics:    topics,\n\t\tdb:        backend.ChainDb(),\n\t}\n}\n\n// Logs searches the blockchain for matching log entries, returning all from the\n// first block that contains matches, updating the start of the filter accordingly.\nfunc (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) {\n\t// If we're doing singleton block filtering, execute and return\n\tif f.block != (common.Hash{}) {\n\t\theader, err := f.backend.HeaderByHash(ctx, f.block)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif header == nil {\n\t\t\treturn nil, errors.New(\"unknown block\")\n\t\t}\n\t\treturn f.blockLogs(ctx, header)\n\t}\n\t// Figure out the limits of the filter range\n\theader, _ := f.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)\n\tif header == nil {\n\t\treturn nil, nil\n\t}\n\thead := header.Number.Uint64()\n\n\tif f.begin == -1 {\n\t\tf.begin = int64(head)\n\t}\n\tend := uint64(f.end)\n\tif f.end == -1 {\n\t\tend = head\n\t}\n\t// Gather all indexed logs, and finish with non-indexed ones\n\tvar (\n\t\tlogs []*types.Log\n\t\terr  error\n\t)\n\tsize, sections := f.backend.BloomStatus()\n\tif indexed := sections * size; indexed > uint64(f.begin) {\n\t\tif indexed > end {\n\t\t\tlogs, err = f.indexedLogs(ctx, end)\n\t\t} else {\n\t\t\tlogs, err = f.indexedLogs(ctx, indexed-1)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn logs, err\n\t\t}\n\t}\n\trest, err := f.unindexedLogs(ctx, end)\n\tlogs = append(logs, rest...)\n\treturn logs, err\n}\n\n// indexedLogs returns the logs matching the filter criteria based on the bloom\n// bits index available locally or via the network.\nfunc (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {\n\t// Create a matcher session and request servicing from the backend\n\tmatches := make(chan uint64, 64)\n\n\tsession, err := f.matcher.Start(ctx, uint64(f.begin), end, matches)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer 
session.Close()\n\n\tf.backend.ServiceFilter(ctx, session)\n\n\t// Iterate over the matches until exhausted or context closed\n\tvar logs []*types.Log\n\n\tfor {\n\t\tselect {\n\t\tcase number, ok := <-matches:\n\t\t\t// Abort if all matches have been fulfilled\n\t\t\tif !ok {\n\t\t\t\terr := session.Error()\n\t\t\t\tif err == nil {\n\t\t\t\t\tf.begin = int64(end) + 1\n\t\t\t\t}\n\t\t\t\treturn logs, err\n\t\t\t}\n\t\t\tf.begin = int64(number) + 1\n\n\t\t\t// Retrieve the suggested block and pull any truly matching logs\n\t\t\theader, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(number))\n\t\t\tif header == nil || err != nil {\n\t\t\t\treturn logs, err\n\t\t\t}\n\t\t\tfound, err := f.checkMatches(ctx, header)\n\t\t\tif err != nil {\n\t\t\t\treturn logs, err\n\t\t\t}\n\t\t\tlogs = append(logs, found...)\n\n\t\tcase <-ctx.Done():\n\t\t\treturn logs, ctx.Err()\n\t\t}\n\t}\n}\n\n// unindexedLogs returns the logs matching the filter criteria based on raw block\n// iteration and bloom matching.\nfunc (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) {\n\tvar logs []*types.Log\n\n\tfor ; f.begin <= int64(end); f.begin++ {\n\t\theader, err := f.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin))\n\t\tif header == nil || err != nil {\n\t\t\treturn logs, err\n\t\t}\n\t\tfound, err := f.blockLogs(ctx, header)\n\t\tif err != nil {\n\t\t\treturn logs, err\n\t\t}\n\t\tlogs = append(logs, found...)\n\t}\n\treturn logs, nil\n}\n\n// blockLogs returns the logs matching the filter criteria within a single block.\nfunc (f *Filter) blockLogs(ctx context.Context, header *types.Header) (logs []*types.Log, err error) {\n\tif bloomFilter(header.Bloom, f.addresses, f.topics) {\n\t\tfound, err := f.checkMatches(ctx, header)\n\t\tif err != nil {\n\t\t\treturn logs, err\n\t\t}\n\t\tlogs = append(logs, found...)\n\t}\n\treturn logs, nil\n}\n\n// checkMatches checks if the receipts belonging to the given header contain any log events that\n// match the filter criteria. 
This function is called when the bloom filter signals a potential match.\nfunc (f *Filter) checkMatches(ctx context.Context, header *types.Header) (logs []*types.Log, err error) {\n\t// Get the logs of the block\n\tlogsList, err := f.backend.GetLogs(ctx, header.Hash())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar unfiltered []*types.Log\n\tfor _, logs := range logsList {\n\t\tunfiltered = append(unfiltered, logs...)\n\t}\n\tlogs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)\n\tif len(logs) > 0 {\n\t\t// We have matching logs, check if we need to resolve full logs via the light client\n\t\tif logs[0].TxHash == (common.Hash{}) {\n\t\t\treceipts, err := f.backend.GetReceipts(ctx, header.Hash())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tunfiltered = unfiltered[:0]\n\t\t\tfor _, receipt := range receipts {\n\t\t\t\tunfiltered = append(unfiltered, receipt.Logs...)\n\t\t\t}\n\t\t\tlogs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics)\n\t\t}\n\t\treturn logs, nil\n\t}\n\treturn nil, nil\n}\n\nfunc includes(addresses []common.Address, a common.Address) bool {\n\tfor _, addr := range addresses {\n\t\tif addr == a {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\n// filterLogs creates a slice of logs matching the given criteria.\nfunc filterLogs(logs []*types.Log, fromBlock, toBlock *big.Int, addresses []common.Address, topics [][]common.Hash) []*types.Log {\n\tvar ret []*types.Log\nLogs:\n\tfor _, log := range logs {\n\t\tif fromBlock != nil && fromBlock.Int64() >= 0 && fromBlock.Uint64() > log.BlockNumber {\n\t\t\tcontinue\n\t\t}\n\t\tif toBlock != nil && toBlock.Int64() >= 0 && toBlock.Uint64() < log.BlockNumber {\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(addresses) > 0 && !includes(addresses, log.Address) {\n\t\t\tcontinue\n\t\t}\n\t\t// If the number of topics in the filter is greater than the number of topics in the log, skip.\n\t\tif len(topics) > len(log.Topics) {\n\t\t\tcontinue Logs\n\t\t}\n\t\tfor i, sub := range topics {\n\t\t\tmatch := len(sub) == 0 // empty rule set == wildcard\n\t\t\tfor _, topic := range sub {\n\t\t\t\tif log.Topics[i] == topic {\n\t\t\t\t\tmatch = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !match {\n\t\t\t\tcontinue Logs\n\t\t\t}\n\t\t}\n\t\tret = append(ret, log)\n\t}\n\treturn ret\n}\n\nfunc bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]common.Hash) bool {\n\tif len(addresses) > 0 {\n\t\tvar included bool\n\t\tfor _, addr := range addresses {\n\t\t\tif types.BloomLookup(bloom, addr) {\n\t\t\t\tincluded = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !included {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tfor _, sub := range topics {\n\t\tincluded := len(sub) == 0 // empty rule set == wildcard\n\t\tfor _, topic := range sub {\n\t\t\tif types.BloomLookup(bloom, topic) {\n\t\t\t\tincluded = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !included {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n
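\n// exampleTopicFilter is an illustrative sketch added for exposition and is not\n// part of the upstream file: it shows how the [][]common.Hash argument consumed\n// by filterLogs and bloomFilter encodes matching rules. The outer slice is\n// AND-ed by topic position, each inner slice is OR-ed, and an empty inner\n// slice is a wildcard. The two hashes are hypothetical placeholders.\nfunc exampleTopicFilter(topicA, topicB common.Hash) [][]common.Hash {\n\treturn [][]common.Hash{\n\t\t{topicA, topicB}, // topic 0 must equal topicA or topicB\n\t\tnil,              // topic 1 may be anything (empty rule set == wildcard)\n\t}\n}\n"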
  },
  {
    "path": "eth/filters/filter_system.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package filters implements an ethereum filtering system for block,\n// transactions and log events.\npackage filters\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\n// Type determines the kind of filter and is used to put the filter in to\n// the correct bucket when added.\ntype Type byte\n\nconst (\n\t// UnknownSubscription indicates an unknown subscription type\n\tUnknownSubscription Type = iota\n\t// LogsSubscription queries for new or removed (chain reorg) logs\n\tLogsSubscription\n\t// PendingLogsSubscription queries for logs in pending blocks\n\tPendingLogsSubscription\n\t// MinedAndPendingLogsSubscription queries for logs in mined and pending blocks.\n\tMinedAndPendingLogsSubscription\n\t// PendingTransactionsSubscription queries tx hashes for pending\n\t// transactions entering the pending state\n\tPendingTransactionsSubscription\n\t// BlocksSubscription queries hashes for blocks that are imported\n\tBlocksSubscription\n\t// LastSubscription keeps track of the last index\n\tLastIndexSubscription\n)\n\nconst (\n\t// txChanSize is the size of channel listening to NewTxsEvent.\n\t// The number is referenced from the size of tx pool.\n\ttxChanSize = 4096\n\t// rmLogsChanSize is the size of channel listening to RemovedLogsEvent.\n\trmLogsChanSize = 10\n\t// logsChanSize is the size of channel listening to LogsEvent.\n\tlogsChanSize = 10\n\t// chainEvChanSize is the size of channel listening to ChainEvent.\n\tchainEvChanSize = 10\n)\n\ntype subscription struct {\n\tid        rpc.ID\n\ttyp       Type\n\tcreated   time.Time\n\tlogsCrit  ethereum.FilterQuery\n\tlogs      chan []*types.Log\n\thashes    chan []common.Hash\n\theaders   chan *types.Header\n\tinstalled chan struct{} // closed when the filter is installed\n\terr       chan error    // closed when the filter is uninstalled\n}\n\n// EventSystem creates subscriptions, processes events and broadcasts them to the\n// subscription which match the subscription criteria.\ntype EventSystem struct {\n\tbackend   Backend\n\tlightMode bool\n\tlastHead  *types.Header\n\n\t// Subscriptions\n\ttxsSub         event.Subscription // Subscription for new transaction event\n\tlogsSub        event.Subscription // Subscription for new log event\n\trmLogsSub      event.Subscription // Subscription for removed log event\n\tpendingLogsSub event.Subscription // 
Subscription for pending log event\n\tchainSub       event.Subscription // Subscription for new chain event\n\n\t// Channels\n\tinstall       chan *subscription         // install filter for event notification\n\tuninstall     chan *subscription         // remove filter for event notification\n\ttxsCh         chan core.NewTxsEvent      // Channel to receive new transactions event\n\tlogsCh        chan []*types.Log          // Channel to receive new log event\n\tpendingLogsCh chan []*types.Log          // Channel to receive new log event\n\trmLogsCh      chan core.RemovedLogsEvent // Channel to receive removed log event\n\tchainCh       chan core.ChainEvent       // Channel to receive new chain event\n}\n\n// NewEventSystem creates a new manager that listens for events on the given mux,\n// parses and filters them. It uses the all map to retrieve filter changes. The\n// work loop holds its own index that is used to forward events to filters.\n//\n// The returned manager has a loop that needs to be stopped with the Stop function\n// or by stopping the given mux.\nfunc NewEventSystem(backend Backend, lightMode bool) *EventSystem {\n\tm := &EventSystem{\n\t\tbackend:       backend,\n\t\tlightMode:     lightMode,\n\t\tinstall:       make(chan *subscription),\n\t\tuninstall:     make(chan *subscription),\n\t\ttxsCh:         make(chan core.NewTxsEvent, txChanSize),\n\t\tlogsCh:        make(chan []*types.Log, logsChanSize),\n\t\trmLogsCh:      make(chan core.RemovedLogsEvent, rmLogsChanSize),\n\t\tpendingLogsCh: make(chan []*types.Log, logsChanSize),\n\t\tchainCh:       make(chan core.ChainEvent, chainEvChanSize),\n\t}\n\n\t// Subscribe events\n\tm.txsSub = m.backend.SubscribeNewTxsEvent(m.txsCh)\n\tm.logsSub = m.backend.SubscribeLogsEvent(m.logsCh)\n\tm.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh)\n\tm.chainSub = m.backend.SubscribeChainEvent(m.chainCh)\n\tm.pendingLogsSub = m.backend.SubscribePendingLogsEvent(m.pendingLogsCh)\n\n\t// Make sure none of the subscriptions are empty\n\tif m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil || m.pendingLogsSub == nil {\n\t\tlog.Crit(\"Subscribe for event system failed\")\n\t}\n\n\tgo m.eventLoop()\n\treturn m\n}\n\n// Subscription is created when the client registers itself for a particular event.\ntype Subscription struct {\n\tID        rpc.ID\n\tf         *subscription\n\tes        *EventSystem\n\tunsubOnce sync.Once\n}\n\n// Err returns a channel that is closed when unsubscribed.\nfunc (sub *Subscription) Err() <-chan error {\n\treturn sub.f.err\n}\n\n// Unsubscribe uninstalls the subscription from the event broadcast loop.\nfunc (sub *Subscription) Unsubscribe() {\n\tsub.unsubOnce.Do(func() {\n\tuninstallLoop:\n\t\tfor {\n\t\t\t// write uninstall request and consume logs/hashes. 
This prevents\n\t\t\t// the eventLoop broadcast method from deadlocking when writing to the\n\t\t\t// filter event channel while the subscription loop is waiting for\n\t\t\t// this method to return (and thus not reading these events).\n\t\t\tselect {\n\t\t\tcase sub.es.uninstall <- sub.f:\n\t\t\t\tbreak uninstallLoop\n\t\t\tcase <-sub.f.logs:\n\t\t\tcase <-sub.f.hashes:\n\t\t\tcase <-sub.f.headers:\n\t\t\t}\n\t\t}\n\n\t\t// wait for filter to be uninstalled in work loop before returning\n\t\t// this ensures that the manager won't use the event channel which\n\t\t// will probably be closed by the client asap after this method returns.\n\t\t<-sub.Err()\n\t})\n}\n\n// subscribe installs the subscription in the event broadcast loop.\nfunc (es *EventSystem) subscribe(sub *subscription) *Subscription {\n\tes.install <- sub\n\t<-sub.installed\n\treturn &Subscription{ID: sub.id, f: sub, es: es}\n}\n\n// SubscribeLogs creates a subscription that will write all logs matching the\n// given criteria to the given logs channel. Default value for the from and to\n// block is \"latest\". If the fromBlock > toBlock an error is returned.\nfunc (es *EventSystem) SubscribeLogs(crit ethereum.FilterQuery, logs chan []*types.Log) (*Subscription, error) {\n\tvar from, to rpc.BlockNumber\n\tif crit.FromBlock == nil {\n\t\tfrom = rpc.LatestBlockNumber\n\t} else {\n\t\tfrom = rpc.BlockNumber(crit.FromBlock.Int64())\n\t}\n\tif crit.ToBlock == nil {\n\t\tto = rpc.LatestBlockNumber\n\t} else {\n\t\tto = rpc.BlockNumber(crit.ToBlock.Int64())\n\t}\n\n\t// only interested in pending logs\n\tif from == rpc.PendingBlockNumber && to == rpc.PendingBlockNumber {\n\t\treturn es.subscribePendingLogs(crit, logs), nil\n\t}\n\t// only interested in new mined logs\n\tif from == rpc.LatestBlockNumber && to == rpc.LatestBlockNumber {\n\t\treturn es.subscribeLogs(crit, logs), nil\n\t}\n\t// only interested in mined logs within a specific block range\n\tif from >= 0 && to >= 0 && to >= from {\n\t\treturn es.subscribeLogs(crit, logs), nil\n\t}\n\t// interested in mined logs from a specific block number, new logs and pending logs\n\tif from >= rpc.LatestBlockNumber && to == rpc.PendingBlockNumber {\n\t\treturn es.subscribeMinedPendingLogs(crit, logs), nil\n\t}\n\t// interested in logs from a specific block number to new mined blocks\n\tif from >= 0 && to == rpc.LatestBlockNumber {\n\t\treturn es.subscribeLogs(crit, logs), nil\n\t}\n\treturn nil, fmt.Errorf(\"invalid from and to block combination: from > to\")\n}\n\n// subscribeMinedPendingLogs creates a subscription that returns mined and\n// pending logs that match the given criteria.\nfunc (es *EventSystem) subscribeMinedPendingLogs(crit ethereum.FilterQuery, logs chan []*types.Log) *Subscription {\n\tsub := &subscription{\n\t\tid:        rpc.NewID(),\n\t\ttyp:       MinedAndPendingLogsSubscription,\n\t\tlogsCrit:  crit,\n\t\tcreated:   time.Now(),\n\t\tlogs:      logs,\n\t\thashes:    make(chan []common.Hash),\n\t\theaders:   make(chan *types.Header),\n\t\tinstalled: make(chan struct{}),\n\t\terr:       make(chan error),\n\t}\n\treturn es.subscribe(sub)\n}\n\n// subscribeLogs creates a subscription that will write all logs matching the\n// given criteria to the given logs channel.\nfunc (es *EventSystem) subscribeLogs(crit ethereum.FilterQuery, logs chan []*types.Log) *Subscription {\n\tsub := &subscription{\n\t\tid:        rpc.NewID(),\n\t\ttyp:       LogsSubscription,\n\t\tlogsCrit:  crit,\n\t\tcreated:   time.Now(),\n\t\tlogs:      logs,\n\t\thashes:    make(chan 
[]common.Hash),\n\t\theaders:   make(chan *types.Header),\n\t\tinstalled: make(chan struct{}),\n\t\terr:       make(chan error),\n\t}\n\treturn es.subscribe(sub)\n}\n\n// subscribePendingLogs creates a subscription that writes contract event logs for\n// transactions that enter the pending state.\nfunc (es *EventSystem) subscribePendingLogs(crit ethereum.FilterQuery, logs chan []*types.Log) *Subscription {\n\tsub := &subscription{\n\t\tid:        rpc.NewID(),\n\t\ttyp:       PendingLogsSubscription,\n\t\tlogsCrit:  crit,\n\t\tcreated:   time.Now(),\n\t\tlogs:      logs,\n\t\thashes:    make(chan []common.Hash),\n\t\theaders:   make(chan *types.Header),\n\t\tinstalled: make(chan struct{}),\n\t\terr:       make(chan error),\n\t}\n\treturn es.subscribe(sub)\n}\n\n// SubscribeNewHeads creates a subscription that writes the header of a block that is\n// imported into the chain.\nfunc (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscription {\n\tsub := &subscription{\n\t\tid:        rpc.NewID(),\n\t\ttyp:       BlocksSubscription,\n\t\tcreated:   time.Now(),\n\t\tlogs:      make(chan []*types.Log),\n\t\thashes:    make(chan []common.Hash),\n\t\theaders:   headers,\n\t\tinstalled: make(chan struct{}),\n\t\terr:       make(chan error),\n\t}\n\treturn es.subscribe(sub)\n}\n\n// SubscribePendingTxs creates a subscription that writes transaction hashes for\n// transactions that enter the transaction pool.\nfunc (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscription {\n\tsub := &subscription{\n\t\tid:        rpc.NewID(),\n\t\ttyp:       PendingTransactionsSubscription,\n\t\tcreated:   time.Now(),\n\t\tlogs:      make(chan []*types.Log),\n\t\thashes:    hashes,\n\t\theaders:   make(chan *types.Header),\n\t\tinstalled: make(chan struct{}),\n\t\terr:       make(chan error),\n\t}\n\treturn es.subscribe(sub)\n}\n\ntype filterIndex map[Type]map[rpc.ID]*subscription\n\nfunc (es *EventSystem) handleLogs(filters filterIndex, ev []*types.Log) {\n\tif len(ev) == 0 {\n\t\treturn\n\t}\n\tfor _, f := range filters[LogsSubscription] {\n\t\tmatchedLogs := filterLogs(ev, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)\n\t\tif len(matchedLogs) > 0 {\n\t\t\tf.logs <- matchedLogs\n\t\t}\n\t}\n}\n\nfunc (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) {\n\tif len(ev) == 0 {\n\t\treturn\n\t}\n\tfor _, f := range filters[PendingLogsSubscription] {\n\t\tmatchedLogs := filterLogs(ev, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)\n\t\tif len(matchedLogs) > 0 {\n\t\t\tf.logs <- matchedLogs\n\t\t}\n\t}\n}\n\nfunc (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) {\n\tfor _, f := range filters[LogsSubscription] {\n\t\tmatchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)\n\t\tif len(matchedLogs) > 0 {\n\t\t\tf.logs <- matchedLogs\n\t\t}\n\t}\n}\n\nfunc (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent) {\n\thashes := make([]common.Hash, 0, len(ev.Txs))\n\tfor _, tx := range ev.Txs {\n\t\thashes = append(hashes, tx.Hash())\n\t}\n\tfor _, f := range filters[PendingTransactionsSubscription] {\n\t\tf.hashes <- hashes\n\t}\n}\n\nfunc (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) {\n\tfor _, f := range filters[BlocksSubscription] {\n\t\tf.headers <- ev.Block.Header()\n\t}\n\tif es.lightMode && len(filters[LogsSubscription]) > 0 
{\n\t\tes.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) {\n\t\t\tfor _, f := range filters[LogsSubscription] {\n\t\t\t\tif matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {\n\t\t\t\t\tf.logs <- matchedLogs\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc (es *EventSystem) lightFilterNewHead(newHeader *types.Header, callBack func(*types.Header, bool)) {\n\toldh := es.lastHead\n\tes.lastHead = newHeader\n\tif oldh == nil {\n\t\treturn\n\t}\n\tnewh := newHeader\n\t// find common ancestor, create list of rolled back and new block hashes\n\tvar oldHeaders, newHeaders []*types.Header\n\tfor oldh.Hash() != newh.Hash() {\n\t\tif oldh.Number.Uint64() >= newh.Number.Uint64() {\n\t\t\toldHeaders = append(oldHeaders, oldh)\n\t\t\toldh = rawdb.ReadHeader(es.backend.ChainDb(), oldh.ParentHash, oldh.Number.Uint64()-1)\n\t\t}\n\t\tif oldh.Number.Uint64() < newh.Number.Uint64() {\n\t\t\tnewHeaders = append(newHeaders, newh)\n\t\t\tnewh = rawdb.ReadHeader(es.backend.ChainDb(), newh.ParentHash, newh.Number.Uint64()-1)\n\t\t\tif newh == nil {\n\t\t\t\t// happens when CHT syncing, nothing to do\n\t\t\t\tnewh = oldh\n\t\t\t}\n\t\t}\n\t}\n\t// roll back old blocks\n\tfor _, h := range oldHeaders {\n\t\tcallBack(h, true)\n\t}\n\t// check new blocks (array is in reverse order)\n\tfor i := len(newHeaders) - 1; i >= 0; i-- {\n\t\tcallBack(newHeaders[i], false)\n\t}\n}\n\n// filter logs of a single header in light client mode\nfunc (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.Address, topics [][]common.Hash, remove bool) []*types.Log {\n\tif bloomFilter(header.Bloom, addresses, topics) {\n\t\t// Get the logs of the block\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\t\tdefer cancel()\n\t\tlogsList, err := es.backend.GetLogs(ctx, header.Hash())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tvar unfiltered []*types.Log\n\t\tfor _, logs := range logsList {\n\t\t\tfor _, log := range logs {\n\t\t\t\tlogcopy := *log\n\t\t\t\tlogcopy.Removed = remove\n\t\t\t\tunfiltered = append(unfiltered, &logcopy)\n\t\t\t}\n\t\t}\n\t\tlogs := filterLogs(unfiltered, nil, nil, addresses, topics)\n\t\tif len(logs) > 0 && logs[0].TxHash == (common.Hash{}) {\n\t\t\t// We have matching but non-derived logs\n\t\t\treceipts, err := es.backend.GetReceipts(ctx, header.Hash())\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tunfiltered = unfiltered[:0]\n\t\t\tfor _, receipt := range receipts {\n\t\t\t\tfor _, log := range receipt.Logs {\n\t\t\t\t\tlogcopy := *log\n\t\t\t\t\tlogcopy.Removed = remove\n\t\t\t\t\tunfiltered = append(unfiltered, &logcopy)\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogs = filterLogs(unfiltered, nil, nil, addresses, topics)\n\t\t}\n\t\treturn logs\n\t}\n\treturn nil\n}\n\n// eventLoop (un)installs filters and processes mux events.\nfunc (es *EventSystem) eventLoop() {\n\t// Ensure all subscriptions get cleaned up\n\tdefer func() {\n\t\tes.txsSub.Unsubscribe()\n\t\tes.logsSub.Unsubscribe()\n\t\tes.rmLogsSub.Unsubscribe()\n\t\tes.pendingLogsSub.Unsubscribe()\n\t\tes.chainSub.Unsubscribe()\n\t}()\n\n\tindex := make(filterIndex)\n\tfor i := UnknownSubscription; i < LastIndexSubscription; i++ {\n\t\tindex[i] = make(map[rpc.ID]*subscription)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase ev := <-es.txsCh:\n\t\t\tes.handleTxsEvent(index, ev)\n\t\tcase ev := <-es.logsCh:\n\t\t\tes.handleLogs(index, ev)\n\t\tcase ev := <-es.rmLogsCh:\n\t\t\tes.handleRemovedLogs(index, ev)\n\t\tcase ev 
:= <-es.pendingLogsCh:\n\t\t\tes.handlePendingLogs(index, ev)\n\t\tcase ev := <-es.chainCh:\n\t\t\tes.handleChainEvent(index, ev)\n\n\t\tcase f := <-es.install:\n\t\t\tif f.typ == MinedAndPendingLogsSubscription {\n\t\t\t\t// this type covers both logs and pending logs subscriptions\n\t\t\t\tindex[LogsSubscription][f.id] = f\n\t\t\t\tindex[PendingLogsSubscription][f.id] = f\n\t\t\t} else {\n\t\t\t\tindex[f.typ][f.id] = f\n\t\t\t}\n\t\t\tclose(f.installed)\n\n\t\tcase f := <-es.uninstall:\n\t\t\tif f.typ == MinedAndPendingLogsSubscription {\n\t\t\t\t// this type covers both logs and pending logs subscriptions\n\t\t\t\tdelete(index[LogsSubscription], f.id)\n\t\t\t\tdelete(index[PendingLogsSubscription], f.id)\n\t\t\t} else {\n\t\t\t\tdelete(index[f.typ], f.id)\n\t\t\t}\n\t\t\tclose(f.err)\n\n\t\t// System stopped\n\t\tcase <-es.txsSub.Err():\n\t\t\treturn\n\t\tcase <-es.logsSub.Err():\n\t\t\treturn\n\t\tcase <-es.rmLogsSub.Err():\n\t\t\treturn\n\t\tcase <-es.chainSub.Err():\n\t\t\treturn\n\t\t}\n\t}\n}\n
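\n// exampleSubscribeNewHeads is an illustrative sketch added for exposition and is\n// not part of the upstream file: it shows the intended calling pattern for\n// SubscribeNewHeads, namely draining the headers channel and calling\n// Unsubscribe when done. The stop channel is a hypothetical caller-side signal.\nfunc exampleSubscribeNewHeads(es *EventSystem, stop <-chan struct{}) {\n\theaders := make(chan *types.Header)\n\tsub := es.SubscribeNewHeads(headers)\n\tdefer sub.Unsubscribe()\n\tfor {\n\t\tselect {\n\t\tcase h := <-headers:\n\t\t\t_ = h // a real consumer would process the new header here\n\t\tcase <-stop:\n\t\t\treturn\n\t\tcase <-sub.Err():\n\t\t\treturn\n\t\t}\n\t}\n}\n"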
  },
  {
    "path": "eth/filters/filter_system_test.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage filters\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/bloombits\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\nvar (\n\tdeadline = 5 * time.Minute\n)\n\ntype testBackend struct {\n\tmux             *event.TypeMux\n\tdb              ethdb.Database\n\tsections        uint64\n\ttxFeed          event.Feed\n\tlogsFeed        event.Feed\n\trmLogsFeed      event.Feed\n\tpendingLogsFeed event.Feed\n\tchainFeed       event.Feed\n}\n\nfunc (b *testBackend) ChainDb() ethdb.Database {\n\treturn b.db\n}\n\nfunc (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {\n\tvar (\n\t\thash common.Hash\n\t\tnum  uint64\n\t)\n\tif blockNr == rpc.LatestBlockNumber {\n\t\thash = rawdb.ReadHeadBlockHash(b.db)\n\t\tnumber := rawdb.ReadHeaderNumber(b.db, hash)\n\t\tif number == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t\tnum = *number\n\t} else {\n\t\tnum = uint64(blockNr)\n\t\thash = rawdb.ReadCanonicalHash(b.db, num)\n\t}\n\treturn rawdb.ReadHeader(b.db, hash, num), nil\n}\n\nfunc (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {\n\tnumber := rawdb.ReadHeaderNumber(b.db, hash)\n\tif number == nil {\n\t\treturn nil, nil\n\t}\n\treturn rawdb.ReadHeader(b.db, hash, *number), nil\n}\n\nfunc (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {\n\tif number := rawdb.ReadHeaderNumber(b.db, hash); number != nil {\n\t\treturn rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig), nil\n\t}\n\treturn nil, nil\n}\n\nfunc (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {\n\tnumber := rawdb.ReadHeaderNumber(b.db, hash)\n\tif number == nil {\n\t\treturn nil, nil\n\t}\n\treceipts := rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig)\n\n\tlogs := make([][]*types.Log, len(receipts))\n\tfor i, receipt := range receipts {\n\t\tlogs[i] = receipt.Logs\n\t}\n\treturn logs, nil\n}\n\nfunc (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {\n\treturn b.txFeed.Subscribe(ch)\n}\n\nfunc (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- 
core.RemovedLogsEvent) event.Subscription {\n\treturn b.rmLogsFeed.Subscribe(ch)\n}\n\nfunc (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {\n\treturn b.logsFeed.Subscribe(ch)\n}\n\nfunc (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {\n\treturn b.pendingLogsFeed.Subscribe(ch)\n}\n\nfunc (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {\n\treturn b.chainFeed.Subscribe(ch)\n}\n\nfunc (b *testBackend) BloomStatus() (uint64, uint64) {\n\treturn params.BloomBitsBlocks, b.sections\n}\n\nfunc (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {\n\trequests := make(chan chan *bloombits.Retrieval)\n\n\tgo session.Multiplex(16, 0, requests)\n\tgo func() {\n\t\tfor {\n\t\t\t// Wait for a service request or a shutdown\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\n\t\t\tcase request := <-requests:\n\t\t\t\ttask := <-request\n\n\t\t\t\ttask.Bitsets = make([][]byte, len(task.Sections))\n\t\t\t\tfor i, section := range task.Sections {\n\t\t\t\t\tif rand.Int()%4 != 0 { // Handle occasional missing deliveries\n\t\t\t\t\t\thead := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1)\n\t\t\t\t\t\ttask.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\trequest <- task\n\t\t\t}\n\t\t}\n\t}()\n}\n\n// TestBlockSubscription tests if a block subscription returns block hashes for posted chain events.\n// It creates two identical subscriptions up front; both should receive every posted chain event in order.\nfunc TestBlockSubscription(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tdb          = rawdb.NewMemoryDatabase()\n\t\tbackend     = &testBackend{db: db}\n\t\tapi         = NewPublicFilterAPI(backend, false, deadline)\n\t\tgenesis     = new(core.Genesis).MustCommit(db)\n\t\tchain, _    = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})\n\t\tchainEvents = []core.ChainEvent{}\n\t)\n\n\tfor _, blk := range chain {\n\t\tchainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk})\n\t}\n\n\tchan0 := make(chan *types.Header)\n\tsub0 := api.events.SubscribeNewHeads(chan0)\n\tchan1 := make(chan *types.Header)\n\tsub1 := api.events.SubscribeNewHeads(chan1)\n\n\tgo func() { // simulate client\n\t\ti1, i2 := 0, 0\n\t\tfor i1 != len(chainEvents) || i2 != len(chainEvents) {\n\t\t\tselect {\n\t\t\tcase header := <-chan0:\n\t\t\t\tif chainEvents[i1].Hash != header.Hash() {\n\t\t\t\t\tt.Errorf(\"sub0 received invalid hash on index %d, want %x, got %x\", i1, chainEvents[i1].Hash, header.Hash())\n\t\t\t\t}\n\t\t\t\ti1++\n\t\t\tcase header := <-chan1:\n\t\t\t\tif chainEvents[i2].Hash != header.Hash() {\n\t\t\t\t\tt.Errorf(\"sub1 received invalid hash on index %d, want %x, got %x\", i2, chainEvents[i2].Hash, header.Hash())\n\t\t\t\t}\n\t\t\t\ti2++\n\t\t\t}\n\t\t}\n\n\t\tsub0.Unsubscribe()\n\t\tsub1.Unsubscribe()\n\t}()\n\n\ttime.Sleep(1 * time.Second)\n\tfor _, e := range chainEvents {\n\t\tbackend.chainFeed.Send(e)\n\t}\n\n\t<-sub0.Err()\n\t<-sub1.Err()\n}\n\n// TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the event mux.\n
func TestPendingTxFilter(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tdb      = rawdb.NewMemoryDatabase()\n\t\tbackend = &testBackend{db: db}\n\t\tapi     = NewPublicFilterAPI(backend, false, deadline)\n\n\t\ttransactions = []*types.Transaction{\n\t\t\ttypes.NewTransaction(0, common.HexToAddress(\"0xb794f5ea0ba39494ce83a213fffba74279579268\"), new(big.Int), 0, new(big.Int), nil),\n\t\t\ttypes.NewTransaction(1, common.HexToAddress(\"0xb794f5ea0ba39494ce83a213fffba74279579268\"), new(big.Int), 0, new(big.Int), nil),\n\t\t\ttypes.NewTransaction(2, common.HexToAddress(\"0xb794f5ea0ba39494ce83a213fffba74279579268\"), new(big.Int), 0, new(big.Int), nil),\n\t\t\ttypes.NewTransaction(3, common.HexToAddress(\"0xb794f5ea0ba39494ce83a213fffba74279579268\"), new(big.Int), 0, new(big.Int), nil),\n\t\t\ttypes.NewTransaction(4, common.HexToAddress(\"0xb794f5ea0ba39494ce83a213fffba74279579268\"), new(big.Int), 0, new(big.Int), nil),\n\t\t}\n\n\t\thashes []common.Hash\n\t)\n\n\tfid0 := api.NewPendingTransactionFilter()\n\n\ttime.Sleep(1 * time.Second)\n\tbackend.txFeed.Send(core.NewTxsEvent{Txs: transactions})\n\n\ttimeout := time.Now().Add(1 * time.Second)\n\tfor {\n\t\tresults, err := api.GetFilterChanges(fid0)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unable to retrieve logs: %v\", err)\n\t\t}\n\n\t\th := results.([]common.Hash)\n\t\thashes = append(hashes, h...)\n\t\tif len(hashes) >= len(transactions) {\n\t\t\tbreak\n\t\t}\n\t\t// check timeout\n\t\tif time.Now().After(timeout) {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\n\tif len(hashes) != len(transactions) {\n\t\tt.Errorf(\"invalid number of transactions, want %d transaction(s), got %d\", len(transactions), len(hashes))\n\t\treturn\n\t}\n\tfor i := range hashes {\n\t\tif hashes[i] != transactions[i].Hash() {\n\t\t\tt.Errorf(\"hashes[%d] invalid, want %x, got %x\", i, transactions[i].Hash(), hashes[i])\n\t\t}\n\t}\n}\n\n// TestLogFilterCreation tests whether a given filter criteria makes sense.\n// If not, it must return an error.\nfunc TestLogFilterCreation(t *testing.T) {\n\tvar (\n\t\tdb      = rawdb.NewMemoryDatabase()\n\t\tbackend = &testBackend{db: db}\n\t\tapi     = NewPublicFilterAPI(backend, false, deadline)\n\n\t\ttestCases = []struct {\n\t\t\tcrit    FilterCriteria\n\t\t\tsuccess bool\n\t\t}{\n\t\t\t// defaults\n\t\t\t{FilterCriteria{}, true},\n\t\t\t// valid block number range\n\t\t\t{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true},\n\t\t\t// \"mined\" block range to latest\n\t\t\t{FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true},\n\t\t\t// new mined and pending blocks\n\t\t\t{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true},\n\t\t\t// from block \"higher\" than to block\n\t\t\t{FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false},\n\t\t\t// from block \"higher\" than to block\n\t\t\t{FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},\n\t\t\t// from block \"higher\" than to block\n\t\t\t{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false},\n\t\t\t// from block \"higher\" than to block\n\t\t\t{FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false},\n\t\t}\n\t)\n\n\tfor i, test := range testCases {\n\t\t_, err := api.NewFilter(test.crit)\n\t\tif test.success && err != nil {
\n\t\t\tt.Errorf(\"expected filter creation for case %d to succeed, got %v\", i, err)\n\t\t}\n\t\tif !test.success && err == nil {\n\t\t\tt.Errorf(\"expected testcase %d to fail with an error\", i)\n\t\t}\n\t}\n}\n\n// TestInvalidLogFilterCreation tests whether invalid filter log criteria results in an error\n// when the filter is created.\nfunc TestInvalidLogFilterCreation(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tdb      = rawdb.NewMemoryDatabase()\n\t\tbackend = &testBackend{db: db}\n\t\tapi     = NewPublicFilterAPI(backend, false, deadline)\n\t)\n\n\t// different situations where log filter creation should fail.\n\t// Reason: fromBlock > toBlock\n\ttestCases := []FilterCriteria{\n\t\t0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},\n\t\t1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)},\n\t\t2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)},\n\t}\n\n\tfor i, test := range testCases {\n\t\tif _, err := api.NewFilter(test); err == nil {\n\t\t\tt.Errorf(\"Expected NewFilter for case #%d to fail\", i)\n\t\t}\n\t}\n}\n\nfunc TestInvalidGetLogsRequest(t *testing.T) {\n\tvar (\n\t\tdb        = rawdb.NewMemoryDatabase()\n\t\tbackend   = &testBackend{db: db}\n\t\tapi       = NewPublicFilterAPI(backend, false, deadline)\n\t\tblockHash = common.HexToHash(\"0x1111111111111111111111111111111111111111111111111111111111111111\")\n\t)\n\n\t// Reason: Cannot specify both BlockHash and FromBlock/ToBlock\n\ttestCases := []FilterCriteria{\n\t\t0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)},\n\t\t1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)},\n\t\t2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())},\n\t}\n\n\tfor i, test := range testCases {\n\t\tif _, err := api.GetLogs(context.Background(), test); err == nil {\n\t\t\tt.Errorf(\"Expected Logs for case #%d to fail\", i)\n\t\t}\n\t}\n}\n\n// TestLogFilter tests whether log filters match the correct logs that are posted to the event feed.\nfunc TestLogFilter(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tdb      = rawdb.NewMemoryDatabase()\n\t\tbackend = &testBackend{db: db}\n\t\tapi     = NewPublicFilterAPI(backend, false, deadline)\n\n\t\tfirstAddr      = common.HexToAddress(\"0x1111111111111111111111111111111111111111\")\n\t\tsecondAddr     = common.HexToAddress(\"0x2222222222222222222222222222222222222222\")\n\t\tthirdAddress   = common.HexToAddress(\"0x3333333333333333333333333333333333333333\")\n\t\tnotUsedAddress = common.HexToAddress(\"0x9999999999999999999999999999999999999999\")\n\t\tfirstTopic     = common.HexToHash(\"0x1111111111111111111111111111111111111111111111111111111111111111\")\n\t\tsecondTopic    = common.HexToHash(\"0x2222222222222222222222222222222222222222222222222222222222222222\")\n\t\tnotUsedTopic   = common.HexToHash(\"0x9999999999999999999999999999999999999999999999999999999999999999\")\n\n\t\t// posted twice, once as regular logs and once as pending logs.\n\t\tallLogs = []*types.Log{\n\t\t\t{Address: firstAddr},\n\t\t\t{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},\n\t\t\t{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},\n\t\t\t{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2},\n\t\t\t{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3},\n\t\t}\n\n\t\texpectedCase7  = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]}
\n\t\texpectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]}\n\n\t\ttestCases = []struct {\n\t\t\tcrit     FilterCriteria\n\t\t\texpected []*types.Log\n\t\t\tid       rpc.ID\n\t\t}{\n\t\t\t// match all\n\t\t\t0: {FilterCriteria{}, allLogs, \"\"},\n\t\t\t// match none due to no matching addresses\n\t\t\t1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, \"\"},\n\t\t\t// match logs based on addresses, ignore topics\n\t\t\t2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], \"\"},\n\t\t\t// match none due to no matching topics (match with address)\n\t\t\t3: {FilterCriteria{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, \"\"},\n\t\t\t// match logs based on addresses and topics\n\t\t\t4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], \"\"},\n\t\t\t// match logs based on multiple addresses and \"or\" topics\n\t\t\t5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], \"\"},\n\t\t\t// logs in the pending block\n\t\t\t6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], \"\"},\n\t\t\t// mined logs with block num >= 2 or pending logs\n\t\t\t7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, \"\"},\n\t\t\t// all \"mined\" logs with block num >= 2\n\t\t\t8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], \"\"},\n\t\t\t// all \"mined\" logs\n\t\t\t9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, \"\"},\n\t\t\t// all \"mined\" logs with 1 <= block num <= 2 and topic secondTopic\n\t\t\t10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], \"\"},\n\t\t\t// all \"mined\" and pending logs with topic firstTopic\n\t\t\t11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, \"\"},\n\t\t\t// match all logs due to wildcard topic\n\t\t\t12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], \"\"},\n\t\t}\n\t)\n\n\t// create all filters\n\tfor i := range testCases {\n\t\ttestCases[i].id, _ = api.NewFilter(testCases[i].crit)\n\t}\n\n\t// raise events\n\ttime.Sleep(1 * time.Second)\n\tif nsend := backend.logsFeed.Send(allLogs); nsend == 0 {\n\t\tt.Fatal(\"Logs event not delivered\")\n\t}\n\tif nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 {\n\t\tt.Fatal(\"Pending logs event not delivered\")\n\t}\n\n\tfor i, tt := range testCases {\n\t\tvar fetched []*types.Log\n\t\ttimeout := time.Now().Add(1 * time.Second)\n\t\tfor { // fetch all expected logs\n\t\t\tresults, err := api.GetFilterChanges(tt.id)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Unable to fetch logs: %v\", err)\n\t\t\t}\n\n\t\t\tfetched = append(fetched, results.([]*types.Log)...)\n\t\t\tif len(fetched) >= len(tt.expected) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// check timeout\n\t\t\tif time.Now().After(timeout) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t}\n\n\t\tif len(fetched) != 
len(tt.expected) {\n\t\t\tt.Errorf(\"invalid number of logs for case %d, want %d log(s), got %d\", i, len(tt.expected), len(fetched))\n\t\t\treturn\n\t\t}\n\n\t\tfor l := range fetched {\n\t\t\tif fetched[l].Removed {\n\t\t\t\tt.Errorf(\"expected log not to be removed for log %d in case %d\", l, i)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(fetched[l], tt.expected[l]) {\n\t\t\t\tt.Errorf(\"invalid log on index %d for case %d\", l, i)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed.\nfunc TestPendingLogsSubscription(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tdb      = rawdb.NewMemoryDatabase()\n\t\tbackend = &testBackend{db: db}\n\t\tapi     = NewPublicFilterAPI(backend, false, deadline)\n\n\t\tfirstAddr      = common.HexToAddress(\"0x1111111111111111111111111111111111111111\")\n\t\tsecondAddr     = common.HexToAddress(\"0x2222222222222222222222222222222222222222\")\n\t\tthirdAddress   = common.HexToAddress(\"0x3333333333333333333333333333333333333333\")\n\t\tnotUsedAddress = common.HexToAddress(\"0x9999999999999999999999999999999999999999\")\n\t\tfirstTopic     = common.HexToHash(\"0x1111111111111111111111111111111111111111111111111111111111111111\")\n\t\tsecondTopic    = common.HexToHash(\"0x2222222222222222222222222222222222222222222222222222222222222222\")\n\t\tthirdTopic     = common.HexToHash(\"0x3333333333333333333333333333333333333333333333333333333333333333\")\n\t\tfourthTopic    = common.HexToHash(\"0x4444444444444444444444444444444444444444444444444444444444444444\")\n\t\tnotUsedTopic   = common.HexToHash(\"0x9999999999999999999999999999999999999999999999999999999999999999\")\n\n\t\tallLogs = [][]*types.Log{\n\t\t\t{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}},\n\t\t\t{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}},\n\t\t\t{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}},\n\t\t\t{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}},\n\t\t\t{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}},\n\t\t\t{\n\t\t\t\t{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},\n\t\t\t\t{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},\n\t\t\t\t{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},\n\t\t\t\t{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},\n\t\t\t},\n\t\t}\n\n\t\ttestCases = []struct {\n\t\t\tcrit     ethereum.FilterQuery\n\t\t\texpected []*types.Log\n\t\t\tc        chan []*types.Log\n\t\t\tsub      *Subscription\n\t\t}{\n\t\t\t// match all\n\t\t\t{\n\t\t\t\tethereum.FilterQuery{}, flattenLogs(allLogs),\n\t\t\t\tnil, nil,\n\t\t\t},\n\t\t\t// match none due to no matching addresses\n\t\t\t{\n\t\t\t\tethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}},\n\t\t\t\tnil,\n\t\t\t\tnil, nil,\n\t\t\t},\n\t\t\t// match logs based on addresses, ignore topics\n\t\t\t{\n\t\t\t\tethereum.FilterQuery{Addresses: []common.Address{firstAddr}},\n\t\t\t\tappend(flattenLogs(allLogs[:2]), allLogs[5][3]),\n\t\t\t\tnil, nil,\n\t\t\t},\n\t\t\t// match none due to no matching topics (match with address)\n\t\t\t{\n\t\t\t\tethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}},\n\t\t\t\tnil, nil, nil,\n\t\t\t},\n\t\t\t// match logs based on addresses and 
topics\n\t\t\t{\n\t\t\t\tethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}},\n\t\t\t\tappend(flattenLogs(allLogs[3:5]), allLogs[5][0]),\n\t\t\t\tnil, nil,\n\t\t\t},\n\t\t\t// match logs based on multiple addresses and \"or\" topics\n\t\t\t{\n\t\t\t\tethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}},\n\t\t\t\tappend(flattenLogs(allLogs[2:5]), allLogs[5][0]),\n\t\t\t\tnil,\n\t\t\t\tnil,\n\t\t\t},\n\t\t\t// block numbers are ignored for filters created with New***Filter; these return all logs that match the given criteria when the state changes\n\t\t\t{\n\t\t\t\tethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)},\n\t\t\t\tappend(flattenLogs(allLogs[:2]), allLogs[5][3]),\n\t\t\t\tnil, nil,\n\t\t\t},\n\t\t\t// multiple pending logs, should match only 2 topics from the logs in block 5\n\t\t\t{\n\t\t\t\tethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}},\n\t\t\t\t[]*types.Log{allLogs[5][0], allLogs[5][2]},\n\t\t\t\tnil, nil,\n\t\t\t},\n\t\t}\n\t)\n\n\t// create all subscriptions; this ensures all subscriptions are created before the events are posted.\n\t// On slow machines this could otherwise lead to missing events when the subscription is created after\n\t// (some) events are posted.\n\tfor i := range testCases {\n\t\ttestCases[i].c = make(chan []*types.Log)\n\t\ttestCases[i].sub, _ = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c)\n\t}\n\n\tfor n, test := range testCases {\n\t\ti := n\n\t\ttt := test\n\t\tgo func() {\n\t\t\tvar fetched []*types.Log\n\t\tfetchLoop:\n\t\t\tfor {\n\t\t\t\tlogs := <-tt.c\n\t\t\t\tfetched = append(fetched, logs...)\n\t\t\t\tif len(fetched) >= len(tt.expected) {\n\t\t\t\t\tbreak fetchLoop\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(fetched) != len(tt.expected) {\n\t\t\t\tpanic(fmt.Sprintf(\"invalid number of logs for case %d, want %d log(s), got %d\", i, len(tt.expected), len(fetched)))\n\t\t\t}\n\n\t\t\tfor l := range fetched {\n\t\t\t\tif fetched[l].Removed {\n\t\t\t\t\tpanic(fmt.Sprintf(\"expected log not to be removed for log %d in case %d\", l, i))\n\t\t\t\t}\n\t\t\t\tif !reflect.DeepEqual(fetched[l], tt.expected[l]) {\n\t\t\t\t\tpanic(fmt.Sprintf(\"invalid log on index %d for case %d\", l, i))\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\t// raise events\n\ttime.Sleep(1 * time.Second)\n\tfor _, ev := range allLogs {\n\t\tbackend.pendingLogsFeed.Send(ev)\n\t}\n}\n\n// TestPendingTxFilterDeadlock tests if the event loop hangs when pending\n// txes arrive at the same time that one of multiple filters is timing out.\n// Please refer to #22131 for more details.\nfunc TestPendingTxFilterDeadlock(t *testing.T) {\n\tt.Parallel()\n\ttimeout := 100 * time.Millisecond\n\n\tvar (\n\t\tdb      = rawdb.NewMemoryDatabase()\n\t\tbackend = &testBackend{db: db}\n\t\tapi     = NewPublicFilterAPI(backend, false, timeout)\n\t\tdone    = make(chan struct{})\n\t)\n\n\tgo func() {\n\t\t// Bombard the feed with txes until a signal is received to stop\n\t\ti := uint64(0)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-done:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\ttx := types.NewTransaction(i, common.HexToAddress(\"0xb794f5ea0ba39494ce83a213fffba74279579268\"), new(big.Int), 0, new(big.Int), nil)\n\t\t\tbackend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}})\n\t\t\ti++\n\t\t}\n\t}()\n\n\t// Create a bunch of filters that will
\n\t// time out either in 100ms or 200ms\n\tfids := make([]rpc.ID, 20)\n\tfor i := 0; i < len(fids); i++ {\n\t\tfid := api.NewPendingTransactionFilter()\n\t\tfids[i] = fid\n\t\t// Wait for at least one tx to arrive in filter\n\t\tfor {\n\t\t\thashes, err := api.GetFilterChanges(fid)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Filter should exist: %v\\n\", err)\n\t\t\t}\n\t\t\tif len(hashes.([]common.Hash)) > 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\truntime.Gosched()\n\t\t}\n\t}\n\n\t// Wait until filters have timed out\n\ttime.Sleep(3 * timeout)\n\n\t// If the tx loop doesn't consume `done` after a second,\n\t// it's hanging.\n\tselect {\n\tcase done <- struct{}{}:\n\t\t// Check that all filters have been uninstalled\n\t\tfor _, fid := range fids {\n\t\t\tif _, err := api.GetFilterChanges(fid); err == nil {\n\t\t\t\tt.Errorf(\"Filter %s should have been uninstalled\\n\", fid)\n\t\t\t}\n\t\t}\n\tcase <-time.After(1 * time.Second):\n\t\tt.Error(\"Tx sending loop hangs\")\n\t}\n}\n\nfunc flattenLogs(pl [][]*types.Log) []*types.Log {\n\tvar logs []*types.Log\n\tfor _, l := range pl {\n\t\tlogs = append(logs, l...)\n\t}\n\treturn logs\n}\n"
  },
  {
    "path": "eth/filters/filter_test.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage filters\n\nimport (\n\t\"context\"\n\t\"io/ioutil\"\n\t\"math/big\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\nfunc makeReceipt(addr common.Address) *types.Receipt {\n\treceipt := types.NewReceipt(nil, false, 0)\n\treceipt.Logs = []*types.Log{\n\t\t{Address: addr},\n\t}\n\treceipt.Bloom = types.CreateBloom(types.Receipts{receipt})\n\treturn receipt\n}\n\nfunc BenchmarkFilters(b *testing.B) {\n\tdir, err := ioutil.TempDir(\"\", \"filtertest\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tvar (\n\t\tdb, _   = rawdb.NewLevelDBDatabase(dir, 0, 0, \"\", false)\n\t\tbackend = &testBackend{db: db}\n\t\tkey1, _ = crypto.HexToECDSA(\"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291\")\n\t\taddr1   = crypto.PubkeyToAddress(key1.PublicKey)\n\t\taddr2   = common.BytesToAddress([]byte(\"jeff\"))\n\t\taddr3   = common.BytesToAddress([]byte(\"ethereum\"))\n\t\taddr4   = common.BytesToAddress([]byte(\"random addresses please\"))\n\t)\n\tdefer db.Close()\n\n\tgenesis := core.GenesisBlockForTesting(db, addr1, big.NewInt(1000000))\n\tchain, receipts := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 100010, func(i int, gen *core.BlockGen) {\n\t\tswitch i {\n\t\tcase 2403:\n\t\t\treceipt := makeReceipt(addr1)\n\t\t\tgen.AddUncheckedReceipt(receipt)\n\t\tcase 1034:\n\t\t\treceipt := makeReceipt(addr2)\n\t\t\tgen.AddUncheckedReceipt(receipt)\n\t\tcase 34:\n\t\t\treceipt := makeReceipt(addr3)\n\t\t\tgen.AddUncheckedReceipt(receipt)\n\t\tcase 99999:\n\t\t\treceipt := makeReceipt(addr4)\n\t\t\tgen.AddUncheckedReceipt(receipt)\n\n\t\t}\n\t})\n\tfor i, block := range chain {\n\t\trawdb.WriteBlock(db, block)\n\t\trawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())\n\t\trawdb.WriteHeadBlockHash(db, block.Hash())\n\t\trawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i])\n\t}\n\tb.ResetTimer()\n\n\tfilter := NewRangeFilter(backend, 0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil)\n\n\tfor i := 0; i < b.N; i++ {\n\t\tlogs, _ := filter.Logs(context.Background())\n\t\tif len(logs) != 4 {\n\t\t\tb.Fatal(\"expected 4 logs, got\", len(logs))\n\t\t}\n\t}\n}\n\nfunc TestFilters(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"filtertest\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tvar (\n\t\tdb, _   = rawdb.NewLevelDBDatabase(dir, 0, 0, \"\", 
false)\n\t\tbackend = &testBackend{db: db}\n\t\tkey1, _ = crypto.HexToECDSA(\"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291\")\n\t\taddr    = crypto.PubkeyToAddress(key1.PublicKey)\n\n\t\thash1 = common.BytesToHash([]byte(\"topic1\"))\n\t\thash2 = common.BytesToHash([]byte(\"topic2\"))\n\t\thash3 = common.BytesToHash([]byte(\"topic3\"))\n\t\thash4 = common.BytesToHash([]byte(\"topic4\"))\n\t)\n\tdefer db.Close()\n\n\tgenesis := core.GenesisBlockForTesting(db, addr, big.NewInt(1000000))\n\tchain, receipts := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 1000, func(i int, gen *core.BlockGen) {\n\t\tswitch i {\n\t\tcase 1:\n\t\t\treceipt := types.NewReceipt(nil, false, 0)\n\t\t\treceipt.Logs = []*types.Log{\n\t\t\t\t{\n\t\t\t\t\tAddress: addr,\n\t\t\t\t\tTopics:  []common.Hash{hash1},\n\t\t\t\t},\n\t\t\t}\n\t\t\tgen.AddUncheckedReceipt(receipt)\n\t\t\tgen.AddUncheckedTx(types.NewTransaction(1, common.HexToAddress(\"0x1\"), big.NewInt(1), 1, big.NewInt(1), nil))\n\t\tcase 2:\n\t\t\treceipt := types.NewReceipt(nil, false, 0)\n\t\t\treceipt.Logs = []*types.Log{\n\t\t\t\t{\n\t\t\t\t\tAddress: addr,\n\t\t\t\t\tTopics:  []common.Hash{hash2},\n\t\t\t\t},\n\t\t\t}\n\t\t\tgen.AddUncheckedReceipt(receipt)\n\t\t\tgen.AddUncheckedTx(types.NewTransaction(2, common.HexToAddress(\"0x2\"), big.NewInt(2), 2, big.NewInt(2), nil))\n\n\t\tcase 998:\n\t\t\treceipt := types.NewReceipt(nil, false, 0)\n\t\t\treceipt.Logs = []*types.Log{\n\t\t\t\t{\n\t\t\t\t\tAddress: addr,\n\t\t\t\t\tTopics:  []common.Hash{hash3},\n\t\t\t\t},\n\t\t\t}\n\t\t\tgen.AddUncheckedReceipt(receipt)\n\t\t\tgen.AddUncheckedTx(types.NewTransaction(998, common.HexToAddress(\"0x998\"), big.NewInt(998), 998, big.NewInt(998), nil))\n\t\tcase 999:\n\t\t\treceipt := types.NewReceipt(nil, false, 0)\n\t\t\treceipt.Logs = []*types.Log{\n\t\t\t\t{\n\t\t\t\t\tAddress: addr,\n\t\t\t\t\tTopics:  []common.Hash{hash4},\n\t\t\t\t},\n\t\t\t}\n\t\t\tgen.AddUncheckedReceipt(receipt)\n\t\t\tgen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress(\"0x999\"), big.NewInt(999), 999, big.NewInt(999), nil))\n\t\t}\n\t})\n\tfor i, block := range chain {\n\t\trawdb.WriteBlock(db, block)\n\t\trawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64())\n\t\trawdb.WriteHeadBlockHash(db, block.Hash())\n\t\trawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i])\n\t}\n\n\tfilter := NewRangeFilter(backend, 0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}})\n\n\tlogs, _ := filter.Logs(context.Background())\n\tif len(logs) != 4 {\n\t\tt.Error(\"expected 4 logs, got\", len(logs))\n\t}\n\n\tfilter = NewRangeFilter(backend, 900, 999, []common.Address{addr}, [][]common.Hash{{hash3}})\n\tlogs, _ = filter.Logs(context.Background())\n\tif len(logs) != 1 {\n\t\tt.Error(\"expected 1 log, got\", len(logs))\n\t}\n\tif len(logs) > 0 && logs[0].Topics[0] != hash3 {\n\t\tt.Errorf(\"expected log[0].Topics[0] to be %x, got %x\", hash3, logs[0].Topics[0])\n\t}\n\n\tfilter = NewRangeFilter(backend, 990, -1, []common.Address{addr}, [][]common.Hash{{hash3}})\n\tlogs, _ = filter.Logs(context.Background())\n\tif len(logs) != 1 {\n\t\tt.Error(\"expected 1 log, got\", len(logs))\n\t}\n\tif len(logs) > 0 && logs[0].Topics[0] != hash3 {\n\t\tt.Errorf(\"expected log[0].Topics[0] to be %x, got %x\", hash3, logs[0].Topics[0])\n\t}\n\n\tfilter = NewRangeFilter(backend, 1, 10, nil, [][]common.Hash{{hash1, hash2}})\n\n\tlogs, _ = filter.Logs(context.Background())\n\tif len(logs) != 2 {\n\t\tt.Error(\"expected 2 logs, got\"
, len(logs))\n\t}\n\n\tfailHash := common.BytesToHash([]byte(\"fail\"))\n\tfilter = NewRangeFilter(backend, 0, -1, nil, [][]common.Hash{{failHash}})\n\n\tlogs, _ = filter.Logs(context.Background())\n\tif len(logs) != 0 {\n\t\tt.Error(\"expected 0 logs, got\", len(logs))\n\t}\n\n\tfailAddr := common.BytesToAddress([]byte(\"failmenow\"))\n\tfilter = NewRangeFilter(backend, 0, -1, []common.Address{failAddr}, nil)\n\n\tlogs, _ = filter.Logs(context.Background())\n\tif len(logs) != 0 {\n\t\tt.Error(\"expected 0 logs, got\", len(logs))\n\t}\n\n\tfilter = NewRangeFilter(backend, 0, -1, nil, [][]common.Hash{{failHash}, {hash1}})\n\n\tlogs, _ = filter.Logs(context.Background())\n\tif len(logs) != 0 {\n\t\tt.Error(\"expected 0 logs, got\", len(logs))\n\t}\n}\n"
  },
  {
    "path": "eth/gasprice/gasprice.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage gasprice\n\nimport (\n\t\"context\"\n\t\"math/big\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\nconst sampleNumber = 3 // Number of transactions sampled in a block\n\nvar DefaultMaxPrice = big.NewInt(500 * params.GWei)\n\ntype Config struct {\n\tBlocks     int\n\tPercentile int\n\tDefault    *big.Int `toml:\",omitempty\"`\n\tMaxPrice   *big.Int `toml:\",omitempty\"`\n}\n\n// OracleBackend includes all necessary background APIs for oracle.\ntype OracleBackend interface {\n\tHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)\n\tBlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error)\n\tChainConfig() *params.ChainConfig\n}\n\n// Oracle recommends gas prices based on the content of recent\n// blocks. 
 Suitable for both light and full clients.\ntype Oracle struct {\n\tbackend   OracleBackend\n\tlastHead  common.Hash\n\tlastPrice *big.Int\n\tmaxPrice  *big.Int\n\tcacheLock sync.RWMutex\n\tfetchLock sync.Mutex\n\n\tcheckBlocks int\n\tpercentile  int\n}\n\n// NewOracle returns a new gasprice oracle which can recommend a suitable\n// gas price for newly created transactions.\nfunc NewOracle(backend OracleBackend, params Config) *Oracle {\n\tblocks := params.Blocks\n\tif blocks < 1 {\n\t\tblocks = 1\n\t\tlog.Warn(\"Sanitizing invalid gasprice oracle sample blocks\", \"provided\", params.Blocks, \"updated\", blocks)\n\t}\n\tpercent := params.Percentile\n\tif percent < 0 {\n\t\tpercent = 0\n\t\tlog.Warn(\"Sanitizing invalid gasprice oracle sample percentile\", \"provided\", params.Percentile, \"updated\", percent)\n\t}\n\tif percent > 100 {\n\t\tpercent = 100\n\t\tlog.Warn(\"Sanitizing invalid gasprice oracle sample percentile\", \"provided\", params.Percentile, \"updated\", percent)\n\t}\n\tmaxPrice := params.MaxPrice\n\tif maxPrice == nil || maxPrice.Int64() <= 0 {\n\t\tmaxPrice = DefaultMaxPrice\n\t\tlog.Warn(\"Sanitizing invalid gasprice oracle price cap\", \"provided\", params.MaxPrice, \"updated\", maxPrice)\n\t}\n\treturn &Oracle{\n\t\tbackend:     backend,\n\t\tlastPrice:   params.Default,\n\t\tmaxPrice:    maxPrice,\n\t\tcheckBlocks: blocks,\n\t\tpercentile:  percent,\n\t}\n}\n\n// SuggestPrice returns a gas price so that newly created transactions have\n// a very high chance to be included in the following blocks.\nfunc (gpo *Oracle) SuggestPrice(ctx context.Context) (*big.Int, error) {\n\thead, _ := gpo.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber)\n\theadHash := head.Hash()\n\n\t// If the latest gasprice is still available, return it.\n\tgpo.cacheLock.RLock()\n\tlastHead, lastPrice := gpo.lastHead, gpo.lastPrice\n\tgpo.cacheLock.RUnlock()\n\tif headHash == lastHead {\n\t\treturn lastPrice, nil\n\t}\n\tgpo.fetchLock.Lock()\n\tdefer gpo.fetchLock.Unlock()\n\n\t// Try checking the cache again, maybe the last fetch fetched what we need\n\tgpo.cacheLock.RLock()\n\tlastHead, lastPrice = gpo.lastHead, gpo.lastPrice\n\tgpo.cacheLock.RUnlock()\n\tif headHash == lastHead {\n\t\treturn lastPrice, nil\n\t}\n\tvar (\n\t\tsent, exp int\n\t\tnumber    = head.Number.Uint64()\n\t\tresult    = make(chan getBlockPricesResult, gpo.checkBlocks)\n\t\tquit      = make(chan struct{})\n\t\ttxPrices  []*big.Int\n\t)\n\tfor sent < gpo.checkBlocks && number > 0 {\n\t\tgo gpo.getBlockPrices(ctx, types.MakeSigner(gpo.backend.ChainConfig(), big.NewInt(int64(number))), number, sampleNumber, result, quit)\n\t\tsent++\n\t\texp++\n\t\tnumber--\n\t}\n\tfor exp > 0 {\n\t\tres := <-result\n\t\tif res.err != nil {\n\t\t\tclose(quit)\n\t\t\treturn lastPrice, res.err\n\t\t}\n\t\texp--\n\t\t// Nothing returned. There are two special cases here:\n\t\t// - The block is empty\n\t\t// - All the transactions included are sent by the miner itself.\n\t\t// In these cases, use the latest calculated price for sampling.\n\t\tif len(res.prices) == 0 {\n\t\t\tres.prices = []*big.Int{lastPrice}\n\t\t}\n\t\t// Besides, in order to collect enough data for sampling, if nothing\n\t\t// meaningful is returned, try to query more blocks.
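 Each retry samples one more block below those already queried.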
 But the maximum\n\t\t// is 2*checkBlocks.\n\t\tif len(res.prices) == 1 && len(txPrices)+1+exp < gpo.checkBlocks*2 && number > 0 {\n\t\t\tgo gpo.getBlockPrices(ctx, types.MakeSigner(gpo.backend.ChainConfig(), big.NewInt(int64(number))), number, sampleNumber, result, quit)\n\t\t\tsent++\n\t\t\texp++\n\t\t\tnumber--\n\t\t}\n\t\ttxPrices = append(txPrices, res.prices...)\n\t}\n\tprice := lastPrice\n\tif len(txPrices) > 0 {\n\t\tsort.Sort(bigIntArray(txPrices))\n\t\tprice = txPrices[(len(txPrices)-1)*gpo.percentile/100]\n\t}\n\tif price.Cmp(gpo.maxPrice) > 0 {\n\t\tprice = new(big.Int).Set(gpo.maxPrice)\n\t}\n\tgpo.cacheLock.Lock()\n\tgpo.lastHead = headHash\n\tgpo.lastPrice = price\n\tgpo.cacheLock.Unlock()\n\treturn price, nil\n}\n\ntype getBlockPricesResult struct {\n\tprices []*big.Int\n\terr    error\n}\n\ntype transactionsByGasPrice []*types.Transaction\n\nfunc (t transactionsByGasPrice) Len() int           { return len(t) }\nfunc (t transactionsByGasPrice) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }\nfunc (t transactionsByGasPrice) Less(i, j int) bool { return t[i].GasPriceCmp(t[j]) < 0 }\n\n// getBlockPrices calculates the lowest transaction gas price in a given block\n// and sends it to the result channel. If the block is empty or all transactions\n// are sent by the miner itself (it doesn't make any sense to include this kind of\n// transaction prices for sampling), nil gasprice is returned.\nfunc (gpo *Oracle) getBlockPrices(ctx context.Context, signer types.Signer, blockNum uint64, limit int, result chan getBlockPricesResult, quit chan struct{}) {\n\tblock, err := gpo.backend.BlockByNumber(ctx, rpc.BlockNumber(blockNum))\n\tif block == nil {\n\t\tselect {\n\t\tcase result <- getBlockPricesResult{nil, err}:\n\t\tcase <-quit:\n\t\t}\n\t\treturn\n\t}\n\tblockTxs := block.Transactions()\n\ttxs := make([]*types.Transaction, len(blockTxs))\n\tcopy(txs, blockTxs)\n\tsort.Sort(transactionsByGasPrice(txs))\n\n\tvar prices []*big.Int\n\tfor _, tx := range txs {\n\t\tsender, err := types.Sender(signer, tx)\n\t\tif err == nil && sender != block.Coinbase() {\n\t\t\tprices = append(prices, tx.GasPrice())\n\t\t\tif len(prices) >= limit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tselect {\n\tcase result <- getBlockPricesResult{prices, nil}:\n\tcase <-quit:\n\t}\n}\n\ntype bigIntArray []*big.Int\n\nfunc (s bigIntArray) Len() int           { return len(s) }\nfunc (s bigIntArray) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 }\nfunc (s bigIntArray) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }\n"
  },
  {
    "path": "eth/gasprice/gasprice_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage gasprice\n\nimport (\n\t\"context\"\n\t\"math\"\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\ntype testBackend struct {\n\tchain *core.BlockChain\n}\n\nfunc (b *testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {\n\tif number == rpc.LatestBlockNumber {\n\t\treturn b.chain.CurrentBlock().Header(), nil\n\t}\n\treturn b.chain.GetHeaderByNumber(uint64(number)), nil\n}\n\nfunc (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {\n\tif number == rpc.LatestBlockNumber {\n\t\treturn b.chain.CurrentBlock(), nil\n\t}\n\treturn b.chain.GetBlockByNumber(uint64(number)), nil\n}\n\nfunc (b *testBackend) ChainConfig() *params.ChainConfig {\n\treturn b.chain.Config()\n}\n\nfunc newTestBackend(t *testing.T) *testBackend {\n\tvar (\n\t\tkey, _ = crypto.HexToECDSA(\"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291\")\n\t\taddr   = crypto.PubkeyToAddress(key.PublicKey)\n\t\tgspec  = &core.Genesis{\n\t\t\tConfig: params.TestChainConfig,\n\t\t\tAlloc:  core.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}},\n\t\t}\n\t\tsigner = types.LatestSigner(gspec.Config)\n\t)\n\tengine := ethash.NewFaker()\n\tdb := rawdb.NewMemoryDatabase()\n\tgenesis, _ := gspec.Commit(db)\n\n\t// Generate testing blocks\n\tblocks, _ := core.GenerateChain(params.TestChainConfig, genesis, engine, db, 32, func(i int, b *core.BlockGen) {\n\t\tb.SetCoinbase(common.Address{1})\n\t\ttx, err := types.SignTx(types.NewTransaction(b.TxNonce(addr), common.HexToAddress(\"deadbeef\"), big.NewInt(100), 21000, big.NewInt(int64(i+1)*params.GWei), nil), signer, key)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"failed to create tx: %v\", err)\n\t\t}\n\t\tb.AddTx(tx)\n\t})\n\t// Construct testing chain\n\tdiskdb := rawdb.NewMemoryDatabase()\n\tgspec.Commit(diskdb)\n\tchain, err := core.NewBlockChain(diskdb, nil, params.TestChainConfig, engine, vm.Config{}, nil, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create local chain, %v\", err)\n\t}\n\tchain.InsertChain(blocks)\n\treturn &testBackend{chain: chain}\n}\n\nfunc (b *testBackend) CurrentHeader() *types.Header {\n\treturn b.chain.CurrentHeader()\n}\n\nfunc (b *testBackend) GetBlockByNumber(number uint64) *types.Block {\n\treturn 
b.chain.GetBlockByNumber(number)\n}\n\nfunc TestSuggestPrice(t *testing.T) {\n\tconfig := Config{\n\t\tBlocks:     3,\n\t\tPercentile: 60,\n\t\tDefault:    big.NewInt(params.GWei),\n\t}\n\tbackend := newTestBackend(t)\n\toracle := NewOracle(backend, config)\n\n\t// The gas prices sampled are: 32G, 31G, 30G, 29G, 28G, 27G\n\tgot, err := oracle.SuggestPrice(context.Background())\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to retrieve recommended gas price: %v\", err)\n\t}\n\texpect := big.NewInt(params.GWei * int64(30))\n\tif got.Cmp(expect) != 0 {\n\t\tt.Fatalf(\"Gas price mismatch, want %d, got %d\", expect, got)\n\t}\n}\n"
  },
  {
    "path": "eth/handler.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"errors\"\n\t\"math\"\n\t\"math/big\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/eth/fetcher\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/eth\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/snap\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\nconst (\n\t// txChanSize is the size of channel listening to NewTxsEvent.\n\t// The number is referenced from the size of tx pool.\n\ttxChanSize = 4096\n)\n\nvar (\n\tsyncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge\n)\n\n// txPool defines the methods needed from a transaction pool implementation to\n// support all the operations needed by the Ethereum chain protocols.\ntype txPool interface {\n\t// Has returns an indicator whether txpool has a transaction\n\t// cached with the given hash.\n\tHas(hash common.Hash) bool\n\n\t// Get retrieves the transaction from local txpool with given\n\t// tx hash.\n\tGet(hash common.Hash) *types.Transaction\n\n\t// AddRemotes should add the given transactions to the pool.\n\tAddRemotes([]*types.Transaction) []error\n\n\t// Pending should return pending transactions.\n\t// The slice should be modifiable by the caller.\n\tPending() (map[common.Address]types.Transactions, error)\n\n\t// SubscribeNewTxsEvent should return an event subscription of\n\t// NewTxsEvent and send events to the given channel.\n\tSubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription\n}\n\n// handlerConfig is the collection of initialization parameters to create a full\n// node network handler.\ntype handlerConfig struct {\n\tDatabase   ethdb.Database            // Database for direct sync insertions\n\tChain      *core.BlockChain          // Blockchain to serve data from\n\tTxPool     txPool                    // Transaction pool to propagate from\n\tNetwork    uint64                    // Network identifier to adfvertise\n\tSync       downloader.SyncMode       // Whether to fast or full sync\n\tBloomCache uint64                    // Megabytes to alloc for fast sync bloom\n\tEventMux   *event.TypeMux            // Legacy event mux, deprecate for `feed`\n\tCheckpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync 
\n\tWhitelist  map[uint64]common.Hash    // Hard coded whitelist for sync challenges\n}\n\ntype handler struct {\n\tnetworkID  uint64\n\tforkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node\n\n\tfastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)\n\tsnapSync  uint32 // Flag whether fast sync should operate on top of the snap protocol\n\tacceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)\n\n\tcheckpointNumber uint64      // Block number for the sync progress validator to cross reference\n\tcheckpointHash   common.Hash // Block hash for the sync progress validator to cross reference\n\n\tdatabase ethdb.Database\n\ttxpool   txPool\n\tchain    *core.BlockChain\n\tmaxPeers int\n\n\tdownloader   *downloader.Downloader\n\tstateBloom   *trie.SyncBloom\n\tblockFetcher *fetcher.BlockFetcher\n\ttxFetcher    *fetcher.TxFetcher\n\tpeers        *peerSet\n\n\teventMux      *event.TypeMux\n\ttxsCh         chan core.NewTxsEvent\n\ttxsSub        event.Subscription\n\tminedBlockSub *event.TypeMuxSubscription\n\n\twhitelist map[uint64]common.Hash\n\n\t// channels for fetcher, syncer, txsyncLoop\n\ttxsyncCh chan *txsync\n\tquitSync chan struct{}\n\n\tchainSync *chainSyncer\n\twg        sync.WaitGroup\n\tpeerWG    sync.WaitGroup\n}\n\n// newHandler returns a handler for all Ethereum chain management protocols.\nfunc newHandler(config *handlerConfig) (*handler, error) {\n\t// Create the protocol manager with the base fields\n\tif config.EventMux == nil {\n\t\tconfig.EventMux = new(event.TypeMux) // Nicety initialization for tests\n\t}\n\th := &handler{\n\t\tnetworkID:  config.Network,\n\t\tforkFilter: forkid.NewFilter(config.Chain),\n\t\teventMux:   config.EventMux,\n\t\tdatabase:   config.Database,\n\t\ttxpool:     config.TxPool,\n\t\tchain:      config.Chain,\n\t\tpeers:      newPeerSet(),\n\t\twhitelist:  config.Whitelist,\n\t\ttxsyncCh:   make(chan *txsync),\n\t\tquitSync:   make(chan struct{}),\n\t}\n\tif config.Sync == downloader.FullSync {\n\t\t// The database seems empty as the current block is the genesis. Yet the fast\n\t\t// block is ahead, so fast sync was enabled for this node at a certain point.\n\t\t// The scenarios where this can happen are\n\t\t// * if the user manually (or via a bad block) rolled back a fast sync node\n\t\t//   below the sync point.\n\t\t// * the last fast sync is not finished while user specifies a full sync this\n\t\t//   time. But we don't have any recent state for full sync.
\n\t\t// In these cases, however, it's safe to re-enable fast sync.\n\t\tfullBlock, fastBlock := h.chain.CurrentBlock(), h.chain.CurrentFastBlock()\n\t\tif fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 {\n\t\t\th.fastSync = uint32(1)\n\t\t\tlog.Warn(\"Switch sync mode from full sync to fast sync\")\n\t\t}\n\t} else {\n\t\tif h.chain.CurrentBlock().NumberU64() > 0 {\n\t\t\t// Print warning log if database is not empty to run fast sync.\n\t\t\tlog.Warn(\"Switch sync mode from fast sync to full sync\")\n\t\t} else {\n\t\t\t// If fast sync was requested and our database is empty, grant it\n\t\t\th.fastSync = uint32(1)\n\t\t\tif config.Sync == downloader.SnapSync {\n\t\t\t\th.snapSync = uint32(1)\n\t\t\t}\n\t\t}\n\t}\n\t// If we have trusted checkpoints, enforce them on the chain\n\tif config.Checkpoint != nil {\n\t\th.checkpointNumber = (config.Checkpoint.SectionIndex+1)*params.CHTFrequency - 1\n\t\th.checkpointHash = config.Checkpoint.SectionHead\n\t}\n\t// Construct the downloader (long sync) and its backing state bloom if fast\n\t// sync is requested. The downloader is responsible for deallocating the state\n\t// bloom when it's done.\n\t// Note: we don't enable it if snap-sync is performed, since it's very heavy\n\t// and the heal-portion of the snap sync is much lighter than fast. What we particularly\n\t// want to avoid is a 90%-finished (but restarted) snap-sync to begin\n\t// indexing the entire trie\n\tif atomic.LoadUint32(&h.fastSync) == 1 && atomic.LoadUint32(&h.snapSync) == 0 {\n\t\th.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database)\n\t}\n\th.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer)\n\n\t// Construct the fetcher (short sync)\n\tvalidator := func(header *types.Header) error {\n\t\treturn h.chain.Engine().VerifyHeader(h.chain, header, true)\n\t}\n\theighter := func() uint64 {\n\t\treturn h.chain.CurrentBlock().NumberU64()\n\t}\n\tinserter := func(blocks types.Blocks) (int, error) {\n\t\t// If sync hasn't reached the checkpoint yet, deny importing weird blocks.\n\t\t//\n\t\t// Ideally we would also compare the head block's timestamp and similarly reject\n\t\t// the propagated block if the head is too old. Unfortunately there is a corner\n\t\t// case when starting new networks, where the genesis might be ancient (0 unix)\n\t\t// which would prevent full nodes from accepting it.\n\t\tif h.chain.CurrentBlock().NumberU64() < h.checkpointNumber {\n\t\t\tlog.Warn(\"Unsynced yet, discarded propagated block\", \"number\", blocks[0].Number(), \"hash\", blocks[0].Hash())\n\t\t\treturn 0, nil\n\t\t}\n\t\t// If fast sync is running, deny importing weird blocks. This is a problematic\n\t\t// clause when starting up a new network, because fast-syncing miners might not\n\t\t// accept each others' blocks until a restart. Unfortunately we haven't figured\n\t\t// out a way yet where nodes can decide unilaterally whether the network is new\n\t\t// or not. This should be fixed if we figure out a solution.
\n\t\tif atomic.LoadUint32(&h.fastSync) == 1 {\n\t\t\tlog.Warn(\"Fast syncing, discarded propagated block\", \"number\", blocks[0].Number(), \"hash\", blocks[0].Hash())\n\t\t\treturn 0, nil\n\t\t}\n\t\tn, err := h.chain.InsertChain(blocks)\n\t\tif err == nil {\n\t\t\tatomic.StoreUint32(&h.acceptTxs, 1) // Mark initial sync done on any fetcher import\n\t\t}\n\t\treturn n, err\n\t}\n\th.blockFetcher = fetcher.NewBlockFetcher(false, nil, h.chain.GetBlockByHash, validator, h.BroadcastBlock, heighter, nil, inserter, h.removePeer)\n\n\tfetchTx := func(peer string, hashes []common.Hash) error {\n\t\tp := h.peers.peer(peer)\n\t\tif p == nil {\n\t\t\treturn errors.New(\"unknown peer\")\n\t\t}\n\t\treturn p.RequestTxs(hashes)\n\t}\n\th.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, h.txpool.AddRemotes, fetchTx)\n\th.chainSync = newChainSyncer(h)\n\treturn h, nil\n}\n\n// runEthPeer registers an eth peer into the joint eth/snap peerset, adds it to\n// various subsystems and starts handling messages.\nfunc (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error {\n\t// If the peer has a `snap` extension, wait for it to connect so we can have\n\t// a uniform initialization/teardown mechanism\n\tsnap, err := h.peers.waitSnapExtension(peer)\n\tif err != nil {\n\t\tpeer.Log().Error(\"Snapshot extension barrier failed\", \"err\", err)\n\t\treturn err\n\t}\n\t// TODO(karalabe): Not sure why this is needed\n\tif !h.chainSync.handlePeerEvent(peer) {\n\t\treturn p2p.DiscQuitting\n\t}\n\th.peerWG.Add(1)\n\tdefer h.peerWG.Done()\n\n\t// Execute the Ethereum handshake\n\tvar (\n\t\tgenesis = h.chain.Genesis()\n\t\thead    = h.chain.CurrentHeader()\n\t\thash    = head.Hash()\n\t\tnumber  = head.Number.Uint64()\n\t\ttd      = h.chain.GetTd(hash, number)\n\t)\n\tforkID := forkid.NewID(h.chain.Config(), h.chain.Genesis().Hash(), h.chain.CurrentHeader().Number.Uint64())\n\tif err := peer.Handshake(h.networkID, td, hash, genesis.Hash(), forkID, h.forkFilter); err != nil {\n\t\tpeer.Log().Debug(\"Ethereum handshake failed\", \"err\", err)\n\t\treturn err\n\t}\n\treject := false // reserved peer slots\n\tif atomic.LoadUint32(&h.snapSync) == 1 {\n\t\tif snap == nil {\n\t\t\t// If we are running snap-sync, we want to reserve roughly half the peer\n\t\t\t// slots for peers supporting the snap protocol.\n\t\t\t// The logic here is: we only allow up to 5 more non-snap peers than snap-peers.\n\t\t\tif all, snp := h.peers.len(), h.peers.snapLen(); all-snp > snp+5 {\n\t\t\t\treject = true\n\t\t\t}\n\t\t}\n\t}\n\t// Ignore maxPeers if this is a trusted peer\n\tif !peer.Peer.Info().Network.Trusted {\n\t\tif reject || h.peers.len() >= h.maxPeers {\n\t\t\treturn p2p.DiscTooManyPeers\n\t\t}\n\t}\n\tpeer.Log().Debug(\"Ethereum peer connected\", \"name\", peer.Name())\n\n\t// Register the peer locally\n\tif err := h.peers.registerPeer(peer, snap); err != nil {\n\t\tpeer.Log().Error(\"Ethereum peer registration failed\", \"err\", err)\n\t\treturn err\n\t}\n\tdefer h.removePeer(peer.ID())\n\n\tp := h.peers.peer(peer.ID())\n\tif p == nil {\n\t\treturn errors.New(\"peer dropped during handling\")\n\t}\n\t// Register the peer in the downloader.
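 Registration makes the peer available to the sync process as a download source.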
If the downloader considers it banned, we disconnect\n\tif err := h.downloader.RegisterPeer(peer.ID(), peer.Version(), peer); err != nil {\n\t\tpeer.Log().Error(\"Failed to register peer in eth syncer\", \"err\", err)\n\t\treturn err\n\t}\n\tif snap != nil {\n\t\tif err := h.downloader.SnapSyncer.Register(snap); err != nil {\n\t\t\tpeer.Log().Error(\"Failed to register peer in snap syncer\", \"err\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\th.chainSync.handlePeerEvent(peer)\n\n\t// Propagate existing transactions. New transactions appearing\n\t// after this will be sent via broadcasts.\n\th.syncTransactions(peer)\n\n\t// If we have a trusted CHT, reject all peers below that (avoid fast sync eclipse)\n\tif h.checkpointHash != (common.Hash{}) {\n\t\t// Request the peer's checkpoint header for chain height/weight validation\n\t\tif err := peer.RequestHeadersByNumber(h.checkpointNumber, 1, 0, false); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Start a timer to disconnect if the peer doesn't reply in time\n\t\tp.syncDrop = time.AfterFunc(syncChallengeTimeout, func() {\n\t\t\tpeer.Log().Warn(\"Checkpoint challenge timed out, dropping\", \"addr\", peer.RemoteAddr(), \"type\", peer.Name())\n\t\t\th.removePeer(peer.ID())\n\t\t})\n\t\t// Make sure it's cleaned up if the peer dies off\n\t\tdefer func() {\n\t\t\tif p.syncDrop != nil {\n\t\t\t\tp.syncDrop.Stop()\n\t\t\t\tp.syncDrop = nil\n\t\t\t}\n\t\t}()\n\t}\n\t// If we have any explicit whitelist block hashes, request them\n\tfor number := range h.whitelist {\n\t\tif err := peer.RequestHeadersByNumber(number, 1, 0, false); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// Handle incoming messages until the connection is torn down\n\treturn handler(peer)\n}\n\n// runSnapExtension registers a `snap` peer into the joint eth/snap peerset and\n// starts handling inbound messages. 
As `snap` is only a satellite protocol to\n// `eth`, all subsystem registrations and lifecycle management will be done by\n// the main `eth` handler to prevent strange races.\nfunc (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error {\n\th.peerWG.Add(1)\n\tdefer h.peerWG.Done()\n\n\tif err := h.peers.registerSnapExtension(peer); err != nil {\n\t\tpeer.Log().Error(\"Snapshot extension registration failed\", \"err\", err)\n\t\treturn err\n\t}\n\treturn handler(peer)\n}\n\n// removePeer unregisters a peer from the downloader and fetchers, removes it from\n// the set of tracked peers and closes the network connection to it.\nfunc (h *handler) removePeer(id string) {\n\t// Create a custom logger to avoid printing the entire id\n\tvar logger log.Logger\n\tif len(id) < 16 {\n\t\t// Tests use short IDs, don't choke on them\n\t\tlogger = log.New(\"peer\", id)\n\t} else {\n\t\tlogger = log.New(\"peer\", id[:8])\n\t}\n\t// Abort if the peer does not exist\n\tpeer := h.peers.peer(id)\n\tif peer == nil {\n\t\tlogger.Error(\"Ethereum peer removal failed\", \"err\", errPeerNotRegistered)\n\t\treturn\n\t}\n\t// Remove the `eth` peer if it exists\n\tlogger.Debug(\"Removing Ethereum peer\", \"snap\", peer.snapExt != nil)\n\n\t// Remove the `snap` extension if it exists\n\tif peer.snapExt != nil {\n\t\th.downloader.SnapSyncer.Unregister(id)\n\t}\n\th.downloader.UnregisterPeer(id)\n\th.txFetcher.Drop(id)\n\n\tif err := h.peers.unregisterPeer(id); err != nil {\n\t\tlogger.Error(\"Ethereum peer removal failed\", \"err\", err)\n\t}\n\t// Hard disconnect at the networking layer\n\tpeer.Peer.Disconnect(p2p.DiscUselessPeer)\n}\n\nfunc (h *handler) Start(maxPeers int) {\n\th.maxPeers = maxPeers\n\n\t// broadcast transactions\n\th.wg.Add(1)\n\th.txsCh = make(chan core.NewTxsEvent, txChanSize)\n\th.txsSub = h.txpool.SubscribeNewTxsEvent(h.txsCh)\n\tgo h.txBroadcastLoop()\n\n\t// broadcast mined blocks\n\th.wg.Add(1)\n\th.minedBlockSub = h.eventMux.Subscribe(core.NewMinedBlockEvent{})\n\tgo h.minedBroadcastLoop()\n\n\t// start sync handlers\n\th.wg.Add(2)\n\tgo h.chainSync.loop()\n\tgo h.txsyncLoop64() // TODO(karalabe): Legacy initial tx exchange, drop with eth/64.\n}\n\nfunc (h *handler) Stop() {\n\th.txsSub.Unsubscribe()        // quits txBroadcastLoop\n\th.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop\n\n\t// Quit chainSync and txsync64.\n\t// After this is done, no new peers will be accepted.\n\tclose(h.quitSync)\n\th.wg.Wait()\n\n\t// Disconnect existing sessions.\n\t// This also closes the gate for any new registrations on the peer set.\n\t// Sessions which are already established but not added to h.peers yet\n\t// will exit when they try to register.\n\th.peers.close()\n\th.peerWG.Wait()\n\n\tlog.Info(\"Ethereum protocol stopped\")\n}\n\n// BroadcastBlock will either propagate a block to a subset of its peers, or\n// will only announce its availability (depending on what's requested).\nfunc (h *handler) BroadcastBlock(block *types.Block, propagate bool) {\n\thash := block.Hash()\n\tpeers := h.peers.peersWithoutBlock(hash)\n\n\t// If propagation is requested, send to a subset of our peers\n\tif propagate {\n\t\t// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)\n\t\tvar td *big.Int\n\t\tif parent := h.chain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {\n\t\t\ttd = new(big.Int).Add(block.Difficulty(), h.chain.GetTd(block.ParentHash(), block.NumberU64()-1))\n\t\t} else {\n\t\t\tlog.Error(\"Propagating dangling block\", \"number\", 
block.Number(), \"hash\", hash)\n\t\t\treturn\n\t\t}\n\t\t// Send the block to a subset of our peers\n\t\ttransfer := peers[:int(math.Sqrt(float64(len(peers))))]\n\t\tfor _, peer := range transfer {\n\t\t\tpeer.AsyncSendNewBlock(block, td)\n\t\t}\n\t\tlog.Trace(\"Propagated block\", \"hash\", hash, \"recipients\", len(transfer), \"duration\", common.PrettyDuration(time.Since(block.ReceivedAt)))\n\t\treturn\n\t}\n\t// Otherwise if the block is indeed in out own chain, announce it\n\tif h.chain.HasBlock(hash, block.NumberU64()) {\n\t\tfor _, peer := range peers {\n\t\t\tpeer.AsyncSendNewBlockHash(block)\n\t\t}\n\t\tlog.Trace(\"Announced block\", \"hash\", hash, \"recipients\", len(peers), \"duration\", common.PrettyDuration(time.Since(block.ReceivedAt)))\n\t}\n}\n\n// BroadcastTransactions will propagate a batch of transactions\n// - To a square root of all peers\n// - And, separately, as announcements to all peers which are not known to\n// already have the given transaction.\nfunc (h *handler) BroadcastTransactions(txs types.Transactions) {\n\tvar (\n\t\tannoCount   int // Count of announcements made\n\t\tannoPeers   int\n\t\tdirectCount int // Count of the txs sent directly to peers\n\t\tdirectPeers int // Count of the peers that were sent transactions directly\n\n\t\ttxset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly\n\t\tannos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce\n\n\t)\n\t// Broadcast transactions to a batch of peers not knowing about it\n\tfor _, tx := range txs {\n\t\tpeers := h.peers.peersWithoutTransaction(tx.Hash())\n\t\t// Send the tx unconditionally to a subset of our peers\n\t\tnumDirect := int(math.Sqrt(float64(len(peers))))\n\t\tfor _, peer := range peers[:numDirect] {\n\t\t\ttxset[peer] = append(txset[peer], tx.Hash())\n\t\t}\n\t\t// For the remaining peers, send announcement only\n\t\tfor _, peer := range peers[numDirect:] {\n\t\t\tannos[peer] = append(annos[peer], tx.Hash())\n\t\t}\n\t}\n\tfor peer, hashes := range txset {\n\t\tdirectPeers++\n\t\tdirectCount += len(hashes)\n\t\tpeer.AsyncSendTransactions(hashes)\n\t}\n\tfor peer, hashes := range annos {\n\t\tannoPeers++\n\t\tannoCount += len(hashes)\n\t\tif peer.Version() >= eth.ETH65 {\n\t\t\tpeer.AsyncSendPooledTransactionHashes(hashes)\n\t\t} else {\n\t\t\tpeer.AsyncSendTransactions(hashes)\n\t\t}\n\t}\n\tlog.Debug(\"Transaction broadcast\", \"txs\", len(txs),\n\t\t\"announce packs\", annoPeers, \"announced hashes\", annoCount,\n\t\t\"tx packs\", directPeers, \"broadcast txs\", directCount)\n}\n\n// minedBroadcastLoop sends mined blocks to connected peers.\nfunc (h *handler) minedBroadcastLoop() {\n\tdefer h.wg.Done()\n\n\tfor obj := range h.minedBlockSub.Chan() {\n\t\tif ev, ok := obj.Data.(core.NewMinedBlockEvent); ok {\n\t\t\th.BroadcastBlock(ev.Block, true)  // First propagate block to peers\n\t\t\th.BroadcastBlock(ev.Block, false) // Only then announce to the rest\n\t\t}\n\t}\n}\n\n// txBroadcastLoop announces new transactions to connected peers.\nfunc (h *handler) txBroadcastLoop() {\n\tdefer h.wg.Done()\n\tfor {\n\t\tselect {\n\t\tcase event := <-h.txsCh:\n\t\t\th.BroadcastTransactions(event.Txs)\n\t\tcase <-h.txsSub.Err():\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
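The fan-out policy in `BroadcastBlock` and `BroadcastTransactions` above sends the full payload to only `int(math.Sqrt(n))` of the n peers that lack it, and a cheap hash announcement to the rest. A minimal standalone sketch of that split (the peer names are invented; this is not geth's API):

```go
package main

import (
	"fmt"
	"math"
)

// splitPropagation mirrors the fan-out used by BroadcastBlock and
// BroadcastTransactions: the first int(sqrt(n)) peers receive the full
// payload, the remainder only a hash announcement.
func splitPropagation(peers []string) (direct, announce []string) {
	n := int(math.Sqrt(float64(len(peers))))
	return peers[:n], peers[n:]
}

func main() {
	peers := []string{"p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8", "p9"}
	direct, announce := splitPropagation(peers)
	fmt.Println("direct:", direct)     // 3 of 9 peers get the full block/txs
	fmt.Println("announce:", announce) // the other 6 get hash announcements only
}
```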
  {
    "path": "eth/handler_eth.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/eth\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\n// ethHandler implements the eth.Backend interface to handle the various network\n// packets that are sent as replies or broadcasts.\ntype ethHandler handler\n\nfunc (h *ethHandler) Chain() *core.BlockChain     { return h.chain }\nfunc (h *ethHandler) StateBloom() *trie.SyncBloom { return h.stateBloom }\nfunc (h *ethHandler) TxPool() eth.TxPool          { return h.txpool }\n\n// RunPeer is invoked when a peer joins on the `eth` protocol.\nfunc (h *ethHandler) RunPeer(peer *eth.Peer, hand eth.Handler) error {\n\treturn (*handler)(h).runEthPeer(peer, hand)\n}\n\n// PeerInfo retrieves all known `eth` information about a peer.\nfunc (h *ethHandler) PeerInfo(id enode.ID) interface{} {\n\tif p := h.peers.peer(id.String()); p != nil {\n\t\treturn p.info()\n\t}\n\treturn nil\n}\n\n// AcceptTxs retrieves whether transaction processing is enabled on the node\n// or if inbound transactions should simply be dropped.\nfunc (h *ethHandler) AcceptTxs() bool {\n\treturn atomic.LoadUint32(&h.acceptTxs) == 1\n}\n\n// Handle is invoked from a peer's message handler when it receives a new remote\n// message that the handler couldn't consume and serve itself.\nfunc (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error {\n\t// Consume any broadcasts and announces, forwarding the rest to the downloader\n\tswitch packet := packet.(type) {\n\tcase *eth.BlockHeadersPacket:\n\t\treturn h.handleHeaders(peer, *packet)\n\n\tcase *eth.BlockBodiesPacket:\n\t\ttxset, uncleset := packet.Unpack()\n\t\treturn h.handleBodies(peer, txset, uncleset)\n\n\tcase *eth.NodeDataPacket:\n\t\tif err := h.downloader.DeliverNodeData(peer.ID(), *packet); err != nil {\n\t\t\tlog.Debug(\"Failed to deliver node state data\", \"err\", err)\n\t\t}\n\t\treturn nil\n\n\tcase *eth.ReceiptsPacket:\n\t\tif err := h.downloader.DeliverReceipts(peer.ID(), *packet); err != nil {\n\t\t\tlog.Debug(\"Failed to deliver receipts\", \"err\", err)\n\t\t}\n\t\treturn nil\n\n\tcase *eth.NewBlockHashesPacket:\n\t\thashes, numbers := packet.Unpack()\n\t\treturn h.handleBlockAnnounces(peer, hashes, numbers)\n\n\tcase *eth.NewBlockPacket:\n\t\treturn h.handleBlockBroadcast(peer, packet.Block, packet.TD)\n\n\tcase *eth.NewPooledTransactionHashesPacket:\n\t\treturn h.txFetcher.Notify(peer.ID(), *packet)\n\n\tcase 
*eth.TransactionsPacket:\n\t\treturn h.txFetcher.Enqueue(peer.ID(), *packet, false)\n\n\tcase *eth.PooledTransactionsPacket:\n\t\treturn h.txFetcher.Enqueue(peer.ID(), *packet, true)\n\n\tdefault:\n\t\treturn fmt.Errorf(\"unexpected eth packet type: %T\", packet)\n\t}\n}\n\n// handleHeaders is invoked from a peer's message handler when it transmits a batch\n// of headers for the local node to process.\nfunc (h *ethHandler) handleHeaders(peer *eth.Peer, headers []*types.Header) error {\n\tp := h.peers.peer(peer.ID())\n\tif p == nil {\n\t\treturn errors.New(\"unregistered during callback\")\n\t}\n\t// If no headers were received, but we're expecting a checkpoint header, treat it as such\n\tif len(headers) == 0 && p.syncDrop != nil {\n\t\t// Stop the timer either way, decide later to drop or not\n\t\tp.syncDrop.Stop()\n\t\tp.syncDrop = nil\n\n\t\t// If we're doing a fast (or snap) sync, we must enforce the checkpoint block to avoid\n\t\t// eclipse attacks. Unsynced nodes are welcome to connect after we're done\n\t\t// joining the network\n\t\tif atomic.LoadUint32(&h.fastSync) == 1 {\n\t\t\tpeer.Log().Warn(\"Dropping unsynced node during sync\", \"addr\", peer.RemoteAddr(), \"type\", peer.Name())\n\t\t\treturn errors.New(\"unsynced node cannot serve sync\")\n\t\t}\n\t}\n\t// Filter out any explicitly requested headers, deliver the rest to the downloader\n\tfilter := len(headers) == 1\n\tif filter {\n\t\t// If it's a potential sync progress check, validate the content and advertised chain weight\n\t\tif p.syncDrop != nil && headers[0].Number.Uint64() == h.checkpointNumber {\n\t\t\t// Disable the sync drop timer\n\t\t\tp.syncDrop.Stop()\n\t\t\tp.syncDrop = nil\n\n\t\t\t// Validate the header and either drop the peer or continue\n\t\t\tif headers[0].Hash() != h.checkpointHash {\n\t\t\t\treturn errors.New(\"checkpoint hash mismatch\")\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\t// Otherwise if it's a whitelisted block, validate against the set\n\t\tif want, ok := h.whitelist[headers[0].Number.Uint64()]; ok {\n\t\t\tif hash := headers[0].Hash(); want != hash {\n\t\t\t\tpeer.Log().Info(\"Whitelist mismatch, dropping peer\", \"number\", headers[0].Number.Uint64(), \"hash\", hash, \"want\", want)\n\t\t\t\treturn errors.New(\"whitelist block mismatch\")\n\t\t\t}\n\t\t\tpeer.Log().Debug(\"Whitelist block verified\", \"number\", headers[0].Number.Uint64(), \"hash\", want)\n\t\t}\n\t\t// Irrespective of the fork checks, send the header to the fetcher just in case\n\t\theaders = h.blockFetcher.FilterHeaders(peer.ID(), headers, time.Now())\n\t}\n\tif len(headers) > 0 || !filter {\n\t\terr := h.downloader.DeliverHeaders(peer.ID(), headers)\n\t\tif err != nil {\n\t\t\tlog.Debug(\"Failed to deliver headers\", \"err\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\n// handleBodies is invoked from a peer's message handler when it transmits a batch\n// of block bodies for the local node to process.\nfunc (h *ethHandler) handleBodies(peer *eth.Peer, txs [][]*types.Transaction, uncles [][]*types.Header) error {\n\t// Filter out any explicitly requested bodies, deliver the rest to the downloader\n\tfilter := len(txs) > 0 || len(uncles) > 0\n\tif filter {\n\t\ttxs, uncles = h.blockFetcher.FilterBodies(peer.ID(), txs, uncles, time.Now())\n\t}\n\tif len(txs) > 0 || len(uncles) > 0 || !filter {\n\t\terr := h.downloader.DeliverBodies(peer.ID(), txs, uncles)\n\t\tif err != nil {\n\t\t\tlog.Debug(\"Failed to deliver bodies\", \"err\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\n// handleBlockAnnounces is invoked from a peer's message handler 
when it transmits a\n// batch of block announcements for the local node to process.\nfunc (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash, numbers []uint64) error {\n\t// Schedule all the unknown hashes for retrieval\n\tvar (\n\t\tunknownHashes  = make([]common.Hash, 0, len(hashes))\n\t\tunknownNumbers = make([]uint64, 0, len(numbers))\n\t)\n\tfor i := 0; i < len(hashes); i++ {\n\t\tif !h.chain.HasBlock(hashes[i], numbers[i]) {\n\t\t\tunknownHashes = append(unknownHashes, hashes[i])\n\t\t\tunknownNumbers = append(unknownNumbers, numbers[i])\n\t\t}\n\t}\n\tfor i := 0; i < len(unknownHashes); i++ {\n\t\th.blockFetcher.Notify(peer.ID(), unknownHashes[i], unknownNumbers[i], time.Now(), peer.RequestOneHeader, peer.RequestBodies)\n\t}\n\treturn nil\n}\n\n// handleBlockBroadcast is invoked from a peer's message handler when it transmits a\n// block broadcast for the local node to process.\nfunc (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block, td *big.Int) error {\n\t// Schedule the block for import\n\th.blockFetcher.Enqueue(peer.ID(), block)\n\n\t// Assuming the block is importable by the peer, but possibly not yet done so,\n\t// calculate the head hash and TD that the peer truly must have.\n\tvar (\n\t\ttrueHead = block.ParentHash()\n\t\ttrueTD   = new(big.Int).Sub(td, block.Difficulty())\n\t)\n\t// Update the peer's total difficulty if better than the previous\n\tif _, td := peer.Head(); trueTD.Cmp(td) > 0 {\n\t\tpeer.SetHead(trueHead, trueTD)\n\t\th.chainSync.handlePeerEvent(peer)\n\t}\n\treturn nil\n}\n"
  },
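`handleBlockBroadcast` above infers the sender's chain state from a `NewBlock` message: the advertised total difficulty includes the broadcast block itself, so the only head the peer is guaranteed to own is the block's parent, at the announced TD minus the block's own difficulty. A small sketch of that arithmetic; the numbers are illustrative, not taken from any real chain:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// TD carried in a NewBlock packet includes the broadcast block itself.
	announcedTD := big.NewInt(1000000)   // hypothetical TD from the packet
	blockDifficulty := big.NewInt(131072) // difficulty of the broadcast block

	// What handleBlockBroadcast records: the parent is the guaranteed head,
	// at the announced TD minus the block's own difficulty.
	trueTD := new(big.Int).Sub(announcedTD, blockDifficulty)
	fmt.Println("guaranteed TD of the sender's head:", trueTD) // 868928
}
```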
  {
    "path": "eth/handler_eth_test.go",
    "content": "// Copyright 2014 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"math/rand\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/eth\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\n// testEthHandler is a mock event handler to listen for inbound network requests\n// on the `eth` protocol and convert them into a more easily testable form.\ntype testEthHandler struct {\n\tblockBroadcasts event.Feed\n\ttxAnnounces     event.Feed\n\ttxBroadcasts    event.Feed\n}\n\nfunc (h *testEthHandler) Chain() *core.BlockChain              { panic(\"no backing chain\") }\nfunc (h *testEthHandler) StateBloom() *trie.SyncBloom          { panic(\"no backing state bloom\") }\nfunc (h *testEthHandler) TxPool() eth.TxPool                   { panic(\"no backing tx pool\") }\nfunc (h *testEthHandler) AcceptTxs() bool                      { return true }\nfunc (h *testEthHandler) RunPeer(*eth.Peer, eth.Handler) error { panic(\"not used in tests\") }\nfunc (h *testEthHandler) PeerInfo(enode.ID) interface{}        { panic(\"not used in tests\") }\n\nfunc (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error {\n\tswitch packet := packet.(type) {\n\tcase *eth.NewBlockPacket:\n\t\th.blockBroadcasts.Send(packet.Block)\n\t\treturn nil\n\n\tcase *eth.NewPooledTransactionHashesPacket:\n\t\th.txAnnounces.Send(([]common.Hash)(*packet))\n\t\treturn nil\n\n\tcase *eth.TransactionsPacket:\n\t\th.txBroadcasts.Send(([]*types.Transaction)(*packet))\n\t\treturn nil\n\n\tcase *eth.PooledTransactionsPacket:\n\t\th.txBroadcasts.Send(([]*types.Transaction)(*packet))\n\t\treturn nil\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unexpected eth packet type in tests: %T\", packet))\n\t}\n}\n\n// Tests that peers are correctly accepted (or rejected) based on the advertised\n// fork IDs in the protocol handshake.\nfunc TestForkIDSplit64(t *testing.T) { testForkIDSplit(t, 64) }\nfunc TestForkIDSplit65(t *testing.T) { testForkIDSplit(t, 65) }\n\nfunc testForkIDSplit(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\tvar (\n\t\tengine = ethash.NewFaker()\n\n\t\tconfigNoFork  = &params.ChainConfig{HomesteadBlock: 
big.NewInt(1)}\n\t\tconfigProFork = &params.ChainConfig{\n\t\t\tHomesteadBlock: big.NewInt(1),\n\t\t\tEIP150Block:    big.NewInt(2),\n\t\t\tEIP155Block:    big.NewInt(2),\n\t\t\tEIP158Block:    big.NewInt(2),\n\t\t\tByzantiumBlock: big.NewInt(3),\n\t\t}\n\t\tdbNoFork  = rawdb.NewMemoryDatabase()\n\t\tdbProFork = rawdb.NewMemoryDatabase()\n\n\t\tgspecNoFork  = &core.Genesis{Config: configNoFork}\n\t\tgspecProFork = &core.Genesis{Config: configProFork}\n\n\t\tgenesisNoFork  = gspecNoFork.MustCommit(dbNoFork)\n\t\tgenesisProFork = gspecProFork.MustCommit(dbProFork)\n\n\t\tchainNoFork, _  = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil, nil)\n\t\tchainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil, nil)\n\n\t\tblocksNoFork, _  = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)\n\t\tblocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)\n\n\t\tethNoFork, _ = newHandler(&handlerConfig{\n\t\t\tDatabase:   dbNoFork,\n\t\t\tChain:      chainNoFork,\n\t\t\tTxPool:     newTestTxPool(),\n\t\t\tNetwork:    1,\n\t\t\tSync:       downloader.FullSync,\n\t\t\tBloomCache: 1,\n\t\t})\n\t\tethProFork, _ = newHandler(&handlerConfig{\n\t\t\tDatabase:   dbProFork,\n\t\t\tChain:      chainProFork,\n\t\t\tTxPool:     newTestTxPool(),\n\t\t\tNetwork:    1,\n\t\t\tSync:       downloader.FullSync,\n\t\t\tBloomCache: 1,\n\t\t})\n\t)\n\tethNoFork.Start(1000)\n\tethProFork.Start(1000)\n\n\t// Clean up everything after ourselves\n\tdefer chainNoFork.Stop()\n\tdefer chainProFork.Stop()\n\n\tdefer ethNoFork.Stop()\n\tdefer ethProFork.Stop()\n\n\t// Both nodes should allow the other to connect (same genesis, next fork is the same)\n\tp2pNoFork, p2pProFork := p2p.MsgPipe()\n\tdefer p2pNoFork.Close()\n\tdefer p2pProFork.Close()\n\n\tpeerNoFork := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, \"\", nil), p2pNoFork, nil)\n\tpeerProFork := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, \"\", nil), p2pProFork, nil)\n\tdefer peerNoFork.Close()\n\tdefer peerProFork.Close()\n\n\terrc := make(chan error, 2)\n\tgo func(errc chan error) {\n\t\terrc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })\n\t}(errc)\n\tgo func(errc chan error) {\n\t\terrc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })\n\t}(errc)\n\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase err := <-errc:\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"frontier nofork <-> profork failed: %v\", err)\n\t\t\t}\n\t\tcase <-time.After(250 * time.Millisecond):\n\t\t\tt.Fatalf(\"frontier nofork <-> profork handler timeout\")\n\t\t}\n\t}\n\t// Progress into Homestead. 
Forks match, so we don't care what the future holds\n\tchainNoFork.InsertChain(blocksNoFork[:1])\n\tchainProFork.InsertChain(blocksProFork[:1])\n\n\tp2pNoFork, p2pProFork = p2p.MsgPipe()\n\tdefer p2pNoFork.Close()\n\tdefer p2pProFork.Close()\n\n\tpeerNoFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, \"\", nil), p2pNoFork, nil)\n\tpeerProFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, \"\", nil), p2pProFork, nil)\n\tdefer peerNoFork.Close()\n\tdefer peerProFork.Close()\n\n\terrc = make(chan error, 2)\n\tgo func(errc chan error) {\n\t\terrc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })\n\t}(errc)\n\tgo func(errc chan error) {\n\t\terrc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })\n\t}(errc)\n\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase err := <-errc:\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"homestead nofork <-> profork failed: %v\", err)\n\t\t\t}\n\t\tcase <-time.After(250 * time.Millisecond):\n\t\t\tt.Fatalf(\"homestead nofork <-> profork handler timeout\")\n\t\t}\n\t}\n\t// Progress into Spurious. Forks mismatch, signalling differing chains, reject\n\tchainNoFork.InsertChain(blocksNoFork[1:2])\n\tchainProFork.InsertChain(blocksProFork[1:2])\n\n\tp2pNoFork, p2pProFork = p2p.MsgPipe()\n\tdefer p2pNoFork.Close()\n\tdefer p2pProFork.Close()\n\n\tpeerNoFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, \"\", nil), p2pNoFork, nil)\n\tpeerProFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, \"\", nil), p2pProFork, nil)\n\tdefer peerNoFork.Close()\n\tdefer peerProFork.Close()\n\n\terrc = make(chan error, 2)\n\tgo func(errc chan error) {\n\t\terrc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil })\n\t}(errc)\n\tgo func(errc chan error) {\n\t\terrc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil })\n\t}(errc)\n\n\tvar successes int\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase err := <-errc:\n\t\t\tif err == nil {\n\t\t\t\tsuccesses++\n\t\t\t\tif successes == 2 { // Only one side disconnects\n\t\t\t\t\tt.Fatalf(\"fork ID rejection didn't happen\")\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-time.After(250 * time.Millisecond):\n\t\t\tt.Fatalf(\"split peers not rejected\")\n\t\t}\n\t}\n}\n\n// Tests that received transactions are added to the local pool.\nfunc TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) }\nfunc TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, 65) }\n\nfunc testRecvTransactions(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\t// Create a message handler, configure it to accept transactions and watch them\n\thandler := newTestHandler()\n\tdefer handler.close()\n\n\thandler.handler.acceptTxs = 1 // mark synced to accept transactions\n\n\ttxs := make(chan core.NewTxsEvent)\n\tsub := handler.txpool.SubscribeNewTxsEvent(txs)\n\tdefer sub.Unsubscribe()\n\n\t// Create a source peer to send messages through and a sink handler to receive them\n\tp2pSrc, p2pSink := p2p.MsgPipe()\n\tdefer p2pSrc.Close()\n\tdefer p2pSink.Close()\n\n\tsrc := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, \"\", nil), p2pSrc, handler.txpool)\n\tsink := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, \"\", nil), p2pSink, handler.txpool)\n\tdefer src.Close()\n\tdefer sink.Close()\n\n\tgo handler.handler.runEthPeer(sink, func(peer *eth.Peer) error {\n\t\treturn eth.Handle((*ethHandler)(handler.handler), peer)\n\t})\n\t// Run the handshake locally to avoid spinning up a source handler\n\tvar (\n\t\tgenesis = 
handler.chain.Genesis()\n\t\thead    = handler.chain.CurrentBlock()\n\t\ttd      = handler.chain.GetTd(head.Hash(), head.NumberU64())\n\t)\n\tif err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {\n\t\tt.Fatalf(\"failed to run protocol handshake\")\n\t}\n\t// Send the transaction to the sink and verify that it's added to the tx pool\n\ttx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)\n\ttx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)\n\n\tif err := src.SendTransactions([]*types.Transaction{tx}); err != nil {\n\t\tt.Fatalf(\"failed to send transaction: %v\", err)\n\t}\n\tselect {\n\tcase event := <-txs:\n\t\tif len(event.Txs) != 1 {\n\t\t\tt.Errorf(\"wrong number of added transactions: got %d, want 1\", len(event.Txs))\n\t\t} else if event.Txs[0].Hash() != tx.Hash() {\n\t\t\tt.Errorf(\"added wrong tx hash: got %v, want %v\", event.Txs[0].Hash(), tx.Hash())\n\t\t}\n\tcase <-time.After(2 * time.Second):\n\t\tt.Errorf(\"no NewTxsEvent received within 2 seconds\")\n\t}\n}\n\n// This test checks that pending transactions are sent.\nfunc TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) }\nfunc TestSendTransactions65(t *testing.T) { testSendTransactions(t, 65) }\n\nfunc testSendTransactions(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\t// Create a message handler and fill the pool with big transactions\n\thandler := newTestHandler()\n\tdefer handler.close()\n\n\tinsert := make([]*types.Transaction, 100)\n\tfor nonce := range insert {\n\t\ttx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, txsyncPackSize/10))\n\t\ttx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)\n\n\t\tinsert[nonce] = tx\n\t}\n\tgo handler.txpool.AddRemotes(insert) // Need goroutine to not block on feed\n\ttime.Sleep(250 * time.Millisecond)   // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join)\n\n\t// Create a source handler to send messages through and a sink peer to receive them\n\tp2pSrc, p2pSink := p2p.MsgPipe()\n\tdefer p2pSrc.Close()\n\tdefer p2pSink.Close()\n\n\tsrc := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, \"\", nil), p2pSrc, handler.txpool)\n\tsink := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, \"\", nil), p2pSink, handler.txpool)\n\tdefer src.Close()\n\tdefer sink.Close()\n\n\tgo handler.handler.runEthPeer(src, func(peer *eth.Peer) error {\n\t\treturn eth.Handle((*ethHandler)(handler.handler), peer)\n\t})\n\t// Run the handshake locally to avoid spinning up a source handler\n\tvar (\n\t\tgenesis = handler.chain.Genesis()\n\t\thead    = handler.chain.CurrentBlock()\n\t\ttd      = handler.chain.GetTd(head.Hash(), head.NumberU64())\n\t)\n\tif err := sink.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {\n\t\tt.Fatalf(\"failed to run protocol handshake\")\n\t}\n\t// After the handshake completes, the source handler should stream the sink\n\t// the transactions, subscribe to all inbound network events\n\tbackend := new(testEthHandler)\n\n\tanns := make(chan []common.Hash)\n\tannSub := backend.txAnnounces.Subscribe(anns)\n\tdefer annSub.Unsubscribe()\n\n\tbcasts := make(chan []*types.Transaction)\n\tbcastSub := backend.txBroadcasts.Subscribe(bcasts)\n\tdefer bcastSub.Unsubscribe()\n\n\tgo eth.Handle(backend, sink)\n\n\t// Make sure we get all the 
transactions on the correct channels\n\tseen := make(map[common.Hash]struct{})\n\tfor len(seen) < len(insert) {\n\t\tswitch protocol {\n\t\tcase 63, 64:\n\t\t\tselect {\n\t\t\tcase <-anns:\n\t\t\t\tt.Errorf(\"tx announce received on pre eth/65\")\n\t\t\tcase txs := <-bcasts:\n\t\t\t\tfor _, tx := range txs {\n\t\t\t\t\tif _, ok := seen[tx.Hash()]; ok {\n\t\t\t\t\t\tt.Errorf(\"duplicate transaction announced: %x\", tx.Hash())\n\t\t\t\t\t}\n\t\t\t\t\tseen[tx.Hash()] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\t\tcase 65:\n\t\t\tselect {\n\t\t\tcase hashes := <-anns:\n\t\t\t\tfor _, hash := range hashes {\n\t\t\t\t\tif _, ok := seen[hash]; ok {\n\t\t\t\t\t\tt.Errorf(\"duplicate transaction announced: %x\", hash)\n\t\t\t\t\t}\n\t\t\t\t\tseen[hash] = struct{}{}\n\t\t\t\t}\n\t\t\tcase <-bcasts:\n\t\t\t\tt.Errorf(\"initial tx broadcast received on post eth/65\")\n\t\t\t}\n\n\t\tdefault:\n\t\t\tpanic(\"unsupported protocol, please extend test\")\n\t\t}\n\t}\n\tfor _, tx := range insert {\n\t\tif _, ok := seen[tx.Hash()]; !ok {\n\t\t\tt.Errorf(\"missing transaction: %x\", tx.Hash())\n\t\t}\n\t}\n}\n\n// Tests that transactions get propagated to all attached peers, either via direct\n// broadcasts or via announcements/retrievals.\nfunc TestTransactionPropagation64(t *testing.T) { testTransactionPropagation(t, 64) }\nfunc TestTransactionPropagation65(t *testing.T) { testTransactionPropagation(t, 65) }\n\nfunc testTransactionPropagation(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\t// Create a source handler to send transactions from and a number of sinks\n\t// to receive them. We need multiple sinks since a one-to-one peering would\n\t// broadcast all transactions without announcement.\n\tsource := newTestHandler()\n\tdefer source.close()\n\n\tsinks := make([]*testHandler, 10)\n\tfor i := 0; i < len(sinks); i++ {\n\t\tsinks[i] = newTestHandler()\n\t\tdefer sinks[i].close()\n\n\t\tsinks[i].handler.acceptTxs = 1 // mark synced to accept transactions\n\t}\n\t// Interconnect all the sink handlers with the source handler\n\tfor i, sink := range sinks {\n\t\tsink := sink // Closure for goroutine below\n\n\t\tsourcePipe, sinkPipe := p2p.MsgPipe()\n\t\tdefer sourcePipe.Close()\n\t\tdefer sinkPipe.Close()\n\n\t\tsourcePeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{byte(i)}, \"\", nil), sourcePipe, source.txpool)\n\t\tsinkPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{0}, \"\", nil), sinkPipe, sink.txpool)\n\t\tdefer sourcePeer.Close()\n\t\tdefer sinkPeer.Close()\n\n\t\tgo source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {\n\t\t\treturn eth.Handle((*ethHandler)(source.handler), peer)\n\t\t})\n\t\tgo sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error {\n\t\t\treturn eth.Handle((*ethHandler)(sink.handler), peer)\n\t\t})\n\t}\n\t// Subscribe to all the transaction pools\n\ttxChs := make([]chan core.NewTxsEvent, len(sinks))\n\tfor i := 0; i < len(sinks); i++ {\n\t\ttxChs[i] = make(chan core.NewTxsEvent, 1024)\n\n\t\tsub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i])\n\t\tdefer sub.Unsubscribe()\n\t}\n\t// Fill the source pool with transactions and wait for them at the sinks\n\ttxs := make([]*types.Transaction, 1024)\n\tfor nonce := range txs {\n\t\ttx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)\n\t\ttx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey)\n\n\t\ttxs[nonce] = tx\n\t}\n\tsource.txpool.AddRemotes(txs)\n\n\t// Iterate through all the sinks and ensure they all got the transactions\n\tfor i := range sinks 
{\n\t\tfor arrived := 0; arrived < len(txs); {\n\t\t\tselect {\n\t\t\tcase event := <-txChs[i]:\n\t\t\t\tarrived += len(event.Txs)\n\t\t\tcase <-time.NewTimer(time.Second).C:\n\t\t\t\tt.Errorf(\"sink %d: transaction propagation timed out: have %d, want %d\", i, arrived, len(txs))\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Tests that post eth protocol handshake, clients perform a mutual checkpoint\n// challenge to validate each other's chains. Hash mismatches, or missing ones\n// during a fast sync should lead to the peer getting dropped.\nfunc TestCheckpointChallenge(t *testing.T) {\n\ttests := []struct {\n\t\tsyncmode   downloader.SyncMode\n\t\tcheckpoint bool\n\t\ttimeout    bool\n\t\tempty      bool\n\t\tmatch      bool\n\t\tdrop       bool\n\t}{\n\t\t// If checkpointing is not enabled locally, don't challenge and don't drop\n\t\t{downloader.FullSync, false, false, false, false, false},\n\t\t{downloader.FastSync, false, false, false, false, false},\n\n\t\t// If checkpointing is enabled locally and remote response is empty, only drop during fast sync\n\t\t{downloader.FullSync, true, false, true, false, false},\n\t\t{downloader.FastSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer\n\n\t\t// If checkpointing is enabled locally and remote response mismatches, always drop\n\t\t{downloader.FullSync, true, false, false, false, true},\n\t\t{downloader.FastSync, true, false, false, false, true},\n\n\t\t// If checkpointing is enabled locally and remote response matches, never drop\n\t\t{downloader.FullSync, true, false, false, true, false},\n\t\t{downloader.FastSync, true, false, false, true, false},\n\n\t\t// If checkpointing is enabled locally and remote times out, always drop\n\t\t{downloader.FullSync, true, true, false, true, true},\n\t\t{downloader.FastSync, true, true, false, true, true},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(fmt.Sprintf(\"sync %v checkpoint %v timeout %v empty %v match %v\", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) {\n\t\t\ttestCheckpointChallenge(t, tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match, tt.drop)\n\t\t})\n\t}\n}\n\nfunc testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) {\n\t// Reduce the checkpoint handshake challenge timeout\n\tdefer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout)\n\tsyncChallengeTimeout = 250 * time.Millisecond\n\n\t// Create a test handler and inject a CHT into it. 
The injection is a bit\n\t// ugly, but it beats creating everything manually just to avoid reaching\n\t// into the internals a bit.\n\thandler := newTestHandler()\n\tdefer handler.close()\n\n\tif syncmode == downloader.FastSync {\n\t\tatomic.StoreUint32(&handler.handler.fastSync, 1)\n\t} else {\n\t\tatomic.StoreUint32(&handler.handler.fastSync, 0)\n\t}\n\tvar response *types.Header\n\tif checkpoint {\n\t\tnumber := (uint64(rand.Intn(500))+1)*params.CHTFrequency - 1\n\t\tresponse = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte(\"valid\")}\n\n\t\thandler.handler.checkpointNumber = number\n\t\thandler.handler.checkpointHash = response.Hash()\n\t}\n\t// Create a challenger peer and a challenged one\n\tp2pLocal, p2pRemote := p2p.MsgPipe()\n\tdefer p2pLocal.Close()\n\tdefer p2pRemote.Close()\n\n\tlocal := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{1}, \"\", nil), p2pLocal, handler.txpool)\n\tremote := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{2}, \"\", nil), p2pRemote, handler.txpool)\n\tdefer local.Close()\n\tdefer remote.Close()\n\n\tgo handler.handler.runEthPeer(local, func(peer *eth.Peer) error {\n\t\treturn eth.Handle((*ethHandler)(handler.handler), peer)\n\t})\n\t// Run the handshake locally to avoid spinning up a remote handler\n\tvar (\n\t\tgenesis = handler.chain.Genesis()\n\t\thead    = handler.chain.CurrentBlock()\n\t\ttd      = handler.chain.GetTd(head.Hash(), head.NumberU64())\n\t)\n\tif err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil {\n\t\tt.Fatalf(\"failed to run protocol handshake\")\n\t}\n\t// Connect a new peer and check that we receive the checkpoint challenge\n\tif checkpoint {\n\t\tif err := remote.ExpectRequestHeadersByNumber(response.Number.Uint64(), 1, 0, false); err != nil {\n\t\t\tt.Fatalf(\"challenge mismatch: %v\", err)\n\t\t}\n\t\t// Create a block to reply to the challenge if no timeout is simulated\n\t\tif !timeout {\n\t\t\tif empty {\n\t\t\t\tif err := remote.SendBlockHeaders([]*types.Header{}); err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to answer challenge: %v\", err)\n\t\t\t\t}\n\t\t\t} else if match {\n\t\t\t\tif err := remote.SendBlockHeaders([]*types.Header{response}); err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to answer challenge: %v\", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := remote.SendBlockHeaders([]*types.Header{{Number: response.Number}}); err != nil {\n\t\t\t\t\tt.Fatalf(\"failed to answer challenge: %v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// Wait until the test timeout passes to ensure proper cleanup\n\ttime.Sleep(syncChallengeTimeout + 300*time.Millisecond)\n\n\t// Verify that the remote peer is maintained or dropped\n\tif drop {\n\t\tif peers := handler.handler.peers.len(); peers != 0 {\n\t\t\tt.Fatalf(\"peer count mismatch: have %d, want %d\", peers, 0)\n\t\t}\n\t} else {\n\t\tif peers := handler.handler.peers.len(); peers != 1 {\n\t\t\tt.Fatalf(\"peer count mismatch: have %d, want %d\", peers, 1)\n\t\t}\n\t}\n}\n\n// Tests that blocks are broadcast to a sqrt number of peers only.\nfunc TestBroadcastBlock1Peer(t *testing.T)    { testBroadcastBlock(t, 1, 1) }\nfunc TestBroadcastBlock2Peers(t *testing.T)   { testBroadcastBlock(t, 2, 1) }\nfunc TestBroadcastBlock3Peers(t *testing.T)   { testBroadcastBlock(t, 3, 1) }\nfunc TestBroadcastBlock4Peers(t *testing.T)   { testBroadcastBlock(t, 4, 2) }\nfunc TestBroadcastBlock5Peers(t *testing.T)   { testBroadcastBlock(t, 5, 2) }\nfunc TestBroadcastBlock8Peers(t *testing.T)   { 
testBroadcastBlock(t, 8, 2) }\nfunc TestBroadcastBlock12Peers(t *testing.T)  { testBroadcastBlock(t, 12, 3) }\nfunc TestBroadcastBlock16Peers(t *testing.T)  { testBroadcastBlock(t, 16, 4) }\nfunc TestBroadcastBlock26Peers(t *testing.T)  { testBroadcastBlock(t, 26, 5) }\nfunc TestBroadcastBlock100Peers(t *testing.T) { testBroadcastBlock(t, 100, 10) }\n\nfunc testBroadcastBlock(t *testing.T, peers, bcasts int) {\n\tt.Parallel()\n\n\t// Create a source handler to broadcast blocks from and a number of sinks\n\t// to receive them.\n\tsource := newTestHandlerWithBlocks(1)\n\tdefer source.close()\n\n\tsinks := make([]*testEthHandler, peers)\n\tfor i := 0; i < len(sinks); i++ {\n\t\tsinks[i] = new(testEthHandler)\n\t}\n\t// Interconnect all the sink handlers with the source handler\n\tvar (\n\t\tgenesis = source.chain.Genesis()\n\t\ttd      = source.chain.GetTd(genesis.Hash(), genesis.NumberU64())\n\t)\n\tfor i, sink := range sinks {\n\t\tsink := sink // Closure for goroutine below\n\n\t\tsourcePipe, sinkPipe := p2p.MsgPipe()\n\t\tdefer sourcePipe.Close()\n\t\tdefer sinkPipe.Close()\n\n\t\tsourcePeer := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{byte(i)}, \"\", nil), sourcePipe, nil)\n\t\tsinkPeer := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{0}, \"\", nil), sinkPipe, nil)\n\t\tdefer sourcePeer.Close()\n\t\tdefer sinkPeer.Close()\n\n\t\tgo source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error {\n\t\t\treturn eth.Handle((*ethHandler)(source.handler), peer)\n\t\t})\n\t\tif err := sinkPeer.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil {\n\t\t\tt.Fatalf(\"failed to run protocol handshake\")\n\t\t}\n\t\tgo eth.Handle(sink, sinkPeer)\n\t}\n\t// Subscribe to all the block broadcast feeds\n\tblockChs := make([]chan *types.Block, len(sinks))\n\tfor i := 0; i < len(sinks); i++ {\n\t\tblockChs[i] = make(chan *types.Block, 1)\n\t\tdefer close(blockChs[i])\n\n\t\tsub := sinks[i].blockBroadcasts.Subscribe(blockChs[i])\n\t\tdefer sub.Unsubscribe()\n\t}\n\t// Initiate a block propagation across the peers\n\ttime.Sleep(100 * time.Millisecond)\n\tsource.handler.BroadcastBlock(source.chain.CurrentBlock(), true)\n\n\t// Iterate through all the sinks and ensure the correct number got the block\n\tdone := make(chan struct{}, peers)\n\tfor _, ch := range blockChs {\n\t\tch := ch\n\t\tgo func() {\n\t\t\t<-ch\n\t\t\tdone <- struct{}{}\n\t\t}()\n\t}\n\tvar received int\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treceived++\n\n\t\tcase <-time.After(100 * time.Millisecond):\n\t\t\tif received != bcasts {\n\t\t\t\tt.Errorf(\"broadcast count mismatch: have %d, want %d\", received, bcasts)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Tests that a propagated malformed block (uncles or transactions don't match\n// with the hashes in the header) gets discarded and not broadcast forward.\nfunc TestBroadcastMalformedBlock64(t *testing.T) { testBroadcastMalformedBlock(t, 64) }\nfunc TestBroadcastMalformedBlock65(t *testing.T) { testBroadcastMalformedBlock(t, 65) }\n\nfunc testBroadcastMalformedBlock(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\t// Create a source handler to broadcast blocks from and a number of sinks\n\t// to receive them.\n\tsource := newTestHandlerWithBlocks(1)\n\tdefer source.close()\n\n\t// Create a source handler to send messages through and a sink peer to receive them\n\tp2pSrc, p2pSink := p2p.MsgPipe()\n\tdefer p2pSrc.Close()\n\tdefer p2pSink.Close()\n\n\tsrc := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, 
\"\", nil), p2pSrc, source.txpool)\n\tsink := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, \"\", nil), p2pSink, source.txpool)\n\tdefer src.Close()\n\tdefer sink.Close()\n\n\tgo source.handler.runEthPeer(src, func(peer *eth.Peer) error {\n\t\treturn eth.Handle((*ethHandler)(source.handler), peer)\n\t})\n\t// Run the handshake locally to avoid spinning up a sink handler\n\tvar (\n\t\tgenesis = source.chain.Genesis()\n\t\ttd      = source.chain.GetTd(genesis.Hash(), genesis.NumberU64())\n\t)\n\tif err := sink.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil {\n\t\tt.Fatalf(\"failed to run protocol handshake\")\n\t}\n\t// After the handshake completes, the source handler should stream the sink\n\t// the blocks, subscribe to inbound network events\n\tbackend := new(testEthHandler)\n\n\tblocks := make(chan *types.Block, 1)\n\tsub := backend.blockBroadcasts.Subscribe(blocks)\n\tdefer sub.Unsubscribe()\n\n\tgo eth.Handle(backend, sink)\n\n\t// Create various combinations of malformed blocks\n\thead := source.chain.CurrentBlock()\n\n\tmalformedUncles := head.Header()\n\tmalformedUncles.UncleHash[0]++\n\tmalformedTransactions := head.Header()\n\tmalformedTransactions.TxHash[0]++\n\tmalformedEverything := head.Header()\n\tmalformedEverything.UncleHash[0]++\n\tmalformedEverything.TxHash[0]++\n\n\t// Try to broadcast all malformations and ensure they all get discarded\n\tfor _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} {\n\t\tblock := types.NewBlockWithHeader(header).WithBody(head.Transactions(), head.Uncles())\n\t\tif err := src.SendNewBlock(block, big.NewInt(131136)); err != nil {\n\t\t\tt.Fatalf(\"failed to broadcast block: %v\", err)\n\t\t}\n\t\tselect {\n\t\tcase <-blocks:\n\t\t\tt.Fatalf(\"malformed block forwarded\")\n\t\tcase <-time.After(100 * time.Millisecond):\n\t\t}\n\t}\n}\n"
  },
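`TestCheckpointChallenge` above exercises a disarm-or-drop timer: `runEthPeer` arms it with `time.AfterFunc` and `handleHeaders` stops it when a reply arrives before the deadline. A self-contained sketch of that pattern; the timeout value and peer id here are invented for illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const syncChallengeTimeout = 250 * time.Millisecond // hypothetical challenge window

	// Arm the dropper, as runEthPeer does for the checkpoint challenge.
	drop := time.AfterFunc(syncChallengeTimeout, func() {
		fmt.Println("challenge timed out, dropping peer 0xdead")
	})
	// Simulate the header reply: Stop reports whether the timer was still
	// pending, i.e. whether the peer answered before the deadline.
	replied := true // flip to false to watch the timer fire instead
	if replied && drop.Stop() {
		fmt.Println("challenge answered in time, peer 0xdead kept")
	}
	time.Sleep(syncChallengeTimeout + 50*time.Millisecond) // let an unanswered timer fire
}
```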
  {
    "path": "eth/handler_snap.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/snap\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\n// snapHandler implements the snap.Backend interface to handle the various network\n// packets that are sent as replies or broadcasts.\ntype snapHandler handler\n\nfunc (h *snapHandler) Chain() *core.BlockChain { return h.chain }\n\n// RunPeer is invoked when a peer joins on the `snap` protocol.\nfunc (h *snapHandler) RunPeer(peer *snap.Peer, hand snap.Handler) error {\n\treturn (*handler)(h).runSnapExtension(peer, hand)\n}\n\n// PeerInfo retrieves all known `snap` information about a peer.\nfunc (h *snapHandler) PeerInfo(id enode.ID) interface{} {\n\tif p := h.peers.peer(id.String()); p != nil {\n\t\tif p.snapExt != nil {\n\t\t\treturn p.snapExt.info()\n\t\t}\n\t}\n\treturn nil\n}\n\n// Handle is invoked from a peer's message handler when it receives a new remote\n// message that the handler couldn't consume and serve itself.\nfunc (h *snapHandler) Handle(peer *snap.Peer, packet snap.Packet) error {\n\treturn h.downloader.DeliverSnapPacket(peer, packet)\n}\n"
  },
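`ethHandler` and `snapHandler` above are named types declared over the same underlying `handler` struct, so a single piece of state can satisfy two backend interfaces via pointer conversion, with no copying. A minimal sketch of that trick (the `Proto` method and field are invented for the example):

```go
package main

import "fmt"

// One shared state struct, two protocol-flavoured views of it.
type handler struct{ peers int }

type ethHandler handler  // eth-backend view of the shared state
type snapHandler handler // snap-backend view of the same state

func (h *ethHandler) Proto() string  { return fmt.Sprintf("eth, %d peers", h.peers) }
func (h *snapHandler) Proto() string { return fmt.Sprintf("snap, %d peers", h.peers) }

func main() {
	h := &handler{peers: 3}
	// Pointer conversions are legal because the underlying types are identical.
	fmt.Println((*ethHandler)(h).Proto())  // both views read h's fields
	fmt.Println((*snapHandler)(h).Proto()) // just a type change, no copy
}
```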
  {
    "path": "eth/handler_test.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"math/big\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\nvar (\n\t// testKey is a private key to use for funding a tester account.\n\ttestKey, _ = crypto.HexToECDSA(\"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291\")\n\n\t// testAddr is the Ethereum address of the tester account.\n\ttestAddr = crypto.PubkeyToAddress(testKey.PublicKey)\n)\n\n// testTxPool is a mock transaction pool that blindly accepts all transactions.\n// Its goal is to get around setting up a valid statedb for the balance and nonce\n// checks.\ntype testTxPool struct {\n\tpool map[common.Hash]*types.Transaction // Hash map of collected transactions\n\n\ttxFeed event.Feed   // Notification feed to allow waiting for inclusion\n\tlock   sync.RWMutex // Protects the transaction pool\n}\n\n// newTestTxPool creates a mock transaction pool.\nfunc newTestTxPool() *testTxPool {\n\treturn &testTxPool{\n\t\tpool: make(map[common.Hash]*types.Transaction),\n\t}\n}\n\n// Has returns an indicator whether txpool has a transaction\n// cached with the given hash.\nfunc (p *testTxPool) Has(hash common.Hash) bool {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\treturn p.pool[hash] != nil\n}\n\n// Get retrieves the transaction from local txpool with given\n// tx hash.\nfunc (p *testTxPool) Get(hash common.Hash) *types.Transaction {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\treturn p.pool[hash]\n}\n\n// AddRemotes appends a batch of transactions to the pool, and notifies any\n// listeners if the addition channel is non nil\nfunc (p *testTxPool) AddRemotes(txs []*types.Transaction) []error {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\tfor _, tx := range txs {\n\t\tp.pool[tx.Hash()] = tx\n\t}\n\tp.txFeed.Send(core.NewTxsEvent{Txs: txs})\n\treturn make([]error, len(txs))\n}\n\n// Pending returns all the transactions known to the pool\nfunc (p *testTxPool) Pending() (map[common.Address]types.Transactions, error) {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\tbatches := make(map[common.Address]types.Transactions)\n\tfor _, tx := range p.pool {\n\t\tfrom, _ := types.Sender(types.HomesteadSigner{}, tx)\n\t\tbatches[from] = append(batches[from], tx)\n\t}\n\tfor _, batch := range 
batches {\n\t\tsort.Sort(types.TxByNonce(batch))\n\t}\n\treturn batches, nil\n}\n\n// SubscribeNewTxsEvent returns an event subscription for NewTxsEvent and\n// sends events to the given channel.\nfunc (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {\n\treturn p.txFeed.Subscribe(ch)\n}\n\n// testHandler is a live implementation of the Ethereum protocol handler, just\n// preinitialized with some sane testing defaults and the transaction pool mocked\n// out.\ntype testHandler struct {\n\tdb      ethdb.Database\n\tchain   *core.BlockChain\n\ttxpool  *testTxPool\n\thandler *handler\n}\n\n// newTestHandler creates a new handler for testing purposes with no blocks.\nfunc newTestHandler() *testHandler {\n\treturn newTestHandlerWithBlocks(0)\n}\n\n// newTestHandlerWithBlocks creates a new handler for testing purposes, with a\n// given number of initial blocks.\nfunc newTestHandlerWithBlocks(blocks int) *testHandler {\n\t// Create a database pre-initialized with a genesis block\n\tdb := rawdb.NewMemoryDatabase()\n\t(&core.Genesis{\n\t\tConfig: params.TestChainConfig,\n\t\tAlloc:  core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},\n\t}).MustCommit(db)\n\n\tchain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)\n\n\tbs, _ := core.GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, blocks, nil)\n\tif _, err := chain.InsertChain(bs); err != nil {\n\t\tpanic(err)\n\t}\n\ttxpool := newTestTxPool()\n\n\thandler, _ := newHandler(&handlerConfig{\n\t\tDatabase:   db,\n\t\tChain:      chain,\n\t\tTxPool:     txpool,\n\t\tNetwork:    1,\n\t\tSync:       downloader.FastSync,\n\t\tBloomCache: 1,\n\t})\n\thandler.Start(1000)\n\n\treturn &testHandler{\n\t\tdb:      db,\n\t\tchain:   chain,\n\t\ttxpool:  txpool,\n\t\thandler: handler,\n\t}\n}\n\n// close tears down the handler and all its internal constructs.\nfunc (b *testHandler) close() {\n\tb.handler.Stop()\n\tb.chain.Stop()\n}\n"
  },
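The `testTxPool` above combines a hash-keyed map guarded by an `RWMutex` with an `event.Feed`, so tests can both query the pool and block until insertions happen. A dependency-free sketch of the same shape, with a plain buffered channel standing in for `event.Feed` (all names here are invented):

```go
package main

import (
	"fmt"
	"sync"
)

// mockPool: a map of tx-hash -> payload plus a notification channel,
// mirroring the map + feed pairing of testTxPool.
type mockPool struct {
	mu   sync.RWMutex
	pool map[string]string
	news chan []string
}

func newMockPool() *mockPool {
	return &mockPool{pool: make(map[string]string), news: make(chan []string, 1)}
}

// AddRemotes stores the transactions and notifies listeners, like the
// txFeed.Send call in the real mock.
func (p *mockPool) AddRemotes(txs map[string]string) {
	p.mu.Lock()
	hashes := make([]string, 0, len(txs))
	for h, tx := range txs {
		p.pool[h] = tx
		hashes = append(hashes, h)
	}
	p.mu.Unlock()
	p.news <- hashes
}

// Has reports whether the pool holds a transaction with the given hash.
func (p *mockPool) Has(hash string) bool {
	p.mu.RLock()
	defer p.mu.RUnlock()
	_, ok := p.pool[hash]
	return ok
}

func main() {
	p := newMockPool()
	p.AddRemotes(map[string]string{"0xabc": "tx1"})
	fmt.Println("event:", <-p.news, "has 0xabc:", p.Has("0xabc"))
}
```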
  {
    "path": "eth/peer.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"math/big\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/eth/protocols/eth\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/snap\"\n)\n\n// ethPeerInfo represents a short summary of the `eth` sub-protocol metadata known\n// about a connected peer.\ntype ethPeerInfo struct {\n\tVersion    uint     `json:\"version\"`    // Ethereum protocol version negotiated\n\tDifficulty *big.Int `json:\"difficulty\"` // Total difficulty of the peer's blockchain\n\tHead       string   `json:\"head\"`       // Hex hash of the peer's best owned block\n}\n\n// ethPeer is a wrapper around eth.Peer to maintain a few extra metadata.\ntype ethPeer struct {\n\t*eth.Peer\n\tsnapExt *snapPeer // Satellite `snap` connection\n\n\tsyncDrop *time.Timer   // Connection dropper if `eth` sync progress isn't validated in time\n\tsnapWait chan struct{} // Notification channel for snap connections\n\tlock     sync.RWMutex  // Mutex protecting the internal fields\n}\n\n// info gathers and returns some `eth` protocol metadata known about a peer.\nfunc (p *ethPeer) info() *ethPeerInfo {\n\thash, td := p.Head()\n\n\treturn &ethPeerInfo{\n\t\tVersion:    p.Version(),\n\t\tDifficulty: td,\n\t\tHead:       hash.Hex(),\n\t}\n}\n\n// snapPeerInfo represents a short summary of the `snap` sub-protocol metadata known\n// about a connected peer.\ntype snapPeerInfo struct {\n\tVersion uint `json:\"version\"` // Snapshot protocol version negotiated\n}\n\n// snapPeer is a wrapper around snap.Peer to maintain a few extra metadata.\ntype snapPeer struct {\n\t*snap.Peer\n}\n\n// info gathers and returns some `snap` protocol metadata known about a peer.\nfunc (p *snapPeer) info() *snapPeerInfo {\n\treturn &snapPeerInfo{\n\t\tVersion: p.Version(),\n\t}\n}\n"
  },
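The `peerSet` in eth/peerset.go below pairs each `eth` connection with its optional `snap` satellite: whichever side registers first either parks itself (`snapPend`) or leaves a channel for the other side to complete (`snapWait`). A minimal rendezvous sketch of that hand-off, under the simplifying assumption that ids and peer info are plain strings:

```go
package main

import (
	"fmt"
	"sync"
)

// rendezvous pairs two arrivals that share an id, mirroring the
// snapPend/snapWait bookkeeping in peerSet.
type rendezvous struct {
	mu      sync.Mutex
	pending map[string]string      // snap arrived first, parked here
	waiting map[string]chan string // eth arrived first, blocked on a channel
}

// registerSnap either unblocks a waiting eth side or parks the snap peer.
func (r *rendezvous) registerSnap(id, info string) {
	r.mu.Lock()
	if ch, ok := r.waiting[id]; ok {
		delete(r.waiting, id)
		r.mu.Unlock()
		ch <- info // complete the waiting eth side
		return
	}
	r.pending[id] = info
	r.mu.Unlock()
}

// waitSnap returns a parked snap peer immediately, or blocks until one arrives.
func (r *rendezvous) waitSnap(id string) string {
	r.mu.Lock()
	if info, ok := r.pending[id]; ok {
		delete(r.pending, id)
		r.mu.Unlock()
		return info
	}
	ch := make(chan string)
	r.waiting[id] = ch
	r.mu.Unlock()
	return <-ch
}

func main() {
	r := &rendezvous{pending: map[string]string{}, waiting: map[string]chan string{}}
	go r.registerSnap("peer1", "snap/1") // either order of arrival works
	fmt.Println("paired:", r.waitSnap("peer1"))
}
```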
  {
    "path": "eth/peerset.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"errors\"\n\t\"math/big\"\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/eth\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/snap\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n)\n\nvar (\n\t// errPeerSetClosed is returned if a peer is attempted to be added or removed\n\t// from the peer set after it has been terminated.\n\terrPeerSetClosed = errors.New(\"peerset closed\")\n\n\t// errPeerAlreadyRegistered is returned if a peer is attempted to be added\n\t// to the peer set, but one with the same id already exists.\n\terrPeerAlreadyRegistered = errors.New(\"peer already registered\")\n\n\t// errPeerNotRegistered is returned if a peer is attempted to be removed from\n\t// a peer set, but no peer with the given id exists.\n\terrPeerNotRegistered = errors.New(\"peer not registered\")\n\n\t// errSnapWithoutEth is returned if a peer attempts to connect only on the\n\t// snap protocol without advertizing the eth main protocol.\n\terrSnapWithoutEth = errors.New(\"peer connected on snap without compatible eth support\")\n)\n\n// peerSet represents the collection of active peers currently participating in\n// the `eth` protocol, with or without the `snap` extension.\ntype peerSet struct {\n\tpeers     map[string]*ethPeer // Peers connected on the `eth` protocol\n\tsnapPeers int                 // Number of `snap` compatible peers for connection prioritization\n\n\tsnapWait map[string]chan *snap.Peer // Peers connected on `eth` waiting for their snap extension\n\tsnapPend map[string]*snap.Peer      // Peers connected on the `snap` protocol, but not yet on `eth`\n\n\tlock   sync.RWMutex\n\tclosed bool\n}\n\n// newPeerSet creates a new peer set to track the active participants.\nfunc newPeerSet() *peerSet {\n\treturn &peerSet{\n\t\tpeers:    make(map[string]*ethPeer),\n\t\tsnapWait: make(map[string]chan *snap.Peer),\n\t\tsnapPend: make(map[string]*snap.Peer),\n\t}\n}\n\n// registerSnapExtension unblocks an already connected `eth` peer waiting for its\n// `snap` extension, or if no such peer exists, tracks the extension for the time\n// being until the `eth` main protocol starts looking for it.\nfunc (ps *peerSet) registerSnapExtension(peer *snap.Peer) error {\n\t// Reject the peer if it advertises `snap` without `eth` as `snap` is only a\n\t// satellite protocol meaningful with the chain selection of `eth`\n\tif !peer.RunningCap(eth.ProtocolName, eth.ProtocolVersions) {\n\t\treturn errSnapWithoutEth\n\t}\n\t// Ensure nobody can double connect\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\n\tid := peer.ID()\n\tif _, ok := ps.peers[id]; ok {\n\t\treturn errPeerAlreadyRegistered // avoid connections 
with the same id as existing ones\n\t}\n\tif _, ok := ps.snapPend[id]; ok {\n\t\treturn errPeerAlreadyRegistered // avoid connections with the same id as pending ones\n\t}\n\t// Inject the peer into an `eth` counterpart if available, otherwise save for later\n\tif wait, ok := ps.snapWait[id]; ok {\n\t\tdelete(ps.snapWait, id)\n\t\twait <- peer\n\t\treturn nil\n\t}\n\tps.snapPend[id] = peer\n\treturn nil\n}\n\n// waitSnapExtension blocks until all satellite protocols are connected and tracked\n// by the peerset.\nfunc (ps *peerSet) waitSnapExtension(peer *eth.Peer) (*snap.Peer, error) {\n\t// If the peer does not support a compatible `snap`, don't wait\n\tif !peer.RunningCap(snap.ProtocolName, snap.ProtocolVersions) {\n\t\treturn nil, nil\n\t}\n\t// Ensure nobody can double connect\n\tps.lock.Lock()\n\n\tid := peer.ID()\n\tif _, ok := ps.peers[id]; ok {\n\t\tps.lock.Unlock()\n\t\treturn nil, errPeerAlreadyRegistered // avoid connections with the same id as existing ones\n\t}\n\tif _, ok := ps.snapWait[id]; ok {\n\t\tps.lock.Unlock()\n\t\treturn nil, errPeerAlreadyRegistered // avoid connections with the same id as pending ones\n\t}\n\t// If `snap` already connected, retrieve the peer from the pending set\n\tif snap, ok := ps.snapPend[id]; ok {\n\t\tdelete(ps.snapPend, id)\n\n\t\tps.lock.Unlock()\n\t\treturn snap, nil\n\t}\n\t// Otherwise wait for `snap` to connect concurrently\n\twait := make(chan *snap.Peer)\n\tps.snapWait[id] = wait\n\tps.lock.Unlock()\n\n\treturn <-wait, nil\n}\n\n// registerPeer injects a new `eth` peer into the working set, or returns an error\n// if the peer is already known.\nfunc (ps *peerSet) registerPeer(peer *eth.Peer, ext *snap.Peer) error {\n\t// Start tracking the new peer\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\n\tif ps.closed {\n\t\treturn errPeerSetClosed\n\t}\n\tid := peer.ID()\n\tif _, ok := ps.peers[id]; ok {\n\t\treturn errPeerAlreadyRegistered\n\t}\n\teth := &ethPeer{\n\t\tPeer: peer,\n\t}\n\tif ext != nil {\n\t\teth.snapExt = &snapPeer{ext}\n\t\tps.snapPeers++\n\t}\n\tps.peers[id] = eth\n\treturn nil\n}\n\n// unregisterPeer removes a remote peer from the active set, disabling any further\n// actions to/from that particular entity.\nfunc (ps *peerSet) unregisterPeer(id string) error {\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\n\tpeer, ok := ps.peers[id]\n\tif !ok {\n\t\treturn errPeerNotRegistered\n\t}\n\tdelete(ps.peers, id)\n\tif peer.snapExt != nil {\n\t\tps.snapPeers--\n\t}\n\treturn nil\n}\n\n// peer retrieves the registered peer with the given id.\nfunc (ps *peerSet) peer(id string) *ethPeer {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\treturn ps.peers[id]\n}\n\n// peersWithoutBlock retrieves a list of peers that do not have a given block in\n// their set of known hashes so it might be propagated to them.\nfunc (ps *peerSet) peersWithoutBlock(hash common.Hash) []*ethPeer {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\tlist := make([]*ethPeer, 0, len(ps.peers))\n\tfor _, p := range ps.peers {\n\t\tif !p.KnownBlock(hash) {\n\t\t\tlist = append(list, p)\n\t\t}\n\t}\n\treturn list\n}\n\n// peersWithoutTransaction retrieves a list of peers that do not have a given\n// transaction in their set of known hashes.\nfunc (ps *peerSet) peersWithoutTransaction(hash common.Hash) []*ethPeer {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\tlist := make([]*ethPeer, 0, len(ps.peers))\n\tfor _, p := range ps.peers {\n\t\tif !p.KnownTransaction(hash) {\n\t\t\tlist = append(list, p)\n\t\t}\n\t}\n\treturn list\n}\n\n// len returns the current 
number of `eth` peers in the set. Since the `snap`\n// peers are tied to the existence of an `eth` connection, that will always be a\n// subset of `eth`.\nfunc (ps *peerSet) len() int {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\treturn len(ps.peers)\n}\n\n// snapLen returns the current number of `snap` peers in the set.\nfunc (ps *peerSet) snapLen() int {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\treturn ps.snapPeers\n}\n\n// peerWithHighestTD retrieves the known peer with the currently highest total\n// difficulty.\nfunc (ps *peerSet) peerWithHighestTD() *eth.Peer {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\tvar (\n\t\tbestPeer *eth.Peer\n\t\tbestTd   *big.Int\n\t)\n\tfor _, p := range ps.peers {\n\t\tif _, td := p.Head(); bestPeer == nil || td.Cmp(bestTd) > 0 {\n\t\t\tbestPeer, bestTd = p.Peer, td\n\t\t}\n\t}\n\treturn bestPeer\n}\n\n// close disconnects all peers.\nfunc (ps *peerSet) close() {\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\n\tfor _, p := range ps.peers {\n\t\tp.Disconnect(p2p.DiscQuitting)\n\t}\n\tps.closed = true\n}\n"
  },
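  {
    "path": "examples/peerset_pairing_sketch.go",
    "content": "// NOTE: hypothetical example file, added for exposition only; it is not part\n// of go-ethereum. It distills the rendezvous pattern used by peerSet above to\n// pair a `snap` connection with its `eth` counterpart: whichever side\n// handshakes first either parks itself (snapPend) or blocks on a channel\n// (snapWait), and the other side completes the pair. All names below are\n// illustrative stand-ins for the real types.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n)\n\ntype extension struct{ id string } // stand-in for *snap.Peer\n\ntype pairer struct {\n\tlock sync.Mutex\n\twait map[string]chan *extension // main protocol blocked waiting for its extension\n\tpend map[string]*extension      // extension connected before the main protocol\n}\n\nfunc newPairer() *pairer {\n\treturn &pairer{\n\t\twait: make(map[string]chan *extension),\n\t\tpend: make(map[string]*extension),\n\t}\n}\n\n// registerExtension mirrors registerSnapExtension: unblock a waiting main\n// connection if one exists, otherwise park the extension for later.\nfunc (p *pairer) registerExtension(ext *extension) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\tif ch, ok := p.wait[ext.id]; ok {\n\t\tdelete(p.wait, ext.id)\n\t\tch <- ext // the waiter receives outside the lock, so this cannot deadlock\n\t\treturn\n\t}\n\tp.pend[ext.id] = ext\n}\n\n// waitExtension mirrors waitSnapExtension: grab a parked extension if the\n// satellite connected first, otherwise block until it arrives.\nfunc (p *pairer) waitExtension(id string) *extension {\n\tp.lock.Lock()\n\tif ext, ok := p.pend[id]; ok {\n\t\tdelete(p.pend, id)\n\t\tp.lock.Unlock()\n\t\treturn ext\n\t}\n\tch := make(chan *extension)\n\tp.wait[id] = ch\n\tp.lock.Unlock() // release before blocking, or registerExtension could never run\n\n\treturn <-ch\n}\n\nfunc main() {\n\tp := newPairer()\n\tgo p.registerExtension(&extension{id: \"peer-1\"}) // satellite handshakes concurrently\n\tfmt.Println(\"paired:\", p.waitExtension(\"peer-1\").id)\n}\n"
  },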
  {
    "path": "eth/protocols/eth/broadcast.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"math/big\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n)\n\nconst (\n\t// This is the target size for the packs of transactions or announcements. A\n\t// pack can get larger than this if a single transactions exceeds this size.\n\tmaxTxPacketSize = 100 * 1024\n)\n\n// blockPropagation is a block propagation event, waiting for its turn in the\n// broadcast queue.\ntype blockPropagation struct {\n\tblock *types.Block\n\ttd    *big.Int\n}\n\n// broadcastBlocks is a write loop that multiplexes blocks and block accouncements\n// to the remote peer. The goal is to have an async writer that does not lock up\n// node internals and at the same time rate limits queued data.\nfunc (p *Peer) broadcastBlocks() {\n\tfor {\n\t\tselect {\n\t\tcase prop := <-p.queuedBlocks:\n\t\t\tif err := p.SendNewBlock(prop.block, prop.td); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tp.Log().Trace(\"Propagated block\", \"number\", prop.block.Number(), \"hash\", prop.block.Hash(), \"td\", prop.td)\n\n\t\tcase block := <-p.queuedBlockAnns:\n\t\t\tif err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tp.Log().Trace(\"Announced block\", \"number\", block.Number(), \"hash\", block.Hash())\n\n\t\tcase <-p.term:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// broadcastTransactions is a write loop that schedules transaction broadcasts\n// to the remote peer. 
The goal is to have an async writer that does not lock up\n// node internals and at the same time rate limits queued data.\nfunc (p *Peer) broadcastTransactions() {\n\tvar (\n\t\tqueue  []common.Hash         // Queue of hashes to broadcast as full transactions\n\t\tdone   chan struct{}         // Non-nil if background broadcaster is running\n\t\tfail   = make(chan error, 1) // Channel used to receive network error\n\t\tfailed bool                  // Flag whether a send failed, discard everything onward\n\t)\n\tfor {\n\t\t// If there's no in-flight broadcast running, check if a new one is needed\n\t\tif done == nil && len(queue) > 0 {\n\t\t\t// Pile transactions until we reach our allowed network limit\n\t\t\tvar (\n\t\t\t\thashes []common.Hash\n\t\t\t\ttxs    []*types.Transaction\n\t\t\t\tsize   common.StorageSize\n\t\t\t)\n\t\t\tfor i := 0; i < len(queue) && size < maxTxPacketSize; i++ {\n\t\t\t\tif tx := p.txpool.Get(queue[i]); tx != nil {\n\t\t\t\t\ttxs = append(txs, tx)\n\t\t\t\t\tsize += tx.Size()\n\t\t\t\t}\n\t\t\t\thashes = append(hashes, queue[i])\n\t\t\t}\n\t\t\tqueue = queue[:copy(queue, queue[len(hashes):])]\n\n\t\t\t// If there's anything available to transfer, fire up an async writer\n\t\t\tif len(txs) > 0 {\n\t\t\t\tdone = make(chan struct{})\n\t\t\t\tgo func() {\n\t\t\t\t\tif err := p.SendTransactions(txs); err != nil {\n\t\t\t\t\t\tfail <- err\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tclose(done)\n\t\t\t\t\tp.Log().Trace(\"Sent transactions\", \"count\", len(txs))\n\t\t\t\t}()\n\t\t\t}\n\t\t}\n\t\t// Transfer goroutine may or may not have been started, listen for events\n\t\tselect {\n\t\tcase hashes := <-p.txBroadcast:\n\t\t\t// If the connection failed, discard all transaction events\n\t\t\tif failed {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// New batch of transactions to be broadcast, queue them (with cap)\n\t\t\tqueue = append(queue, hashes...)\n\t\t\tif len(queue) > maxQueuedTxs {\n\t\t\t\t// Fancy copy and resize to ensure buffer doesn't grow indefinitely\n\t\t\t\tqueue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])]\n\t\t\t}\n\n\t\tcase <-done:\n\t\t\tdone = nil\n\n\t\tcase <-fail:\n\t\t\tfailed = true\n\n\t\tcase <-p.term:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// announceTransactions is a write loop that schedules transaction announcements\n// to the remote peer. 
The goal is to have an async writer that does not lock up\n// node internals and at the same time rate limits queued data.\nfunc (p *Peer) announceTransactions() {\n\tvar (\n\t\tqueue  []common.Hash         // Queue of hashes to announce as transaction stubs\n\t\tdone   chan struct{}         // Non-nil if background announcer is running\n\t\tfail   = make(chan error, 1) // Channel used to receive network error\n\t\tfailed bool                  // Flag whether a send failed, discard everything onward\n\t)\n\tfor {\n\t\t// If there's no in-flight announce running, check if a new one is needed\n\t\tif done == nil && len(queue) > 0 {\n\t\t\t// Pile transaction hashes until we reach our allowed network limit\n\t\t\tvar (\n\t\t\t\tcount   int\n\t\t\t\tpending []common.Hash\n\t\t\t\tsize    common.StorageSize\n\t\t\t)\n\t\t\tfor count = 0; count < len(queue) && size < maxTxPacketSize; count++ {\n\t\t\t\tif p.txpool.Get(queue[count]) != nil {\n\t\t\t\t\tpending = append(pending, queue[count])\n\t\t\t\t\tsize += common.HashLength\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Shift and trim queue\n\t\t\tqueue = queue[:copy(queue, queue[count:])]\n\n\t\t\t// If there's anything available to transfer, fire up an async writer\n\t\t\tif len(pending) > 0 {\n\t\t\t\tdone = make(chan struct{})\n\t\t\t\tgo func() {\n\t\t\t\t\tif err := p.sendPooledTransactionHashes(pending); err != nil {\n\t\t\t\t\t\tfail <- err\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tclose(done)\n\t\t\t\t\tp.Log().Trace(\"Sent transaction announcements\", \"count\", len(pending))\n\t\t\t\t}()\n\t\t\t}\n\t\t}\n\t\t// Transfer goroutine may or may not have been started, listen for events\n\t\tselect {\n\t\tcase hashes := <-p.txAnnounce:\n\t\t\t// If the connection failed, discard all transaction events\n\t\t\tif failed {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// New batch of transactions to be broadcast, queue them (with cap)\n\t\t\tqueue = append(queue, hashes...)\n\t\t\tif len(queue) > maxQueuedTxAnns {\n\t\t\t\t// Fancy copy and resize to ensure buffer doesn't grow indefinitely\n\t\t\t\tqueue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])]\n\t\t\t}\n\n\t\tcase <-done:\n\t\t\tdone = nil\n\n\t\tcase <-fail:\n\t\t\tfailed = true\n\n\t\tcase <-p.term:\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
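  {
    "path": "examples/broadcast_queue_sketch.go",
    "content": "// NOTE: hypothetical example file, added for exposition only; it is not part\n// of go-ethereum. It shows the async-writer shape used by broadcast.go above:\n// a single select loop owns the queue, at most one send is in flight at a time\n// (done != nil), and the copy/reslice idiom caps the queue by discarding the\n// oldest entries in place. The cap and the fake network write are stand-ins.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\nconst maxQueued = 4 // stands in for maxQueuedTxs / maxQueuedTxAnns\n\nfunc writer(input <-chan []string, term <-chan struct{}) {\n\tvar (\n\t\tqueue []string\n\t\tdone  chan struct{} // non-nil while a background send is running\n\t)\n\tfor {\n\t\t// If no send is in flight and there is work, fire up an async sender\n\t\tif done == nil && len(queue) > 0 {\n\t\t\tbatch := []string{queue[0]} // the real code batches up to a byte budget\n\t\t\tqueue = queue[:copy(queue, queue[1:])]\n\n\t\t\tdone = make(chan struct{})\n\t\t\tgo func() {\n\t\t\t\ttime.Sleep(10 * time.Millisecond) // stands in for the network write\n\t\t\t\tfmt.Println(\"sent\", batch)\n\t\t\t\tclose(done)\n\t\t\t}()\n\t\t}\n\t\tselect {\n\t\tcase items := <-input:\n\t\t\tqueue = append(queue, items...)\n\t\t\tif len(queue) > maxQueued {\n\t\t\t\t// Keep only the newest maxQueued items, shifting them to the\n\t\t\t\t// front and reslicing so the buffer never grows unboundedly\n\t\t\t\tqueue = queue[:copy(queue, queue[len(queue)-maxQueued:])]\n\t\t\t}\n\t\tcase <-done:\n\t\t\tdone = nil\n\t\tcase <-term:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc main() {\n\tinput, term := make(chan []string), make(chan struct{})\n\tgo writer(input, term)\n\n\tinput <- []string{\"a\", \"b\", \"c\", \"d\", \"e\", \"f\"} // overflows the cap: \"a\", \"b\" dropped\n\ttime.Sleep(200 * time.Millisecond)\n\tclose(term)\n}\n"
  },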
  {
    "path": "eth/protocols/eth/discovery.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\n// enrEntry is the ENR entry which advertises `eth` protocol on the discovery.\ntype enrEntry struct {\n\tForkID forkid.ID // Fork identifier per EIP-2124\n\n\t// Ignore additional fields (for forward compatibility).\n\tRest []rlp.RawValue `rlp:\"tail\"`\n}\n\n// ENRKey implements enr.Entry.\nfunc (e enrEntry) ENRKey() string {\n\treturn \"eth\"\n}\n\n// StartENRUpdater starts the `eth` ENR updater loop, which listens for chain\n// head events and updates the requested node record whenever a fork is passed.\nfunc StartENRUpdater(chain *core.BlockChain, ln *enode.LocalNode) {\n\tvar newHead = make(chan core.ChainHeadEvent, 10)\n\tsub := chain.SubscribeChainHeadEvent(newHead)\n\n\tgo func() {\n\t\tdefer sub.Unsubscribe()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-newHead:\n\t\t\t\tln.Set(currentENREntry(chain))\n\t\t\tcase <-sub.Err():\n\t\t\t\t// Would be nice to sync with Stop, but there is no\n\t\t\t\t// good way to do that.\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\n// currentENREntry constructs an `eth` ENR entry based on the current state of the chain.\nfunc currentENREntry(chain *core.BlockChain) *enrEntry {\n\treturn &enrEntry{\n\t\tForkID: forkid.NewID(chain.Config(), chain.Genesis().Hash(), chain.CurrentHeader().Number.Uint64()),\n\t}\n}\n"
  },
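  {
    "path": "examples/enr_updater_sketch.go",
    "content": "// NOTE: hypothetical example file, added for exposition only; it is not part\n// of go-ethereum. It shows the updater-loop shape used by StartENRUpdater in\n// discovery.go above: a subscription delivers head events on one channel and\n// errors on another; the goroutine refreshes the advertised record on each\n// event and exits when the subscription dies. All types here are stand-ins.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n)\n\ntype record struct {\n\tmu    sync.Mutex\n\tentry string\n}\n\nfunc (r *record) Set(entry string) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tr.entry = entry\n\tfmt.Println(\"record updated:\", entry)\n}\n\n// startUpdater mirrors StartENRUpdater: recompute the entry whenever a new\n// head arrives, stop when the error channel fires (subscription torn down).\nfunc startUpdater(heads <-chan uint64, errs <-chan error, rec *record) {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase head := <-heads:\n\t\t\t\trec.Set(fmt.Sprintf(\"eth entry at head %d\", head))\n\t\t\tcase <-errs:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc main() {\n\theads, errs := make(chan uint64), make(chan error)\n\trec := new(record)\n\tstartUpdater(heads, errs, rec)\n\n\theads <- 1\n\theads <- 2 // e.g. a fork boundary was passed, so the entry is recomputed\n\terrs <- fmt.Errorf(\"subscription closed\")\n}\n"
  },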
  {
    "path": "eth/protocols/eth/handler.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/metrics\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\nconst (\n\t// softResponseLimit is the target maximum size of replies to data retrievals.\n\tsoftResponseLimit = 2 * 1024 * 1024\n\n\t// estHeaderSize is the approximate size of an RLP encoded block header.\n\testHeaderSize = 500\n\n\t// maxHeadersServe is the maximum number of block headers to serve. This number\n\t// is there to limit the number of disk lookups.\n\tmaxHeadersServe = 1024\n\n\t// maxBodiesServe is the maximum number of block bodies to serve. This number\n\t// is mostly there to limit the number of disk lookups. With 24KB block sizes\n\t// nowadays, the practical limit will always be softResponseLimit.\n\tmaxBodiesServe = 1024\n\n\t// maxNodeDataServe is the maximum number of state trie nodes to serve. This\n\t// number is there to limit the number of disk lookups.\n\tmaxNodeDataServe = 1024\n\n\t// maxReceiptsServe is the maximum number of block receipts to serve. This\n\t// number is mostly there to limit the number of disk lookups. With block\n\t// containing 200+ transactions nowadays, the practical limit will always\n\t// be softResponseLimit.\n\tmaxReceiptsServe = 1024\n)\n\n// Handler is a callback to invoke from an outside runner after the boilerplate\n// exchanges have passed.\ntype Handler func(peer *Peer) error\n\n// Backend defines the data retrieval methods to serve remote requests and the\n// callback methods to invoke on remote deliveries.\ntype Backend interface {\n\t// Chain retrieves the blockchain object to serve data.\n\tChain() *core.BlockChain\n\n\t// StateBloom retrieves the bloom filter - if any - for state trie nodes.\n\tStateBloom() *trie.SyncBloom\n\n\t// TxPool retrieves the transaction pool object to serve data.\n\tTxPool() TxPool\n\n\t// AcceptTxs retrieves whether transaction processing is enabled on the node\n\t// or if inbound transactions should simply be dropped.\n\tAcceptTxs() bool\n\n\t// RunPeer is invoked when a peer joins on the `eth` protocol. The handler\n\t// should do any peer maintenance work, handshakes and validations. 
If all\n\t// checks pass, control should be given back to the `handler` to process the\n\t// inbound messages going forward.\n\tRunPeer(peer *Peer, handler Handler) error\n\n\t// PeerInfo retrieves all known `eth` information about a peer.\n\tPeerInfo(id enode.ID) interface{}\n\n\t// Handle is a callback to be invoked when a data packet is received from\n\t// the remote peer. Only packets not consumed by the protocol handler will\n\t// be forwarded to the backend.\n\tHandle(peer *Peer, packet Packet) error\n}\n\n// TxPool defines the methods needed by the protocol handler to serve transactions.\ntype TxPool interface {\n\t// Get retrieves the transaction from the local txpool with the given hash.\n\tGet(hash common.Hash) *types.Transaction\n}\n\n// MakeProtocols constructs the P2P protocol definitions for `eth`.\nfunc MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol {\n\tprotocols := make([]p2p.Protocol, len(ProtocolVersions))\n\tfor i, version := range ProtocolVersions {\n\t\tversion := version // Closure\n\n\t\tprotocols[i] = p2p.Protocol{\n\t\t\tName:    ProtocolName,\n\t\t\tVersion: version,\n\t\t\tLength:  protocolLengths[version],\n\t\t\tRun: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {\n\t\t\t\tpeer := NewPeer(version, p, rw, backend.TxPool())\n\t\t\t\tdefer peer.Close()\n\n\t\t\t\treturn backend.RunPeer(peer, func(peer *Peer) error {\n\t\t\t\t\treturn Handle(backend, peer)\n\t\t\t\t})\n\t\t\t},\n\t\t\tNodeInfo: func() interface{} {\n\t\t\t\treturn nodeInfo(backend.Chain(), network)\n\t\t\t},\n\t\t\tPeerInfo: func(id enode.ID) interface{} {\n\t\t\t\treturn backend.PeerInfo(id)\n\t\t\t},\n\t\t\tAttributes:     []enr.Entry{currentENREntry(backend.Chain())},\n\t\t\tDialCandidates: dnsdisc,\n\t\t}\n\t}\n\treturn protocols\n}\n\n// NodeInfo represents a short summary of the `eth` sub-protocol metadata\n// known about the host peer.\ntype NodeInfo struct {\n\tNetwork    uint64              `json:\"network\"`    // Ethereum network ID (1=Frontier, 2=Morden, 3=Ropsten, 4=Rinkeby)\n\tDifficulty *big.Int            `json:\"difficulty\"` // Total difficulty of the host's blockchain\n\tGenesis    common.Hash         `json:\"genesis\"`    // SHA3 hash of the host's genesis block\n\tConfig     *params.ChainConfig `json:\"config\"`     // Chain configuration for the fork rules\n\tHead       common.Hash         `json:\"head\"`       // Hex hash of the host's best owned block\n}\n\n// nodeInfo retrieves some `eth` protocol metadata about the running host node.\nfunc nodeInfo(chain *core.BlockChain, network uint64) *NodeInfo {\n\thead := chain.CurrentBlock()\n\treturn &NodeInfo{\n\t\tNetwork:    network,\n\t\tDifficulty: chain.GetTd(head.Hash(), head.NumberU64()),\n\t\tGenesis:    chain.Genesis().Hash(),\n\t\tConfig:     chain.Config(),\n\t\tHead:       head.Hash(),\n\t}\n}\n\n// Handle is invoked whenever an `eth` connection is made that successfully passes\n// the protocol handshake. 
This method will keep processing messages until the\n// connection is torn down.\nfunc Handle(backend Backend, peer *Peer) error {\n\tfor {\n\t\tif err := handleMessage(backend, peer); err != nil {\n\t\t\tpeer.Log().Debug(\"Message handling failed in `eth`\", \"err\", err)\n\t\t\treturn err\n\t\t}\n\t}\n}\n\ntype msgHandler func(backend Backend, msg Decoder, peer *Peer) error\ntype Decoder interface {\n\tDecode(val interface{}) error\n\tTime() time.Time\n}\n\nvar eth64 = map[uint64]msgHandler{\n\tGetBlockHeadersMsg: handleGetBlockHeaders,\n\tBlockHeadersMsg:    handleBlockHeaders,\n\tGetBlockBodiesMsg:  handleGetBlockBodies,\n\tBlockBodiesMsg:     handleBlockBodies,\n\tGetNodeDataMsg:     handleGetNodeData,\n\tNodeDataMsg:        handleNodeData,\n\tGetReceiptsMsg:     handleGetReceipts,\n\tReceiptsMsg:        handleReceipts,\n\tNewBlockHashesMsg:  handleNewBlockhashes,\n\tNewBlockMsg:        handleNewBlock,\n\tTransactionsMsg:    handleTransactions,\n}\nvar eth65 = map[uint64]msgHandler{\n\t// old 64 messages\n\tGetBlockHeadersMsg: handleGetBlockHeaders,\n\tBlockHeadersMsg:    handleBlockHeaders,\n\tGetBlockBodiesMsg:  handleGetBlockBodies,\n\tBlockBodiesMsg:     handleBlockBodies,\n\tGetNodeDataMsg:     handleGetNodeData,\n\tNodeDataMsg:        handleNodeData,\n\tGetReceiptsMsg:     handleGetReceipts,\n\tReceiptsMsg:        handleReceipts,\n\tNewBlockHashesMsg:  handleNewBlockhashes,\n\tNewBlockMsg:        handleNewBlock,\n\tTransactionsMsg:    handleTransactions,\n\t// New eth65 messages\n\tNewPooledTransactionHashesMsg: handleNewPooledTransactionHashes,\n\tGetPooledTransactionsMsg:      handleGetPooledTransactions,\n\tPooledTransactionsMsg:         handlePooledTransactions,\n}\n\nvar eth66 = map[uint64]msgHandler{\n\t// eth64 announcement messages (no id)\n\tNewBlockHashesMsg: handleNewBlockhashes,\n\tNewBlockMsg:       handleNewBlock,\n\tTransactionsMsg:   handleTransactions,\n\t// eth65 announcement messages (no id)\n\tNewPooledTransactionHashesMsg: handleNewPooledTransactionHashes,\n\t// eth66 messages with request-id\n\tGetBlockHeadersMsg:       handleGetBlockHeaders66,\n\tBlockHeadersMsg:          handleBlockHeaders66,\n\tGetBlockBodiesMsg:        handleGetBlockBodies66,\n\tBlockBodiesMsg:           handleBlockBodies66,\n\tGetNodeDataMsg:           handleGetNodeData66,\n\tNodeDataMsg:              handleNodeData66,\n\tGetReceiptsMsg:           handleGetReceipts66,\n\tReceiptsMsg:              handleReceipts66,\n\tGetPooledTransactionsMsg: handleGetPooledTransactions66,\n\tPooledTransactionsMsg:    handlePooledTransactions66,\n}\n\n// handleMessage is invoked whenever an inbound message is received from a remote\n// peer. 
The remote connection is torn down upon returning any error.\nfunc handleMessage(backend Backend, peer *Peer) error {\n\t// Read the next message from the remote peer, and ensure it's fully consumed\n\tmsg, err := peer.rw.ReadMsg()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif msg.Size > maxMessageSize {\n\t\treturn fmt.Errorf(\"%w: %v > %v\", errMsgTooLarge, msg.Size, maxMessageSize)\n\t}\n\tdefer msg.Discard()\n\n\tvar handlers = eth64\n\tif peer.Version() == ETH65 {\n\t\thandlers = eth65\n\t} else if peer.Version() >= ETH66 {\n\t\thandlers = eth66\n\t}\n\t// Track the amount of time it takes to serve the request and run the handler\n\tif metrics.Enabled {\n\t\th := fmt.Sprintf(\"%s/%s/%d/%#02x\", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)\n\t\tdefer func(start time.Time) {\n\t\t\tsampler := func() metrics.Sample {\n\t\t\t\treturn metrics.ResettingSample(\n\t\t\t\t\tmetrics.NewExpDecaySample(1028, 0.015),\n\t\t\t\t)\n\t\t\t}\n\t\t\tmetrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())\n\t\t}(time.Now())\n\t}\n\tif handler := handlers[msg.Code]; handler != nil {\n\t\treturn handler(backend, msg, peer)\n\t}\n\treturn fmt.Errorf(\"%w: %v\", errInvalidMsgCode, msg.Code)\n}\n"
  },
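  {
    "path": "examples/msg_dispatch_sketch.go",
    "content": "// NOTE: hypothetical example file, added for exposition only; it is not part\n// of go-ethereum. It shows the version-keyed dispatch used by handleMessage in\n// handler.go above: each protocol version gets a map from message code to\n// handler, newer tables reuse old handlers and add new codes, and an unknown\n// code is a protocol error. The versions and codes here are made up.\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\ntype msg struct {\n\tcode uint64\n\tbody string\n}\n\ntype handler func(m msg) error\n\nvar errInvalidCode = errors.New(\"invalid message code\")\n\nvar v1 = map[uint64]handler{\n\t0x00: func(m msg) error { fmt.Println(\"v1 ping:\", m.body); return nil },\n}\n\n// v2 keeps every v1 message and adds one more, mirroring how eth65 extends eth64\nvar v2 = map[uint64]handler{\n\t0x00: v1[0x00],\n\t0x01: func(m msg) error { fmt.Println(\"v2 announce:\", m.body); return nil },\n}\n\nfunc dispatch(version uint, m msg) error {\n\thandlers := v1\n\tif version >= 2 {\n\t\thandlers = v2\n\t}\n\tif h := handlers[m.code]; h != nil {\n\t\treturn h(m)\n\t}\n\treturn fmt.Errorf(\"%w: %#02x\", errInvalidCode, m.code)\n}\n\nfunc main() {\n\tfmt.Println(dispatch(1, msg{code: 0x00, body: \"hello\"}))\n\tfmt.Println(dispatch(2, msg{code: 0x01, body: \"tx hashes\"}))\n\tfmt.Println(dispatch(1, msg{code: 0x01})) // unknown in v1: invalid message code\n}\n"
  },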
  {
    "path": "eth/protocols/eth/handler_test.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"math\"\n\t\"math/big\"\n\t\"math/rand\"\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\nvar (\n\t// testKey is a private key to use for funding a tester account.\n\ttestKey, _ = crypto.HexToECDSA(\"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291\")\n\n\t// testAddr is the Ethereum address of the tester account.\n\ttestAddr = crypto.PubkeyToAddress(testKey.PublicKey)\n)\n\n// testBackend is a mock implementation of the live Ethereum message handler. 
Its\n// purpose is to allow testing the request/reply workflows and wire serialization\n// in the `eth` protocol without actually doing any data processing.\ntype testBackend struct {\n\tdb     ethdb.Database\n\tchain  *core.BlockChain\n\ttxpool *core.TxPool\n}\n\n// newTestBackend creates an empty chain and wraps it into a mock backend.\nfunc newTestBackend(blocks int) *testBackend {\n\treturn newTestBackendWithGenerator(blocks, nil)\n}\n\n// newTestBackendWithGenerator creates a chain with a number of explicitly defined blocks and\n// wraps it into a mock backend.\nfunc newTestBackendWithGenerator(blocks int, generator func(int, *core.BlockGen)) *testBackend {\n\t// Create a database pre-initialized with a genesis block\n\tdb := rawdb.NewMemoryDatabase()\n\t(&core.Genesis{\n\t\tConfig: params.TestChainConfig,\n\t\tAlloc:  core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}},\n\t}).MustCommit(db)\n\n\tchain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)\n\n\tbs, _ := core.GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, blocks, generator)\n\tif _, err := chain.InsertChain(bs); err != nil {\n\t\tpanic(err)\n\t}\n\ttxconfig := core.DefaultTxPoolConfig\n\ttxconfig.Journal = \"\" // Don't litter the disk with test journals\n\n\treturn &testBackend{\n\t\tdb:     db,\n\t\tchain:  chain,\n\t\ttxpool: core.NewTxPool(txconfig, params.TestChainConfig, chain),\n\t}\n}\n\n// close tears down the transaction pool and chain behind the mock backend.\nfunc (b *testBackend) close() {\n\tb.txpool.Stop()\n\tb.chain.Stop()\n}\n\nfunc (b *testBackend) Chain() *core.BlockChain     { return b.chain }\nfunc (b *testBackend) StateBloom() *trie.SyncBloom { return nil }\nfunc (b *testBackend) TxPool() TxPool              { return b.txpool }\n\nfunc (b *testBackend) RunPeer(peer *Peer, handler Handler) error {\n\t// Normally the backend would do peer maintenance and handshakes. 
All that\n\t// is omitted and we will just give control back to the handler.\n\treturn handler(peer)\n}\nfunc (b *testBackend) PeerInfo(enode.ID) interface{} { panic(\"not implemented\") }\n\nfunc (b *testBackend) AcceptTxs() bool {\n\tpanic(\"data processing tests should be done in the handler package\")\n}\nfunc (b *testBackend) Handle(*Peer, Packet) error {\n\tpanic(\"data processing tests should be done in the handler package\")\n}\n\n// Tests that block headers can be retrieved from a remote chain based on user queries.\nfunc TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) }\nfunc TestGetBlockHeaders65(t *testing.T) { testGetBlockHeaders(t, 65) }\n\nfunc testGetBlockHeaders(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\tbackend := newTestBackend(maxHeadersServe + 15)\n\tdefer backend.close()\n\n\tpeer, _ := newTestPeer(\"peer\", protocol, backend)\n\tdefer peer.close()\n\n\t// Create a \"random\" unknown hash for testing\n\tvar unknown common.Hash\n\tfor i := range unknown {\n\t\tunknown[i] = byte(i)\n\t}\n\t// Create a batch of tests for various scenarios\n\tlimit := uint64(maxHeadersServe)\n\ttests := []struct {\n\t\tquery  *GetBlockHeadersPacket // The query to execute for header retrieval\n\t\texpect []common.Hash          // The hashes of the blocks whose headers are expected\n\t}{\n\t\t// A single random block should be retrievable by hash and number too\n\t\t{\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},\n\t\t\t[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},\n\t\t}, {\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1},\n\t\t\t[]common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()},\n\t\t},\n\t\t// Multiple headers should be retrievable in both directions\n\t\t{\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3},\n\t\t\t[]common.Hash{\n\t\t\t\tbackend.chain.GetBlockByNumber(limit / 2).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(limit/2 + 1).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(limit/2 + 2).Hash(),\n\t\t\t},\n\t\t}, {\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},\n\t\t\t[]common.Hash{\n\t\t\t\tbackend.chain.GetBlockByNumber(limit / 2).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(limit/2 - 1).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(limit/2 - 2).Hash(),\n\t\t\t},\n\t\t},\n\t\t// Multiple headers with skip lists should be retrievable\n\t\t{\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},\n\t\t\t[]common.Hash{\n\t\t\t\tbackend.chain.GetBlockByNumber(limit / 2).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(limit/2 + 4).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(limit/2 + 8).Hash(),\n\t\t\t},\n\t\t}, {\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},\n\t\t\t[]common.Hash{\n\t\t\t\tbackend.chain.GetBlockByNumber(limit / 2).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(limit/2 - 4).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(limit/2 - 8).Hash(),\n\t\t\t},\n\t\t},\n\t\t// The chain endpoints should be retrievable\n\t\t{\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1},\n\t\t\t[]common.Hash{backend.chain.GetBlockByNumber(0).Hash()},\n\t\t}, {\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64()}, Amount: 
1},\n\t\t\t[]common.Hash{backend.chain.CurrentBlock().Hash()},\n\t\t},\n\t\t// Ensure protocol limits are honored\n\t\t{\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},\n\t\t\tbackend.chain.GetBlockHashesFromHash(backend.chain.CurrentBlock().Hash(), limit),\n\t\t},\n\t\t// Check that requesting more than available is handled gracefully\n\t\t{\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},\n\t\t\t[]common.Hash{\n\t\t\t\tbackend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 4).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64()).Hash(),\n\t\t\t},\n\t\t}, {\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},\n\t\t\t[]common.Hash{\n\t\t\t\tbackend.chain.GetBlockByNumber(4).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(0).Hash(),\n\t\t\t},\n\t\t},\n\t\t// Check that requesting more than available is handled gracefully, even if mid skip\n\t\t{\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},\n\t\t\t[]common.Hash{\n\t\t\t\tbackend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 4).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 1).Hash(),\n\t\t\t},\n\t\t}, {\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},\n\t\t\t[]common.Hash{\n\t\t\t\tbackend.chain.GetBlockByNumber(4).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(1).Hash(),\n\t\t\t},\n\t\t},\n\t\t// Check a corner case where requesting more can iterate past the endpoints\n\t\t{\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true},\n\t\t\t[]common.Hash{\n\t\t\t\tbackend.chain.GetBlockByNumber(2).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(1).Hash(),\n\t\t\t\tbackend.chain.GetBlockByNumber(0).Hash(),\n\t\t\t},\n\t\t},\n\t\t// Check a corner case where skipping overflow loops back into the chain start\n\t\t{\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1},\n\t\t\t[]common.Hash{\n\t\t\t\tbackend.chain.GetBlockByNumber(3).Hash(),\n\t\t\t},\n\t\t},\n\t\t// Check a corner case where skipping overflow loops back to the same header\n\t\t{\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64},\n\t\t\t[]common.Hash{\n\t\t\t\tbackend.chain.GetBlockByNumber(1).Hash(),\n\t\t\t},\n\t\t},\n\t\t// Check that non-existing headers aren't returned\n\t\t{\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1},\n\t\t\t[]common.Hash{},\n\t\t}, {\n\t\t\t&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() + 1}, Amount: 1},\n\t\t\t[]common.Hash{},\n\t\t},\n\t}\n\t// Run each of the tests and verify the results against the chain\n\tfor i, tt := range tests {\n\t\t// Collect the headers to expect in the response\n\t\tvar headers []*types.Header\n\t\tfor _, hash := range tt.expect {\n\t\t\theaders = append(headers, backend.chain.GetBlockByHash(hash).Header())\n\t\t}\n\t\t// Send the hash request and verify the response\n\t\tp2p.Send(peer.app, GetBlockHeadersMsg, tt.query)\n\t\tif err := p2p.ExpectMsg(peer.app, 
BlockHeadersMsg, headers); err != nil {\n\t\t\tt.Errorf(\"test %d: headers mismatch: %v\", i, err)\n\t\t}\n\t\t// If the test used number origins, repeat with hashes as well\n\t\tif tt.query.Origin.Hash == (common.Hash{}) {\n\t\t\tif origin := backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil {\n\t\t\t\ttt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0\n\n\t\t\t\tp2p.Send(peer.app, GetBlockHeadersMsg, tt.query)\n\t\t\t\tif err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, headers); err != nil {\n\t\t\t\t\tt.Errorf(\"test %d: headers mismatch: %v\", i, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Tests that block contents can be retrieved from a remote chain based on their hashes.\nfunc TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) }\nfunc TestGetBlockBodies65(t *testing.T) { testGetBlockBodies(t, 65) }\n\nfunc testGetBlockBodies(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\tbackend := newTestBackend(maxBodiesServe + 15)\n\tdefer backend.close()\n\n\tpeer, _ := newTestPeer(\"peer\", protocol, backend)\n\tdefer peer.close()\n\n\t// Create a batch of tests for various scenarios\n\tlimit := maxBodiesServe\n\ttests := []struct {\n\t\trandom    int           // Number of blocks to fetch randomly from the chain\n\t\texplicit  []common.Hash // Explicitly requested blocks\n\t\tavailable []bool        // Availability of explicitly requested blocks\n\t\texpected  int           // Total number of existing blocks to expect\n\t}{\n\t\t{1, nil, nil, 1},             // A single random block should be retrievable\n\t\t{10, nil, nil, 10},           // Multiple random blocks should be retrievable\n\t\t{limit, nil, nil, limit},     // The maximum possible blocks should be retrievable\n\t\t{limit + 1, nil, nil, limit}, // No more than the possible block count should be returned\n\t\t{0, []common.Hash{backend.chain.Genesis().Hash()}, []bool{true}, 1},      // The genesis block should be retrievable\n\t\t{0, []common.Hash{backend.chain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chain's head block should be retrievable\n\t\t{0, []common.Hash{{}}, []bool{false}, 0},                                 // A non-existent block should not be returned\n\n\t\t// Existing and non-existing blocks interleaved should not cause problems\n\t\t{0, []common.Hash{\n\t\t\t{},\n\t\t\tbackend.chain.GetBlockByNumber(1).Hash(),\n\t\t\t{},\n\t\t\tbackend.chain.GetBlockByNumber(10).Hash(),\n\t\t\t{},\n\t\t\tbackend.chain.GetBlockByNumber(100).Hash(),\n\t\t\t{},\n\t\t}, []bool{false, true, false, true, false, true, false}, 3},\n\t}\n\t// Run each of the tests and verify the results against the chain\n\tfor i, tt := range tests {\n\t\t// Collect the hashes to request, and the response to expect\n\t\tvar (\n\t\t\thashes []common.Hash\n\t\t\tbodies []*BlockBody\n\t\t\tseen   = make(map[int64]bool)\n\t\t)\n\t\tfor j := 0; j < tt.random; j++ {\n\t\t\tfor {\n\t\t\t\tnum := rand.Int63n(int64(backend.chain.CurrentBlock().NumberU64()))\n\t\t\t\tif !seen[num] {\n\t\t\t\t\tseen[num] = true\n\n\t\t\t\t\tblock := backend.chain.GetBlockByNumber(uint64(num))\n\t\t\t\t\thashes = append(hashes, block.Hash())\n\t\t\t\t\tif len(bodies) < tt.expected {\n\t\t\t\t\t\tbodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor j, hash := range tt.explicit {\n\t\t\thashes = append(hashes, hash)\n\t\t\tif tt.available[j] && len(bodies) < tt.expected {\n\t\t\t\tblock := 
backend.chain.GetBlockByHash(hash)\n\t\t\t\tbodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles()})\n\t\t\t}\n\t\t}\n\t\t// Send the hash request and verify the response\n\t\tp2p.Send(peer.app, GetBlockBodiesMsg, hashes)\n\t\tif err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, bodies); err != nil {\n\t\t\tt.Errorf(\"test %d: bodies mismatch: %v\", i, err)\n\t\t}\n\t}\n}\n\n// Tests that the state trie nodes can be retrieved based on hashes.\nfunc TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) }\nfunc TestGetNodeData65(t *testing.T) { testGetNodeData(t, 65) }\n\nfunc testGetNodeData(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\t// Define three accounts to simulate transactions with\n\tacc1Key, _ := crypto.HexToECDSA(\"8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a\")\n\tacc2Key, _ := crypto.HexToECDSA(\"49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee\")\n\tacc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)\n\tacc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)\n\n\tsigner := types.HomesteadSigner{}\n\t// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test)\n\tgenerator := func(i int, block *core.BlockGen) {\n\t\tswitch i {\n\t\tcase 0:\n\t\t\t// In block 1, the test bank sends account #1 some ether.\n\t\t\ttx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testKey)\n\t\t\tblock.AddTx(tx)\n\t\tcase 1:\n\t\t\t// In block 2, the test bank sends some more ether to account #1.\n\t\t\t// acc1Addr passes it on to account #2.\n\t\t\ttx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)\n\t\t\ttx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)\n\t\t\tblock.AddTx(tx1)\n\t\t\tblock.AddTx(tx2)\n\t\tcase 2:\n\t\t\t// Block 3 is empty but was mined by account #2.\n\t\t\tblock.SetCoinbase(acc2Addr)\n\t\t\tblock.SetExtra([]byte(\"yeehaw\"))\n\t\tcase 3:\n\t\t\t// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).\n\t\t\tb2 := block.PrevBlock(1).Header()\n\t\t\tb2.Extra = []byte(\"foo\")\n\t\t\tblock.AddUncle(b2)\n\t\t\tb3 := block.PrevBlock(2).Header()\n\t\t\tb3.Extra = []byte(\"foo\")\n\t\t\tblock.AddUncle(b3)\n\t\t}\n\t}\n\t// Assemble the test environment\n\tbackend := newTestBackendWithGenerator(4, generator)\n\tdefer backend.close()\n\n\tpeer, _ := newTestPeer(\"peer\", protocol, backend)\n\tdefer peer.close()\n\n\t// For now, fetch the entire chain db\n\tvar hashes []common.Hash\n\n\tit := backend.db.NewIterator(nil, nil)\n\tfor it.Next() {\n\t\tif key := it.Key(); len(key) == common.HashLength {\n\t\t\thashes = append(hashes, common.BytesToHash(key))\n\t\t}\n\t}\n\tit.Release()\n\n\tp2p.Send(peer.app, GetNodeDataMsg, hashes)\n\tmsg, err := peer.app.ReadMsg()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to read node data response: %v\", err)\n\t}\n\tif msg.Code != NodeDataMsg {\n\t\tt.Fatalf(\"response packet code mismatch: have %x, want %x\", msg.Code, NodeDataMsg)\n\t}\n\tvar data [][]byte\n\tif err := msg.Decode(&data); err != nil {\n\t\tt.Fatalf(\"failed to decode response node data: %v\", err)\n\t}\n\t// Verify that all hashes correspond to the requested data, and reconstruct a state tree\n\tfor i, want := range hashes {\n\t\tif hash := crypto.Keccak256Hash(data[i]); hash != want 
{\n\t\t\tt.Errorf(\"data hash mismatch: have %x, want %x\", hash, want)\n\t\t}\n\t}\n\tstatedb := rawdb.NewMemoryDatabase()\n\tfor i := 0; i < len(data); i++ {\n\t\tstatedb.Put(hashes[i].Bytes(), data[i])\n\t}\n\taccounts := []common.Address{testAddr, acc1Addr, acc2Addr}\n\tfor i := uint64(0); i <= backend.chain.CurrentBlock().NumberU64(); i++ {\n\t\ttrie, _ := state.New(backend.chain.GetBlockByNumber(i).Root(), state.NewDatabase(statedb), nil)\n\n\t\tfor j, acc := range accounts {\n\t\t\tstate, _ := backend.chain.State()\n\t\t\tbw := state.GetBalance(acc)\n\t\t\tbh := trie.GetBalance(acc)\n\n\t\t\tif (bw != nil && bh == nil) || (bw == nil && bh != nil) {\n\t\t\t\tt.Errorf(\"test %d, account %d: balance mismatch: have %v, want %v\", i, j, bh, bw)\n\t\t\t}\n\t\t\tif bw != nil && bh != nil && bw.Cmp(bw) != 0 {\n\t\t\t\tt.Errorf(\"test %d, account %d: balance mismatch: have %v, want %v\", i, j, bh, bw)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Tests that the transaction receipts can be retrieved based on hashes.\nfunc TestGetBlockReceipts64(t *testing.T) { testGetBlockReceipts(t, 64) }\nfunc TestGetBlockReceipts65(t *testing.T) { testGetBlockReceipts(t, 65) }\n\nfunc testGetBlockReceipts(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\t// Define three accounts to simulate transactions with\n\tacc1Key, _ := crypto.HexToECDSA(\"8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a\")\n\tacc2Key, _ := crypto.HexToECDSA(\"49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee\")\n\tacc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey)\n\tacc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey)\n\n\tsigner := types.HomesteadSigner{}\n\t// Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test)\n\tgenerator := func(i int, block *core.BlockGen) {\n\t\tswitch i {\n\t\tcase 0:\n\t\t\t// In block 1, the test bank sends account #1 some ether.\n\t\t\ttx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testKey)\n\t\t\tblock.AddTx(tx)\n\t\tcase 1:\n\t\t\t// In block 2, the test bank sends some more ether to account #1.\n\t\t\t// acc1Addr passes it on to account #2.\n\t\t\ttx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey)\n\t\t\ttx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key)\n\t\t\tblock.AddTx(tx1)\n\t\t\tblock.AddTx(tx2)\n\t\tcase 2:\n\t\t\t// Block 3 is empty but was mined by account #2.\n\t\t\tblock.SetCoinbase(acc2Addr)\n\t\t\tblock.SetExtra([]byte(\"yeehaw\"))\n\t\tcase 3:\n\t\t\t// Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data).\n\t\t\tb2 := block.PrevBlock(1).Header()\n\t\t\tb2.Extra = []byte(\"foo\")\n\t\t\tblock.AddUncle(b2)\n\t\t\tb3 := block.PrevBlock(2).Header()\n\t\t\tb3.Extra = []byte(\"foo\")\n\t\t\tblock.AddUncle(b3)\n\t\t}\n\t}\n\t// Assemble the test environment\n\tbackend := newTestBackendWithGenerator(4, generator)\n\tdefer backend.close()\n\n\tpeer, _ := newTestPeer(\"peer\", protocol, backend)\n\tdefer peer.close()\n\n\t// Collect the hashes to request, and the response to expect\n\tvar (\n\t\thashes   []common.Hash\n\t\treceipts []types.Receipts\n\t)\n\tfor i := uint64(0); i <= backend.chain.CurrentBlock().NumberU64(); i++ {\n\t\tblock := backend.chain.GetBlockByNumber(i)\n\n\t\thashes = append(hashes, block.Hash())\n\t\treceipts = 
append(receipts, backend.chain.GetReceiptsByHash(block.Hash()))\n\t}\n\t// Send the hash request and verify the response\n\tp2p.Send(peer.app, GetReceiptsMsg, hashes)\n\tif err := p2p.ExpectMsg(peer.app, ReceiptsMsg, receipts); err != nil {\n\t\tt.Errorf(\"receipts mismatch: %v\", err)\n\t}\n}\n"
  },
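  {
    "path": "examples/headers_query_sketch.go",
    "content": "// NOTE: hypothetical example file, added for exposition only; it is not part\n// of go-ethereum. It shows the number arithmetic behind the GetBlockHeaders\n// test cases above and the query-answering logic that follows: each step\n// advances by skip+1 (or retreats, when reversed), stops at the chain\n// boundaries, and treats a wrapped uint64 as the skip-overflow attack.\npackage main\n\nimport \"fmt\"\n\n// headerNumbers returns the block numbers a (origin, amount, skip, reverse)\n// query would cover on a chain whose head is at height `head`.\nfunc headerNumbers(origin, amount, skip, head uint64, reverse bool) []uint64 {\n\tvar numbers []uint64\n\tfor len(numbers) < int(amount) && origin <= head {\n\t\tnumbers = append(numbers, origin)\n\t\tif reverse {\n\t\t\tif origin < skip+1 {\n\t\t\t\tbreak // walked past the genesis block\n\t\t\t}\n\t\t\torigin -= skip + 1\n\t\t} else {\n\t\t\tnext := origin + skip + 1\n\t\t\tif next <= origin {\n\t\t\t\tbreak // uint64 overflow: the skip-overflow attack guard\n\t\t\t}\n\t\t\torigin = next\n\t\t}\n\t}\n\treturn numbers\n}\n\nfunc main() {\n\thead := uint64(1024 + 15)\n\tfmt.Println(headerNumbers(512, 3, 0, head, false)) // [512 513 514]\n\tfmt.Println(headerNumbers(512, 3, 3, head, true))  // [512 508 504]\n\tfmt.Println(headerNumbers(4, 3, 2, head, true))    // [4 1]: truncated mid skip\n\tfmt.Println(headerNumbers(1, 2, ^uint64(0), head, false)) // [1]: overflow guard\n}\n"
  },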
  {
    "path": "eth/protocols/eth/handlers.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\n// handleGetBlockHeaders handles Block header query, collect the requested headers and reply\nfunc handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {\n\t// Decode the complex header query\n\tvar query GetBlockHeadersPacket\n\tif err := msg.Decode(&query); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tresponse := answerGetBlockHeadersQuery(backend, &query, peer)\n\treturn peer.SendBlockHeaders(response)\n}\n\n// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders\nfunc handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {\n\t// Decode the complex header query\n\tvar query GetBlockHeadersPacket66\n\tif err := msg.Decode(&query); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tresponse := answerGetBlockHeadersQuery(backend, query.GetBlockHeadersPacket, peer)\n\treturn peer.ReplyBlockHeaders(query.RequestId, response)\n}\n\nfunc answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, peer *Peer) []*types.Header {\n\thashMode := query.Origin.Hash != (common.Hash{})\n\tfirst := true\n\tmaxNonCanonical := uint64(100)\n\n\t// Gather headers until the fetch or network limits is reached\n\tvar (\n\t\tbytes   common.StorageSize\n\t\theaders []*types.Header\n\t\tunknown bool\n\t\tlookups int\n\t)\n\tfor !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit &&\n\t\tlen(headers) < maxHeadersServe && lookups < 2*maxHeadersServe {\n\t\tlookups++\n\t\t// Retrieve the next header satisfying the query\n\t\tvar origin *types.Header\n\t\tif hashMode {\n\t\t\tif first {\n\t\t\t\tfirst = false\n\t\t\t\torigin = backend.Chain().GetHeaderByHash(query.Origin.Hash)\n\t\t\t\tif origin != nil {\n\t\t\t\t\tquery.Origin.Number = origin.Number.Uint64()\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\torigin = backend.Chain().GetHeader(query.Origin.Hash, query.Origin.Number)\n\t\t\t}\n\t\t} else {\n\t\t\torigin = backend.Chain().GetHeaderByNumber(query.Origin.Number)\n\t\t}\n\t\tif origin == nil {\n\t\t\tbreak\n\t\t}\n\t\theaders = append(headers, origin)\n\t\tbytes += estHeaderSize\n\n\t\t// Advance to the next header of the query\n\t\tswitch {\n\t\tcase hashMode && query.Reverse:\n\t\t\t// Hash based traversal towards the genesis block\n\t\t\tancestor := query.Skip + 1\n\t\t\tif ancestor == 0 {\n\t\t\t\tunknown = true\n\t\t\t} else {\n\t\t\t\tquery.Origin.Hash, 
query.Origin.Number = backend.Chain().GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical)\n\t\t\t\tunknown = (query.Origin.Hash == common.Hash{})\n\t\t\t}\n\t\tcase hashMode && !query.Reverse:\n\t\t\t// Hash based traversal towards the leaf block\n\t\t\tvar (\n\t\t\t\tcurrent = origin.Number.Uint64()\n\t\t\t\tnext    = current + query.Skip + 1\n\t\t\t)\n\t\t\tif next <= current {\n\t\t\t\tinfos, _ := json.MarshalIndent(peer.Peer.Info(), \"\", \"  \")\n\t\t\t\tpeer.Log().Warn(\"GetBlockHeaders skip overflow attack\", \"current\", current, \"skip\", query.Skip, \"next\", next, \"attacker\", infos)\n\t\t\t\tunknown = true\n\t\t\t} else {\n\t\t\t\tif header := backend.Chain().GetHeaderByNumber(next); header != nil {\n\t\t\t\t\tnextHash := header.Hash()\n\t\t\t\t\texpOldHash, _ := backend.Chain().GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical)\n\t\t\t\t\tif expOldHash == query.Origin.Hash {\n\t\t\t\t\t\tquery.Origin.Hash, query.Origin.Number = nextHash, next\n\t\t\t\t\t} else {\n\t\t\t\t\t\tunknown = true\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tunknown = true\n\t\t\t\t}\n\t\t\t}\n\t\tcase query.Reverse:\n\t\t\t// Number based traversal towards the genesis block\n\t\t\tif query.Origin.Number >= query.Skip+1 {\n\t\t\t\tquery.Origin.Number -= query.Skip + 1\n\t\t\t} else {\n\t\t\t\tunknown = true\n\t\t\t}\n\n\t\tcase !query.Reverse:\n\t\t\t// Number based traversal towards the leaf block\n\t\t\tquery.Origin.Number += query.Skip + 1\n\t\t}\n\t}\n\treturn headers\n}\n\nfunc handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error {\n\t// Decode the block body retrieval message\n\tvar query GetBlockBodiesPacket\n\tif err := msg.Decode(&query); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tresponse := answerGetBlockBodiesQuery(backend, query, peer)\n\treturn peer.SendBlockBodiesRLP(response)\n}\n\nfunc handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {\n\t// Decode the block body retrieval message\n\tvar query GetBlockBodiesPacket66\n\tif err := msg.Decode(&query); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tresponse := answerGetBlockBodiesQuery(backend, query.GetBlockBodiesPacket, peer)\n\treturn peer.ReplyBlockBodiesRLP(query.RequestId, response)\n}\n\nfunc answerGetBlockBodiesQuery(backend Backend, query GetBlockBodiesPacket, peer *Peer) []rlp.RawValue {\n\t// Gather blocks until the fetch or network limits are reached\n\tvar (\n\t\tbytes  int\n\t\tbodies []rlp.RawValue\n\t)\n\tfor lookups, hash := range query {\n\t\tif bytes >= softResponseLimit || len(bodies) >= maxBodiesServe ||\n\t\t\tlookups >= 2*maxBodiesServe {\n\t\t\tbreak\n\t\t}\n\t\tif data := backend.Chain().GetBodyRLP(hash); len(data) != 0 {\n\t\t\tbodies = append(bodies, data)\n\t\t\tbytes += len(data)\n\t\t}\n\t}\n\treturn bodies\n}\n\nfunc handleGetNodeData(backend Backend, msg Decoder, peer *Peer) error {\n\t// Decode the trie node data retrieval message\n\tvar query GetNodeDataPacket\n\tif err := msg.Decode(&query); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tresponse := answerGetNodeDataQuery(backend, query, peer)\n\treturn peer.SendNodeData(response)\n}\n\nfunc handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error {\n\t// Decode the trie node data retrieval message\n\tvar query GetNodeDataPacket66\n\tif err := msg.Decode(&query); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, 
err)\n\t}\n\tresponse := answerGetNodeDataQuery(backend, query.GetNodeDataPacket, peer)\n\treturn peer.ReplyNodeData(query.RequestId, response)\n}\n\nfunc answerGetNodeDataQuery(backend Backend, query GetNodeDataPacket, peer *Peer) [][]byte {\n\t// Gather state data until the fetch or network limits are reached\n\tvar (\n\t\tbytes int\n\t\tnodes [][]byte\n\t)\n\tfor lookups, hash := range query {\n\t\tif bytes >= softResponseLimit || len(nodes) >= maxNodeDataServe ||\n\t\t\tlookups >= 2*maxNodeDataServe {\n\t\t\tbreak\n\t\t}\n\t\t// Retrieve the requested state entry\n\t\tif bloom := backend.StateBloom(); bloom != nil && !bloom.Contains(hash[:]) {\n\t\t\t// Only lookup the trie node if there's a chance that we actually have it\n\t\t\tcontinue\n\t\t}\n\t\tentry, err := backend.Chain().TrieNode(hash)\n\t\tif len(entry) == 0 || err != nil {\n\t\t\t// Read the contract code with prefix only to save unnecessary lookups.\n\t\t\tentry, err = backend.Chain().ContractCodeWithPrefix(hash)\n\t\t}\n\t\tif err == nil && len(entry) > 0 {\n\t\t\tnodes = append(nodes, entry)\n\t\t\tbytes += len(entry)\n\t\t}\n\t}\n\treturn nodes\n}\n\nfunc handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error {\n\t// Decode the block receipts retrieval message\n\tvar query GetReceiptsPacket\n\tif err := msg.Decode(&query); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tresponse := answerGetReceiptsQuery(backend, query, peer)\n\treturn peer.SendReceiptsRLP(response)\n}\n\nfunc handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error {\n\t// Decode the block receipts retrieval message\n\tvar query GetReceiptsPacket66\n\tif err := msg.Decode(&query); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tresponse := answerGetReceiptsQuery(backend, query.GetReceiptsPacket, peer)\n\treturn peer.ReplyReceiptsRLP(query.RequestId, response)\n}\n\nfunc answerGetReceiptsQuery(backend Backend, query GetReceiptsPacket, peer *Peer) []rlp.RawValue {\n\t// Gather state data until the fetch or network limits are reached\n\tvar (\n\t\tbytes    int\n\t\treceipts []rlp.RawValue\n\t)\n\tfor lookups, hash := range query {\n\t\tif bytes >= softResponseLimit || len(receipts) >= maxReceiptsServe ||\n\t\t\tlookups >= 2*maxReceiptsServe {\n\t\t\tbreak\n\t\t}\n\t\t// Retrieve the requested block's receipts\n\t\tresults := backend.Chain().GetReceiptsByHash(hash)\n\t\tif results == nil {\n\t\t\tif header := backend.Chain().GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t// If known, encode and queue for response packet\n\t\tif encoded, err := rlp.EncodeToBytes(results); err != nil {\n\t\t\tlog.Error(\"Failed to encode receipt\", \"err\", err)\n\t\t} else {\n\t\t\treceipts = append(receipts, encoded)\n\t\t\tbytes += len(encoded)\n\t\t}\n\t}\n\treturn receipts\n}\n\nfunc handleNewBlockhashes(backend Backend, msg Decoder, peer *Peer) error {\n\t// A batch of new block announcements just arrived\n\tann := new(NewBlockHashesPacket)\n\tif err := msg.Decode(ann); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\t// Mark the hashes as present at the remote node\n\tfor _, block := range *ann {\n\t\tpeer.markBlock(block.Hash)\n\t}\n\t// Deliver them all to the backend for queuing\n\treturn backend.Handle(peer, ann)\n}\n\nfunc handleNewBlock(backend Backend, msg Decoder, peer *Peer) error {\n\t// Retrieve and decode the propagated block\n\tann := 
new(NewBlockPacket)\n\tif err := msg.Decode(ann); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tif hash := types.CalcUncleHash(ann.Block.Uncles()); hash != ann.Block.UncleHash() {\n\t\tlog.Warn(\"Propagated block has invalid uncles\", \"have\", hash, \"exp\", ann.Block.UncleHash())\n\t\treturn nil // TODO(karalabe): return error eventually, but wait a few releases\n\t}\n\tif hash := types.DeriveSha(ann.Block.Transactions(), trie.NewStackTrie(nil)); hash != ann.Block.TxHash() {\n\t\tlog.Warn(\"Propagated block has invalid body\", \"have\", hash, \"exp\", ann.Block.TxHash())\n\t\treturn nil // TODO(karalabe): return error eventually, but wait a few releases\n\t}\n\tif err := ann.sanityCheck(); err != nil {\n\t\treturn err\n\t}\n\tann.Block.ReceivedAt = msg.Time()\n\tann.Block.ReceivedFrom = peer\n\n\t// Mark the peer as owning the block\n\tpeer.markBlock(ann.Block.Hash())\n\n\treturn backend.Handle(peer, ann)\n}\n\nfunc handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error {\n\t// A batch of headers arrived to one of our previous requests\n\tres := new(BlockHeadersPacket)\n\tif err := msg.Decode(res); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\treturn backend.Handle(peer, res)\n}\n\nfunc handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error {\n\t// A batch of headers arrived to one of our previous requests\n\tres := new(BlockHeadersPacket66)\n\tif err := msg.Decode(res); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\treturn backend.Handle(peer, &res.BlockHeadersPacket)\n}\n\nfunc handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error {\n\t// A batch of block bodies arrived to one of our previous requests\n\tres := new(BlockBodiesPacket)\n\tif err := msg.Decode(res); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\treturn backend.Handle(peer, res)\n}\n\nfunc handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error {\n\t// A batch of block bodies arrived to one of our previous requests\n\tres := new(BlockBodiesPacket66)\n\tif err := msg.Decode(res); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\treturn backend.Handle(peer, &res.BlockBodiesPacket)\n}\n\nfunc handleNodeData(backend Backend, msg Decoder, peer *Peer) error {\n\t// A batch of node state data arrived to one of our previous requests\n\tres := new(NodeDataPacket)\n\tif err := msg.Decode(res); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\treturn backend.Handle(peer, res)\n}\n\nfunc handleNodeData66(backend Backend, msg Decoder, peer *Peer) error {\n\t// A batch of node state data arrived to one of our previous requests\n\tres := new(NodeDataPacket66)\n\tif err := msg.Decode(res); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\treturn backend.Handle(peer, &res.NodeDataPacket)\n}\n\nfunc handleReceipts(backend Backend, msg Decoder, peer *Peer) error {\n\t// A batch of receipts arrived to one of our previous requests\n\tres := new(ReceiptsPacket)\n\tif err := msg.Decode(res); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\treturn backend.Handle(peer, res)\n}\n\nfunc handleReceipts66(backend Backend, msg Decoder, peer *Peer) error {\n\t// A batch of receipts arrived to one of our previous requests\n\tres := new(ReceiptsPacket66)\n\tif err := msg.Decode(res); 
err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\treturn backend.Handle(peer, &res.ReceiptsPacket)\n}\n\nfunc handleNewPooledTransactionHashes(backend Backend, msg Decoder, peer *Peer) error {\n\t// New transaction announcements arrived, make sure we have\n\t// a valid and fresh chain to handle them\n\tif !backend.AcceptTxs() {\n\t\treturn nil\n\t}\n\tann := new(NewPooledTransactionHashesPacket)\n\tif err := msg.Decode(ann); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\t// Schedule all the unknown hashes for retrieval\n\tfor _, hash := range *ann {\n\t\tpeer.markTransaction(hash)\n\t}\n\treturn backend.Handle(peer, ann)\n}\n\nfunc handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error {\n\t// Decode the pooled transactions retrieval message\n\tvar query GetPooledTransactionsPacket\n\tif err := msg.Decode(&query); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\thashes, txs := answerGetPooledTransactions(backend, query, peer)\n\treturn peer.SendPooledTransactionsRLP(hashes, txs)\n}\n\nfunc handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error {\n\t// Decode the pooled transactions retrieval message\n\tvar query GetPooledTransactionsPacket66\n\tif err := msg.Decode(&query); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\thashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer)\n\treturn peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs)\n}\n\nfunc answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPacket, peer *Peer) ([]common.Hash, []rlp.RawValue) {\n\t// Gather transactions until the fetch or network limits are reached\n\tvar (\n\t\tbytes  int\n\t\thashes []common.Hash\n\t\ttxs    []rlp.RawValue\n\t)\n\tfor _, hash := range query {\n\t\tif bytes >= softResponseLimit {\n\t\t\tbreak\n\t\t}\n\t\t// Retrieve the requested transaction, skipping if unknown to us\n\t\ttx := backend.TxPool().Get(hash)\n\t\tif tx == nil {\n\t\t\tcontinue\n\t\t}\n\t\t// If known, encode and queue for response packet\n\t\tif encoded, err := rlp.EncodeToBytes(tx); err != nil {\n\t\t\tlog.Error(\"Failed to encode transaction\", \"err\", err)\n\t\t} else {\n\t\t\thashes = append(hashes, hash)\n\t\t\ttxs = append(txs, encoded)\n\t\t\tbytes += len(encoded)\n\t\t}\n\t}\n\treturn hashes, txs\n}\n\nfunc handleTransactions(backend Backend, msg Decoder, peer *Peer) error {\n\t// Transactions arrived, make sure we have a valid and fresh chain to handle them\n\tif !backend.AcceptTxs() {\n\t\treturn nil\n\t}\n\t// Transactions can be processed, parse all of them and deliver to the pool\n\tvar txs TransactionsPacket\n\tif err := msg.Decode(&txs); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tfor i, tx := range txs {\n\t\t// Validate and mark the remote transaction\n\t\tif tx == nil {\n\t\t\treturn fmt.Errorf(\"%w: transaction %d is nil\", errDecode, i)\n\t\t}\n\t\tpeer.markTransaction(tx.Hash())\n\t}\n\treturn backend.Handle(peer, &txs)\n}\n\nfunc handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error {\n\t// Transactions arrived, make sure we have a valid and fresh chain to handle them\n\tif !backend.AcceptTxs() {\n\t\treturn nil\n\t}\n\t// Transactions can be processed, parse all of them and deliver to the pool\n\tvar txs PooledTransactionsPacket\n\tif err := msg.Decode(&txs); err != nil 
{\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tfor i, tx := range txs {\n\t\t// Validate and mark the remote transaction\n\t\tif tx == nil {\n\t\t\treturn fmt.Errorf(\"%w: transaction %d is nil\", errDecode, i)\n\t\t}\n\t\tpeer.markTransaction(tx.Hash())\n\t}\n\treturn backend.Handle(peer, &txs)\n}\n\nfunc handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error {\n\t// Transactions arrived, make sure we have a valid and fresh chain to handle them\n\tif !backend.AcceptTxs() {\n\t\treturn nil\n\t}\n\t// Transactions can be processed, parse all of them and deliver to the pool\n\tvar txs PooledTransactionsPacket66\n\tif err := msg.Decode(&txs); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tfor i, tx := range txs.PooledTransactionsPacket {\n\t\t// Validate and mark the remote transaction\n\t\tif tx == nil {\n\t\t\treturn fmt.Errorf(\"%w: transaction %d is nil\", errDecode, i)\n\t\t}\n\t\tpeer.markTransaction(tx.Hash())\n\t}\n\treturn backend.Handle(peer, &txs.PooledTransactionsPacket)\n}\n"
  },
  {
    "path": "eth/protocols/eth/handshake.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n)\n\nconst (\n\t// handshakeTimeout is the maximum allowed time for the `eth` handshake to\n\t// complete before dropping the connection.= as malicious.\n\thandshakeTimeout = 5 * time.Second\n)\n\n// Handshake executes the eth protocol handshake, negotiating version number,\n// network IDs, difficulties, head and genesis blocks.\nfunc (p *Peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error {\n\t// Send out own handshake in a new thread\n\terrc := make(chan error, 2)\n\n\tvar status StatusPacket // safe to read after two values have been received from errc\n\n\tgo func() {\n\t\terrc <- p2p.Send(p.rw, StatusMsg, &StatusPacket{\n\t\t\tProtocolVersion: uint32(p.version),\n\t\t\tNetworkID:       network,\n\t\t\tTD:              td,\n\t\t\tHead:            head,\n\t\t\tGenesis:         genesis,\n\t\t\tForkID:          forkID,\n\t\t})\n\t}()\n\tgo func() {\n\t\terrc <- p.readStatus(network, &status, genesis, forkFilter)\n\t}()\n\ttimeout := time.NewTimer(handshakeTimeout)\n\tdefer timeout.Stop()\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase err := <-errc:\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-timeout.C:\n\t\t\treturn p2p.DiscReadTimeout\n\t\t}\n\t}\n\tp.td, p.head = status.TD, status.Head\n\n\t// TD at mainnet block #7753254 is 76 bits. 
If it becomes 10 million times\n\t// larger, it will still fit within 100 bits (10^7 < 2^24, and 76 + 24 = 100).\n\tif tdlen := p.td.BitLen(); tdlen > 100 {\n\t\treturn fmt.Errorf(\"too large total difficulty: bitlen %d\", tdlen)\n\t}\n\treturn nil\n}\n\n// readStatus reads the remote handshake message.\nfunc (p *Peer) readStatus(network uint64, status *StatusPacket, genesis common.Hash, forkFilter forkid.Filter) error {\n\tmsg, err := p.rw.ReadMsg()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif msg.Code != StatusMsg {\n\t\treturn fmt.Errorf(\"%w: first msg has code %x (!= %x)\", errNoStatusMsg, msg.Code, StatusMsg)\n\t}\n\tif msg.Size > maxMessageSize {\n\t\treturn fmt.Errorf(\"%w: %v > %v\", errMsgTooLarge, msg.Size, maxMessageSize)\n\t}\n\t// Decode the handshake and make sure everything matches\n\tif err := msg.Decode(&status); err != nil {\n\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t}\n\tif status.NetworkID != network {\n\t\treturn fmt.Errorf(\"%w: %d (!= %d)\", errNetworkIDMismatch, status.NetworkID, network)\n\t}\n\tif uint(status.ProtocolVersion) != p.version {\n\t\treturn fmt.Errorf(\"%w: %d (!= %d)\", errProtocolVersionMismatch, status.ProtocolVersion, p.version)\n\t}\n\tif status.Genesis != genesis {\n\t\treturn fmt.Errorf(\"%w: %x (!= %x)\", errGenesisMismatch, status.Genesis, genesis)\n\t}\n\tif err := forkFilter(status.ForkID); err != nil {\n\t\treturn fmt.Errorf(\"%w: %v\", errForkIDRejected, err)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "eth/protocols/eth/handshake_test.go",
    "content": "// Copyright 2014 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\n// Tests that handshake failures are detected and reported correctly.\nfunc TestHandshake64(t *testing.T) { testHandshake(t, 64) }\nfunc TestHandshake65(t *testing.T) { testHandshake(t, 65) }\n\nfunc testHandshake(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\t// Create a test backend only to have some valid genesis chain\n\tbackend := newTestBackend(3)\n\tdefer backend.close()\n\n\tvar (\n\t\tgenesis = backend.chain.Genesis()\n\t\thead    = backend.chain.CurrentBlock()\n\t\ttd      = backend.chain.GetTd(head.Hash(), head.NumberU64())\n\t\tforkID  = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64())\n\t)\n\ttests := []struct {\n\t\tcode uint64\n\t\tdata interface{}\n\t\twant error\n\t}{\n\t\t{\n\t\t\tcode: TransactionsMsg, data: []interface{}{},\n\t\t\twant: errNoStatusMsg,\n\t\t},\n\t\t{\n\t\t\tcode: StatusMsg, data: StatusPacket{10, 1, td, head.Hash(), genesis.Hash(), forkID},\n\t\t\twant: errProtocolVersionMismatch,\n\t\t},\n\t\t{\n\t\t\tcode: StatusMsg, data: StatusPacket{uint32(protocol), 999, td, head.Hash(), genesis.Hash(), forkID},\n\t\t\twant: errNetworkIDMismatch,\n\t\t},\n\t\t{\n\t\t\tcode: StatusMsg, data: StatusPacket{uint32(protocol), 1, td, head.Hash(), common.Hash{3}, forkID},\n\t\t\twant: errGenesisMismatch,\n\t\t},\n\t\t{\n\t\t\tcode: StatusMsg, data: StatusPacket{uint32(protocol), 1, td, head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}},\n\t\t\twant: errForkIDRejected,\n\t\t},\n\t}\n\tfor i, test := range tests {\n\t\t// Create the two peers to shake with each other\n\t\tapp, net := p2p.MsgPipe()\n\t\tdefer app.Close()\n\t\tdefer net.Close()\n\n\t\tpeer := NewPeer(protocol, p2p.NewPeer(enode.ID{}, \"peer\", nil), net, nil)\n\t\tdefer peer.Close()\n\n\t\t// Send the junk test with one peer, check the handshake failure\n\t\tgo p2p.Send(app, test.code, test.data)\n\n\t\terr := peer.Handshake(1, td, head.Hash(), genesis.Hash(), forkID, forkid.NewFilter(backend.chain))\n\t\tif err == nil {\n\t\t\tt.Errorf(\"test %d: protocol returned nil error, want %q\", i, test.want)\n\t\t} else if !errors.Is(err, test.want) {\n\t\t\tt.Errorf(\"test %d: wrong error: got %q, want %q\", i, err, test.want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "eth/protocols/eth/peer.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"math/big\"\n\t\"math/rand\"\n\t\"sync\"\n\n\tmapset \"github.com/deckarep/golang-set\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\nconst (\n\t// maxKnownTxs is the maximum transactions hashes to keep in the known list\n\t// before starting to randomly evict them.\n\tmaxKnownTxs = 32768\n\n\t// maxKnownBlocks is the maximum block hashes to keep in the known list\n\t// before starting to randomly evict them.\n\tmaxKnownBlocks = 1024\n\n\t// maxQueuedTxs is the maximum number of transactions to queue up before dropping\n\t// older broadcasts.\n\tmaxQueuedTxs = 4096\n\n\t// maxQueuedTxAnns is the maximum number of transaction announcements to queue up\n\t// before dropping older announcements.\n\tmaxQueuedTxAnns = 4096\n\n\t// maxQueuedBlocks is the maximum number of block propagations to queue up before\n\t// dropping broadcasts. There's not much point in queueing stale blocks, so a few\n\t// that might cover uncles should be enough.\n\tmaxQueuedBlocks = 4\n\n\t// maxQueuedBlockAnns is the maximum number of block announcements to queue up before\n\t// dropping broadcasts. 
Similarly to block propagations, there's no point in queueing\n\t// above some healthy uncle limit, so use that.\n\tmaxQueuedBlockAnns = 4\n)\n\n// max is a helper function which returns the larger of the two given integers.\nfunc max(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n// Peer is a collection of relevant information we have about an `eth` peer.\ntype Peer struct {\n\tid string // Unique ID for the peer, cached\n\n\t*p2p.Peer                   // The embedded P2P package peer\n\trw        p2p.MsgReadWriter // Input/output streams for eth\n\tversion   uint              // Protocol version negotiated\n\n\thead common.Hash // Latest advertised head block hash\n\ttd   *big.Int    // Latest advertised head block total difficulty\n\n\tknownBlocks     mapset.Set             // Set of block hashes known to be known by this peer\n\tqueuedBlocks    chan *blockPropagation // Queue of blocks to broadcast to the peer\n\tqueuedBlockAnns chan *types.Block      // Queue of blocks to announce to the peer\n\n\ttxpool      TxPool             // Transaction pool used by the broadcasters for liveness checks\n\tknownTxs    mapset.Set         // Set of transaction hashes known to be known by this peer\n\ttxBroadcast chan []common.Hash // Channel used to queue transaction propagation requests\n\ttxAnnounce  chan []common.Hash // Channel used to queue transaction announcement requests\n\n\tterm chan struct{} // Termination channel to stop the broadcasters\n\tlock sync.RWMutex  // Mutex protecting the internal fields\n}\n\n// NewPeer creates a wrapper for a network connection and negotiated protocol\n// version.\nfunc NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Peer {\n\tpeer := &Peer{\n\t\tid:              p.ID().String(),\n\t\tPeer:            p,\n\t\trw:              rw,\n\t\tversion:         version,\n\t\tknownTxs:        mapset.NewSet(),\n\t\tknownBlocks:     mapset.NewSet(),\n\t\tqueuedBlocks:    make(chan *blockPropagation, maxQueuedBlocks),\n\t\tqueuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns),\n\t\ttxBroadcast:     make(chan []common.Hash),\n\t\ttxAnnounce:      make(chan []common.Hash),\n\t\ttxpool:          txpool,\n\t\tterm:            make(chan struct{}),\n\t}\n\t// Start up all the broadcasters\n\tgo peer.broadcastBlocks()\n\tgo peer.broadcastTransactions()\n\tif version >= ETH65 {\n\t\tgo peer.announceTransactions()\n\t}\n\treturn peer\n}\n\n// Close signals the broadcast goroutine to terminate. Only ever call this if\n// you created the peer yourself via NewPeer. 
Otherwise let whoever created it\n// clean it up!\nfunc (p *Peer) Close() {\n\tclose(p.term)\n}\n\n// ID retrieves the peer's unique identifier.\nfunc (p *Peer) ID() string {\n\treturn p.id\n}\n\n// Version retrieves the peer's negotiated `eth` protocol version.\nfunc (p *Peer) Version() uint {\n\treturn p.version\n}\n\n// Head retrieves the current head hash and total difficulty of the peer.\nfunc (p *Peer) Head() (hash common.Hash, td *big.Int) {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\tcopy(hash[:], p.head[:])\n\treturn hash, new(big.Int).Set(p.td)\n}\n\n// SetHead updates the head hash and total difficulty of the peer.\nfunc (p *Peer) SetHead(hash common.Hash, td *big.Int) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\tcopy(p.head[:], hash[:])\n\tp.td.Set(td)\n}\n\n// KnownBlock returns whether the peer is known to already have a block.\nfunc (p *Peer) KnownBlock(hash common.Hash) bool {\n\treturn p.knownBlocks.Contains(hash)\n}\n\n// KnownTransaction returns whether the peer is known to already have a transaction.\nfunc (p *Peer) KnownTransaction(hash common.Hash) bool {\n\treturn p.knownTxs.Contains(hash)\n}\n\n// markBlock marks a block as known for the peer, ensuring that the block will\n// never be propagated to this particular peer.\nfunc (p *Peer) markBlock(hash common.Hash) {\n\t// If we reached the memory allowance, drop a previously known block hash\n\tfor p.knownBlocks.Cardinality() >= maxKnownBlocks {\n\t\tp.knownBlocks.Pop()\n\t}\n\tp.knownBlocks.Add(hash)\n}\n\n// markTransaction marks a transaction as known for the peer, ensuring that it\n// will never be propagated to this particular peer.\nfunc (p *Peer) markTransaction(hash common.Hash) {\n\t// If we reached the memory allowance, drop a previously known transaction hash\n\tfor p.knownTxs.Cardinality() >= maxKnownTxs {\n\t\tp.knownTxs.Pop()\n\t}\n\tp.knownTxs.Add(hash)\n}\n\n// SendTransactions sends transactions to the peer and includes the hashes\n// in its transaction hash set for future reference.\n//\n// This method is a helper used by the async transaction sender. Don't call it\n// directly as the queueing (memory) and transmission (bandwidth) costs should\n// not be managed directly.\n//\n// The reason this is public is to allow packages using this protocol to write\n// tests that directly send messages without having to do the async queueing.\nfunc (p *Peer) SendTransactions(txs types.Transactions) error {\n\t// Mark all the transactions as known, but ensure we don't overflow our limits\n\tfor p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(txs)) {\n\t\tp.knownTxs.Pop()\n\t}\n\tfor _, tx := range txs {\n\t\tp.knownTxs.Add(tx.Hash())\n\t}\n\treturn p2p.Send(p.rw, TransactionsMsg, txs)\n}\n\n// AsyncSendTransactions queues a list of transactions (by hash) to eventually\n// propagate to a remote peer. 
The number of pending sends is capped (new ones\n// will force old sends to be dropped).\nfunc (p *Peer) AsyncSendTransactions(hashes []common.Hash) {\n\tselect {\n\tcase p.txBroadcast <- hashes:\n\t\t// Mark all the transactions as known, but ensure we don't overflow our limits\n\t\tfor p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {\n\t\t\tp.knownTxs.Pop()\n\t\t}\n\t\tfor _, hash := range hashes {\n\t\t\tp.knownTxs.Add(hash)\n\t\t}\n\tcase <-p.term:\n\t\tp.Log().Debug(\"Dropping transaction propagation\", \"count\", len(hashes))\n\t}\n}\n\n// sendPooledTransactionHashes sends transaction hashes to the peer and includes\n// them in its transaction hash set for future reference.\n//\n// This method is a helper used by the async transaction announcer. Don't call it\n// directly as the queueing (memory) and transmission (bandwidth) costs should\n// not be managed directly.\nfunc (p *Peer) sendPooledTransactionHashes(hashes []common.Hash) error {\n\t// Mark all the transactions as known, but ensure we don't overflow our limits\n\tfor p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {\n\t\tp.knownTxs.Pop()\n\t}\n\tfor _, hash := range hashes {\n\t\tp.knownTxs.Add(hash)\n\t}\n\treturn p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket(hashes))\n}\n\n// AsyncSendPooledTransactionHashes queues a list of transaction hashes to eventually\n// announce to a remote peer. The number of pending sends is capped (new ones\n// will force old sends to be dropped).\nfunc (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {\n\tselect {\n\tcase p.txAnnounce <- hashes:\n\t\t// Mark all the transactions as known, but ensure we don't overflow our limits\n\t\tfor p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {\n\t\t\tp.knownTxs.Pop()\n\t\t}\n\t\tfor _, hash := range hashes {\n\t\t\tp.knownTxs.Add(hash)\n\t\t}\n\tcase <-p.term:\n\t\tp.Log().Debug(\"Dropping transaction announcement\", \"count\", len(hashes))\n\t}\n}\n\n// SendPooledTransactionsRLP sends requested transactions to the peer and adds the\n// hashes to its transaction hash set for future reference.\n//\n// Note, the method assumes the hashes are correct and correspond to the list of\n// transactions being sent.\nfunc (p *Peer) SendPooledTransactionsRLP(hashes []common.Hash, txs []rlp.RawValue) error {\n\t// Mark all the transactions as known, but ensure we don't overflow our limits\n\tfor p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {\n\t\tp.knownTxs.Pop()\n\t}\n\tfor _, hash := range hashes {\n\t\tp.knownTxs.Add(hash)\n\t}\n\treturn p2p.Send(p.rw, PooledTransactionsMsg, txs) // Not packed into PooledTransactionsPacket to avoid RLP decoding\n}\n\n// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP.\nfunc (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error {\n\t// Mark all the transactions as known, but ensure we don't overflow our limits\n\tfor p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {\n\t\tp.knownTxs.Pop()\n\t}\n\tfor _, hash := range hashes {\n\t\tp.knownTxs.Add(hash)\n\t}\n\t// Not packed into PooledTransactionsPacket to avoid RLP decoding\n\treturn p2p.Send(p.rw, PooledTransactionsMsg, PooledTransactionsRLPPacket66{\n\t\tRequestId:                   id,\n\t\tPooledTransactionsRLPPacket: txs,\n\t})\n}\n\n// SendNewBlockHashes announces the availability of a number of blocks through\n// a hash notification.\nfunc (p *Peer) SendNewBlockHashes(hashes []common.Hash, 
numbers []uint64) error {\n\t// Mark all the block hashes as known, but ensure we don't overflow our limits\n\tfor p.knownBlocks.Cardinality() > max(0, maxKnownBlocks-len(hashes)) {\n\t\tp.knownBlocks.Pop()\n\t}\n\tfor _, hash := range hashes {\n\t\tp.knownBlocks.Add(hash)\n\t}\n\trequest := make(NewBlockHashesPacket, len(hashes))\n\tfor i := 0; i < len(hashes); i++ {\n\t\trequest[i].Hash = hashes[i]\n\t\trequest[i].Number = numbers[i]\n\t}\n\treturn p2p.Send(p.rw, NewBlockHashesMsg, request)\n}\n\n// AsyncSendNewBlockHash queues the availability of a block for propagation to a\n// remote peer. If the peer's broadcast queue is full, the event is silently\n// dropped.\nfunc (p *Peer) AsyncSendNewBlockHash(block *types.Block) {\n\tselect {\n\tcase p.queuedBlockAnns <- block:\n\t\t// Mark the block hash as known, but ensure we don't overflow our limits\n\t\tfor p.knownBlocks.Cardinality() >= maxKnownBlocks {\n\t\t\tp.knownBlocks.Pop()\n\t\t}\n\t\tp.knownBlocks.Add(block.Hash())\n\tdefault:\n\t\tp.Log().Debug(\"Dropping block announcement\", \"number\", block.NumberU64(), \"hash\", block.Hash())\n\t}\n}\n\n// SendNewBlock propagates an entire block to a remote peer.\nfunc (p *Peer) SendNewBlock(block *types.Block, td *big.Int) error {\n\t// Mark the block hash as known, but ensure we don't overflow our limits\n\tfor p.knownBlocks.Cardinality() >= maxKnownBlocks {\n\t\tp.knownBlocks.Pop()\n\t}\n\tp.knownBlocks.Add(block.Hash())\n\treturn p2p.Send(p.rw, NewBlockMsg, &NewBlockPacket{\n\t\tBlock: block,\n\t\tTD:    td,\n\t})\n}\n\n// AsyncSendNewBlock queues an entire block for propagation to a remote peer. If\n// the peer's broadcast queue is full, the event is silently dropped.\nfunc (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {\n\tselect {\n\tcase p.queuedBlocks <- &blockPropagation{block: block, td: td}:\n\t\t// Mark the block hash as known, but ensure we don't overflow our limits\n\t\tfor p.knownBlocks.Cardinality() >= maxKnownBlocks {\n\t\t\tp.knownBlocks.Pop()\n\t\t}\n\t\tp.knownBlocks.Add(block.Hash())\n\tdefault:\n\t\tp.Log().Debug(\"Dropping block propagation\", \"number\", block.NumberU64(), \"hash\", block.Hash())\n\t}\n}\n\n// SendBlockHeaders sends a batch of block headers to the remote peer.\nfunc (p *Peer) SendBlockHeaders(headers []*types.Header) error {\n\treturn p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersPacket(headers))\n}\n\n// ReplyBlockHeaders is the eth/66 version of SendBlockHeaders.\nfunc (p *Peer) ReplyBlockHeaders(id uint64, headers []*types.Header) error {\n\treturn p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersPacket66{\n\t\tRequestId:          id,\n\t\tBlockHeadersPacket: headers,\n\t})\n}\n\n// SendBlockBodiesRLP sends a batch of block contents to the remote peer from\n// an already RLP encoded format.\nfunc (p *Peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error {\n\treturn p2p.Send(p.rw, BlockBodiesMsg, bodies) // Not packed into BlockBodiesPacket to avoid RLP decoding\n}\n\n// ReplyBlockBodiesRLP is the eth/66 version of SendBlockBodiesRLP.\nfunc (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error {\n\t// Not packed into BlockBodiesPacket to avoid RLP decoding\n\treturn p2p.Send(p.rw, BlockBodiesMsg, BlockBodiesRLPPacket66{\n\t\tRequestId:            id,\n\t\tBlockBodiesRLPPacket: bodies,\n\t})\n}\n\n// SendNodeData sends a batch of arbitrary internal data, corresponding to the\n// hashes requested.\nfunc (p *Peer) SendNodeData(data [][]byte) error {\n\treturn p2p.Send(p.rw, NodeDataMsg, 
NodeDataPacket(data))\n}\n\n// ReplyNodeData is the eth/66 response to GetNodeData.\nfunc (p *Peer) ReplyNodeData(id uint64, data [][]byte) error {\n\treturn p2p.Send(p.rw, NodeDataMsg, NodeDataPacket66{\n\t\tRequestId:      id,\n\t\tNodeDataPacket: data,\n\t})\n}\n\n// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the\n// ones requested from an already RLP encoded format.\nfunc (p *Peer) SendReceiptsRLP(receipts []rlp.RawValue) error {\n\treturn p2p.Send(p.rw, ReceiptsMsg, receipts) // Not packed into ReceiptsPacket to avoid RLP decoding\n}\n\n// ReplyReceiptsRLP is the eth/66 response to GetReceipts.\nfunc (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error {\n\treturn p2p.Send(p.rw, ReceiptsMsg, ReceiptsRLPPacket66{\n\t\tRequestId:         id,\n\t\tReceiptsRLPPacket: receipts,\n\t})\n}\n\n// RequestOneHeader is a wrapper around the header query functions to fetch a\n// single header. It is used solely by the fetcher.\nfunc (p *Peer) RequestOneHeader(hash common.Hash) error {\n\tp.Log().Debug(\"Fetching single header\", \"hash\", hash)\n\tquery := GetBlockHeadersPacket{\n\t\tOrigin:  HashOrNumber{Hash: hash},\n\t\tAmount:  uint64(1),\n\t\tSkip:    uint64(0),\n\t\tReverse: false,\n\t}\n\tif p.Version() >= ETH66 {\n\t\treturn p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{\n\t\t\tRequestId:             rand.Uint64(),\n\t\t\tGetBlockHeadersPacket: &query,\n\t\t})\n\t}\n\treturn p2p.Send(p.rw, GetBlockHeadersMsg, &query)\n}\n\n// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the\n// specified header query, based on the hash of an origin block.\nfunc (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {\n\tp.Log().Debug(\"Fetching batch of headers\", \"count\", amount, \"fromhash\", origin, \"skip\", skip, \"reverse\", reverse)\n\tquery := GetBlockHeadersPacket{\n\t\tOrigin:  HashOrNumber{Hash: origin},\n\t\tAmount:  uint64(amount),\n\t\tSkip:    uint64(skip),\n\t\tReverse: reverse,\n\t}\n\tif p.Version() >= ETH66 {\n\t\treturn p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{\n\t\t\tRequestId:             rand.Uint64(),\n\t\t\tGetBlockHeadersPacket: &query,\n\t\t})\n\t}\n\treturn p2p.Send(p.rw, GetBlockHeadersMsg, &query)\n}\n\n// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the\n// specified header query, based on the number of an origin block.\nfunc (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {\n\tp.Log().Debug(\"Fetching batch of headers\", \"count\", amount, \"fromnum\", origin, \"skip\", skip, \"reverse\", reverse)\n\tquery := GetBlockHeadersPacket{\n\t\tOrigin:  HashOrNumber{Number: origin},\n\t\tAmount:  uint64(amount),\n\t\tSkip:    uint64(skip),\n\t\tReverse: reverse,\n\t}\n\tif p.Version() >= ETH66 {\n\t\treturn p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{\n\t\t\tRequestId:             rand.Uint64(),\n\t\t\tGetBlockHeadersPacket: &query,\n\t\t})\n\t}\n\treturn p2p.Send(p.rw, GetBlockHeadersMsg, &query)\n}\n\n// ExpectRequestHeadersByNumber is a testing method to mirror the recipient side\n// of the RequestHeadersByNumber operation.\nfunc (p *Peer) ExpectRequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {\n\treq := &GetBlockHeadersPacket{\n\t\tOrigin:  HashOrNumber{Number: origin},\n\t\tAmount:  uint64(amount),\n\t\tSkip:    uint64(skip),\n\t\tReverse: reverse,\n\t}\n\treturn p2p.ExpectMsg(p.rw, GetBlockHeadersMsg, req)\n}\n\n// 
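Like the header retrievals above, the request methods below are\n// version-aware: on eth/66 each query is wrapped in a RequestId envelope so\n// the response can be matched back to the request, while eth/65 and below\n// send the bare packet on the wire.\n//\n// 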
RequestBodies fetches a batch of blocks' bodies corresponding to the hashes\n// specified.\nfunc (p *Peer) RequestBodies(hashes []common.Hash) error {\n\tp.Log().Debug(\"Fetching batch of block bodies\", \"count\", len(hashes))\n\tif p.Version() >= ETH66 {\n\t\treturn p2p.Send(p.rw, GetBlockBodiesMsg, &GetBlockBodiesPacket66{\n\t\t\tRequestId:            rand.Uint64(),\n\t\t\tGetBlockBodiesPacket: hashes,\n\t\t})\n\t}\n\treturn p2p.Send(p.rw, GetBlockBodiesMsg, GetBlockBodiesPacket(hashes))\n}\n\n// RequestNodeData fetches a batch of arbitrary data from a node's known state\n// data, corresponding to the specified hashes.\nfunc (p *Peer) RequestNodeData(hashes []common.Hash) error {\n\tp.Log().Debug(\"Fetching batch of state data\", \"count\", len(hashes))\n\tif p.Version() >= ETH66 {\n\t\treturn p2p.Send(p.rw, GetNodeDataMsg, &GetNodeDataPacket66{\n\t\t\tRequestId:         rand.Uint64(),\n\t\t\tGetNodeDataPacket: hashes,\n\t\t})\n\t}\n\treturn p2p.Send(p.rw, GetNodeDataMsg, GetNodeDataPacket(hashes))\n}\n\n// RequestReceipts fetches a batch of transaction receipts from a remote node.\nfunc (p *Peer) RequestReceipts(hashes []common.Hash) error {\n\tp.Log().Debug(\"Fetching batch of receipts\", \"count\", len(hashes))\n\tif p.Version() >= ETH66 {\n\t\treturn p2p.Send(p.rw, GetReceiptsMsg, &GetReceiptsPacket66{\n\t\t\tRequestId:         rand.Uint64(),\n\t\t\tGetReceiptsPacket: hashes,\n\t\t})\n\t}\n\treturn p2p.Send(p.rw, GetReceiptsMsg, GetReceiptsPacket(hashes))\n}\n\n// RequestTxs fetches a batch of transactions from a remote node.\nfunc (p *Peer) RequestTxs(hashes []common.Hash) error {\n\tp.Log().Debug(\"Fetching batch of transactions\", \"count\", len(hashes))\n\tif p.Version() >= ETH66 {\n\t\treturn p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{\n\t\t\tRequestId:                   rand.Uint64(),\n\t\t\tGetPooledTransactionsPacket: hashes,\n\t\t})\n\t}\n\treturn p2p.Send(p.rw, GetPooledTransactionsMsg, GetPooledTransactionsPacket(hashes))\n}\n"
  },
  {
    "path": "eth/protocols/eth/peer_test.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// This file contains some shares testing functionality, common to  multiple\n// different files and modules being tested.\n\npackage eth\n\nimport (\n\t\"crypto/rand\"\n\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\n// testPeer is a simulated peer to allow testing direct network calls.\ntype testPeer struct {\n\t*Peer\n\n\tnet p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging\n\tapp *p2p.MsgPipeRW    // Application layer reader/writer to simulate the local side\n}\n\n// newTestPeer creates a new peer registered at the given data backend.\nfunc newTestPeer(name string, version uint, backend Backend) (*testPeer, <-chan error) {\n\t// Create a message pipe to communicate through\n\tapp, net := p2p.MsgPipe()\n\n\t// Start the peer on a new thread\n\tvar id enode.ID\n\trand.Read(id[:])\n\n\tpeer := NewPeer(version, p2p.NewPeer(id, name, nil), net, backend.TxPool())\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\terrc <- backend.RunPeer(peer, func(peer *Peer) error {\n\t\t\treturn Handle(backend, peer)\n\t\t})\n\t}()\n\treturn &testPeer{app: app, net: net, Peer: peer}, errc\n}\n\n// close terminates the local side of the peer, notifying the remote protocol\n// manager of termination.\nfunc (p *testPeer) close() {\n\tp.Peer.Close()\n\tp.app.Close()\n}\n"
  },
  {
    "path": "eth/protocols/eth/protocol.go",
    "content": "// Copyright 2014 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/big\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\n// Constants to match up protocol versions and messages\nconst (\n\tETH64 = 64\n\tETH65 = 65\n\tETH66 = 66\n)\n\n// ProtocolName is the official short name of the `eth` protocol used during\n// devp2p capability negotiation.\nconst ProtocolName = \"eth\"\n\n// ProtocolVersions are the supported versions of the `eth` protocol (first\n// is primary).\nvar ProtocolVersions = []uint{ETH66, ETH65, ETH64}\n\n// protocolLengths are the number of implemented message corresponding to\n// different protocol versions.\nvar protocolLengths = map[uint]uint64{ETH66: 17, ETH65: 17, ETH64: 17}\n\n// maxMessageSize is the maximum cap on the size of a protocol message.\nconst maxMessageSize = 10 * 1024 * 1024\n\nconst (\n\t// Protocol messages in eth/64\n\tStatusMsg          = 0x00\n\tNewBlockHashesMsg  = 0x01\n\tTransactionsMsg    = 0x02\n\tGetBlockHeadersMsg = 0x03\n\tBlockHeadersMsg    = 0x04\n\tGetBlockBodiesMsg  = 0x05\n\tBlockBodiesMsg     = 0x06\n\tNewBlockMsg        = 0x07\n\tGetNodeDataMsg     = 0x0d\n\tNodeDataMsg        = 0x0e\n\tGetReceiptsMsg     = 0x0f\n\tReceiptsMsg        = 0x10\n\n\t// Protocol messages overloaded in eth/65\n\tNewPooledTransactionHashesMsg = 0x08\n\tGetPooledTransactionsMsg      = 0x09\n\tPooledTransactionsMsg         = 0x0a\n)\n\nvar (\n\terrNoStatusMsg             = errors.New(\"no status message\")\n\terrMsgTooLarge             = errors.New(\"message too long\")\n\terrDecode                  = errors.New(\"invalid message\")\n\terrInvalidMsgCode          = errors.New(\"invalid message code\")\n\terrProtocolVersionMismatch = errors.New(\"protocol version mismatch\")\n\terrNetworkIDMismatch       = errors.New(\"network ID mismatch\")\n\terrGenesisMismatch         = errors.New(\"genesis mismatch\")\n\terrForkIDRejected          = errors.New(\"fork ID rejected\")\n)\n\n// Packet represents a p2p message in the `eth` protocol.\ntype Packet interface {\n\tName() string // Name returns a string corresponding to the message type.\n\tKind() byte   // Kind returns the message type.\n}\n\n// StatusPacket is the network packet for the status message for eth/64 and later.\ntype StatusPacket struct {\n\tProtocolVersion uint32\n\tNetworkID       uint64\n\tTD              *big.Int\n\tHead            common.Hash\n\tGenesis         common.Hash\n\tForkID          forkid.ID\n}\n\n// NewBlockHashesPacket is the network packet for the block announcements.\ntype NewBlockHashesPacket []struct {\n\tHash   common.Hash 
// Hash of one particular block being announced\n\tNumber uint64      // Number of one particular block being announced\n}\n\n// Unpack retrieves the block hashes and numbers from the announcement packet\n// and returns them in a split flat format that's more consistent with the\n// internal data structures.\nfunc (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) {\n\tvar (\n\t\thashes  = make([]common.Hash, len(*p))\n\t\tnumbers = make([]uint64, len(*p))\n\t)\n\tfor i, body := range *p {\n\t\thashes[i], numbers[i] = body.Hash, body.Number\n\t}\n\treturn hashes, numbers\n}\n\n// TransactionsPacket is the network packet for broadcasting new transactions.\ntype TransactionsPacket []*types.Transaction\n\n// GetBlockHeadersPacket represents a block header query.\ntype GetBlockHeadersPacket struct {\n\tOrigin  HashOrNumber // Block from which to retrieve headers\n\tAmount  uint64       // Maximum number of headers to retrieve\n\tSkip    uint64       // Blocks to skip between consecutive headers\n\tReverse bool         // Query direction (false = rising towards latest, true = falling towards genesis)\n}\n\n// GetBlockHeadersPacket66 represents a block header query over eth/66.\ntype GetBlockHeadersPacket66 struct {\n\tRequestId uint64\n\t*GetBlockHeadersPacket\n}\n\n// HashOrNumber is a combined field for specifying an origin block.\ntype HashOrNumber struct {\n\tHash   common.Hash // Block hash from which to retrieve headers (excludes Number)\n\tNumber uint64      // Block number from which to retrieve headers (excludes Hash)\n}\n\n// EncodeRLP is a specialized encoder for HashOrNumber to encode only one of the\n// two contained union fields.\nfunc (hn *HashOrNumber) EncodeRLP(w io.Writer) error {\n\tif hn.Hash == (common.Hash{}) {\n\t\treturn rlp.Encode(w, hn.Number)\n\t}\n\tif hn.Number != 0 {\n\t\treturn fmt.Errorf(\"both origin hash (%x) and number (%d) provided\", hn.Hash, hn.Number)\n\t}\n\treturn rlp.Encode(w, hn.Hash)\n}\n\n// DecodeRLP is a specialized decoder for HashOrNumber to decode the contents\n// into either a block hash or a block number.\nfunc (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error {\n\t_, size, _ := s.Kind()\n\torigin, err := s.Raw()\n\tif err == nil {\n\t\tswitch {\n\t\tcase size == 32:\n\t\t\terr = rlp.DecodeBytes(origin, &hn.Hash)\n\t\tcase size <= 8:\n\t\t\terr = rlp.DecodeBytes(origin, &hn.Number)\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"invalid input size %d for origin\", size)\n\t\t}\n\t}\n\treturn err\n}\n\n// BlockHeadersPacket represents a block header response.\ntype BlockHeadersPacket []*types.Header\n\n// BlockHeadersPacket66 represents a block header response over eth/66.\ntype BlockHeadersPacket66 struct {\n\tRequestId uint64\n\tBlockHeadersPacket\n}\n\n// NewBlockPacket is the network packet for the block propagation message.\ntype NewBlockPacket struct {\n\tBlock *types.Block\n\tTD    *big.Int\n}\n\n// sanityCheck verifies that the values are reasonable, as a DoS protection\nfunc (request *NewBlockPacket) sanityCheck() error {\n\tif err := request.Block.SanityCheck(); err != nil {\n\t\treturn err\n\t}\n\t// TD at mainnet block #7753254 is 76 bits. 
If it becomes 10 million times\n\t// larger, it will still fit within 100 bits (10^7 < 2^24, and 76 + 24 = 100).\n\tif tdlen := request.TD.BitLen(); tdlen > 100 {\n\t\treturn fmt.Errorf(\"too large block TD: bitlen %d\", tdlen)\n\t}\n\treturn nil\n}\n\n// GetBlockBodiesPacket represents a block body query.\ntype GetBlockBodiesPacket []common.Hash\n\n// GetBlockBodiesPacket66 represents a block body query over eth/66.\ntype GetBlockBodiesPacket66 struct {\n\tRequestId uint64\n\tGetBlockBodiesPacket\n}\n\n// BlockBodiesPacket is the network packet for block content distribution.\ntype BlockBodiesPacket []*BlockBody\n\n// BlockBodiesPacket66 is the network packet for block content distribution over eth/66.\ntype BlockBodiesPacket66 struct {\n\tRequestId uint64\n\tBlockBodiesPacket\n}\n\n// BlockBodiesRLPPacket is used for replying to block body requests, in cases\n// where we already have them RLP-encoded, and thus can avoid the decode-encode\n// roundtrip.\ntype BlockBodiesRLPPacket []rlp.RawValue\n\n// BlockBodiesRLPPacket66 is the BlockBodiesRLPPacket over eth/66.\ntype BlockBodiesRLPPacket66 struct {\n\tRequestId uint64\n\tBlockBodiesRLPPacket\n}\n\n// BlockBody represents the data content of a single block.\ntype BlockBody struct {\n\tTransactions []*types.Transaction // Transactions contained within a block\n\tUncles       []*types.Header      // Uncles contained within a block\n}\n\n// Unpack retrieves the transactions and uncles from the range packet and returns\n// them in a split flat format that's more consistent with the internal data structures.\nfunc (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header) {\n\tvar (\n\t\ttxset    = make([][]*types.Transaction, len(*p))\n\t\tuncleset = make([][]*types.Header, len(*p))\n\t)\n\tfor i, body := range *p {\n\t\ttxset[i], uncleset[i] = body.Transactions, body.Uncles\n\t}\n\treturn txset, uncleset\n}\n\n// GetNodeDataPacket represents a trie node data query.\ntype GetNodeDataPacket []common.Hash\n\n// GetNodeDataPacket66 represents a trie node data query over eth/66.\ntype GetNodeDataPacket66 struct {\n\tRequestId uint64\n\tGetNodeDataPacket\n}\n\n// NodeDataPacket is the network packet for trie node data distribution.\ntype NodeDataPacket [][]byte\n\n// NodeDataPacket66 is the network packet for trie node data distribution over eth/66.\ntype NodeDataPacket66 struct {\n\tRequestId uint64\n\tNodeDataPacket\n}\n\n// GetReceiptsPacket represents a block receipts query.\ntype GetReceiptsPacket []common.Hash\n\n// GetReceiptsPacket66 represents a block receipts query over eth/66.\ntype GetReceiptsPacket66 struct {\n\tRequestId uint64\n\tGetReceiptsPacket\n}\n\n// ReceiptsPacket is the network packet for block receipts distribution.\ntype ReceiptsPacket [][]*types.Receipt\n\n// ReceiptsPacket66 is the network packet for block receipts distribution over eth/66.\ntype ReceiptsPacket66 struct {\n\tRequestId uint64\n\tReceiptsPacket\n}\n\n// ReceiptsRLPPacket is used for receipts, when we already have them encoded.\ntype ReceiptsRLPPacket []rlp.RawValue\n\n// ReceiptsRLPPacket66 is the eth/66 version of ReceiptsRLPPacket.\ntype ReceiptsRLPPacket66 struct {\n\tRequestId uint64\n\tReceiptsRLPPacket\n}\n\n// NewPooledTransactionHashesPacket represents a transaction announcement packet.\ntype NewPooledTransactionHashesPacket []common.Hash\n\n// GetPooledTransactionsPacket represents a transaction query.\ntype GetPooledTransactionsPacket []common.Hash\n\n// GetPooledTransactionsPacket66 represents a transaction query over eth/66.\ntype GetPooledTransactionsPacket66 struct {\n\tRequestId uint64\n\tGetPooledTransactionsPacket\n}\n\n// PooledTransactionsPacket is 
the network packet for transaction distribution.\ntype PooledTransactionsPacket []*types.Transaction\n\n// PooledTransactionsPacket66 is the network packet for transaction distribution over eth/66.\ntype PooledTransactionsPacket66 struct {\n\tRequestId uint64\n\tPooledTransactionsPacket\n}\n\n// PooledTransactionsRLPPacket is the network packet for transaction distribution, used\n// in cases where we already have them in RLP-encoded form.\ntype PooledTransactionsRLPPacket []rlp.RawValue\n\n// PooledTransactionsRLPPacket66 is the eth/66 form of PooledTransactionsRLPPacket.\ntype PooledTransactionsRLPPacket66 struct {\n\tRequestId uint64\n\tPooledTransactionsRLPPacket\n}\n\nfunc (*StatusPacket) Name() string { return \"Status\" }\nfunc (*StatusPacket) Kind() byte   { return StatusMsg }\n\nfunc (*NewBlockHashesPacket) Name() string { return \"NewBlockHashes\" }\nfunc (*NewBlockHashesPacket) Kind() byte   { return NewBlockHashesMsg }\n\nfunc (*TransactionsPacket) Name() string { return \"Transactions\" }\nfunc (*TransactionsPacket) Kind() byte   { return TransactionsMsg }\n\nfunc (*GetBlockHeadersPacket) Name() string { return \"GetBlockHeaders\" }\nfunc (*GetBlockHeadersPacket) Kind() byte   { return GetBlockHeadersMsg }\n\nfunc (*BlockHeadersPacket) Name() string { return \"BlockHeaders\" }\nfunc (*BlockHeadersPacket) Kind() byte   { return BlockHeadersMsg }\n\nfunc (*GetBlockBodiesPacket) Name() string { return \"GetBlockBodies\" }\nfunc (*GetBlockBodiesPacket) Kind() byte   { return GetBlockBodiesMsg }\n\nfunc (*BlockBodiesPacket) Name() string { return \"BlockBodies\" }\nfunc (*BlockBodiesPacket) Kind() byte   { return BlockBodiesMsg }\n\nfunc (*NewBlockPacket) Name() string { return \"NewBlock\" }\nfunc (*NewBlockPacket) Kind() byte   { return NewBlockMsg }\n\nfunc (*GetNodeDataPacket) Name() string { return \"GetNodeData\" }\nfunc (*GetNodeDataPacket) Kind() byte   { return GetNodeDataMsg }\n\nfunc (*NodeDataPacket) Name() string { return \"NodeData\" }\nfunc (*NodeDataPacket) Kind() byte   { return NodeDataMsg }\n\nfunc (*GetReceiptsPacket) Name() string { return \"GetReceipts\" }\nfunc (*GetReceiptsPacket) Kind() byte   { return GetReceiptsMsg }\n\nfunc (*ReceiptsPacket) Name() string { return \"Receipts\" }\nfunc (*ReceiptsPacket) Kind() byte   { return ReceiptsMsg }\n\nfunc (*NewPooledTransactionHashesPacket) Name() string { return \"NewPooledTransactionHashes\" }\nfunc (*NewPooledTransactionHashesPacket) Kind() byte   { return NewPooledTransactionHashesMsg }\n\nfunc (*GetPooledTransactionsPacket) Name() string { return \"GetPooledTransactions\" }\nfunc (*GetPooledTransactionsPacket) Kind() byte   { return GetPooledTransactionsMsg }\n\nfunc (*PooledTransactionsPacket) Name() string { return \"PooledTransactions\" }\nfunc (*PooledTransactionsPacket) Kind() byte   { return PooledTransactionsMsg }\n"
  },
  {
    "path": "eth/protocols/eth/protocol_test.go",
    "content": "// Copyright 2014 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"bytes\"\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\n// Tests that the custom union field encoder and decoder works correctly.\nfunc TestGetBlockHeadersDataEncodeDecode(t *testing.T) {\n\t// Create a \"random\" hash for testing\n\tvar hash common.Hash\n\tfor i := range hash {\n\t\thash[i] = byte(i)\n\t}\n\t// Assemble some table driven tests\n\ttests := []struct {\n\t\tpacket *GetBlockHeadersPacket\n\t\tfail   bool\n\t}{\n\t\t// Providing the origin as either a hash or a number should both work\n\t\t{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}},\n\t\t{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}},\n\n\t\t// Providing arbitrary query field should also work\n\t\t{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}},\n\t\t{fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}},\n\n\t\t// Providing both the origin hash and origin number must fail\n\t\t{fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}},\n\t}\n\t// Iterate over each of the tests and try to encode and then decode\n\tfor i, tt := range tests {\n\t\tbytes, err := rlp.EncodeToBytes(tt.packet)\n\t\tif err != nil && !tt.fail {\n\t\t\tt.Fatalf(\"test %d: failed to encode packet: %v\", i, err)\n\t\t} else if err == nil && tt.fail {\n\t\t\tt.Fatalf(\"test %d: encode should have failed\", i)\n\t\t}\n\t\tif !tt.fail {\n\t\t\tpacket := new(GetBlockHeadersPacket)\n\t\t\tif err := rlp.DecodeBytes(bytes, packet); err != nil {\n\t\t\t\tt.Fatalf(\"test %d: failed to decode packet: %v\", i, err)\n\t\t\t}\n\t\t\tif packet.Origin.Hash != tt.packet.Origin.Hash || packet.Origin.Number != tt.packet.Origin.Number || packet.Amount != tt.packet.Amount ||\n\t\t\t\tpacket.Skip != tt.packet.Skip || packet.Reverse != tt.packet.Reverse {\n\t\t\t\tt.Fatalf(\"test %d: encode decode mismatch: have %+v, want %+v\", i, packet, tt.packet)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// TestEth66EmptyMessages tests encoding of empty eth66 messages\nfunc TestEth66EmptyMessages(t *testing.T) {\n\t// All empty messages encodes to the same format\n\twant := common.FromHex(\"c4820457c0\")\n\n\tfor i, msg := range []interface{}{\n\t\t// Headers\n\t\tGetBlockHeadersPacket66{1111, nil},\n\t\tBlockHeadersPacket66{1111, nil},\n\t\t// Bodies\n\t\tGetBlockBodiesPacket66{1111, nil},\n\t\tBlockBodiesPacket66{1111, nil},\n\t\tBlockBodiesRLPPacket66{1111, nil},\n\t\t// Node 
data\n\t\tGetNodeDataPacket66{1111, nil},\n\t\tNodeDataPacket66{1111, nil},\n\t\t// Receipts\n\t\tGetReceiptsPacket66{1111, nil},\n\t\tReceiptsPacket66{1111, nil},\n\t\t// Transactions\n\t\tGetPooledTransactionsPacket66{1111, nil},\n\t\tPooledTransactionsPacket66{1111, nil},\n\t\tPooledTransactionsRLPPacket66{1111, nil},\n\n\t\t// Headers\n\t\tBlockHeadersPacket66{1111, BlockHeadersPacket([]*types.Header{})},\n\t\t// Bodies\n\t\tGetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})},\n\t\tBlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})},\n\t\tBlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})},\n\t\t// Node data\n\t\tGetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})},\n\t\tNodeDataPacket66{1111, NodeDataPacket([][]byte{})},\n\t\t// Receipts\n\t\tGetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})},\n\t\tReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})},\n\t\t// Transactions\n\t\tGetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})},\n\t\tPooledTransactionsPacket66{1111, PooledTransactionsPacket([]*types.Transaction{})},\n\t\tPooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})},\n\t} {\n\t\tif have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) {\n\t\t\tt.Errorf(\"test %d, type %T, have\\n\\t%x\\nwant\\n\\t%x\", i, msg, have, want)\n\t\t}\n\t}\n\n}\n\n// TestEth66Messages tests the encoding of all redefined eth66 messages\nfunc TestEth66Messages(t *testing.T) {\n\n\t// Some basic structs used during testing\n\tvar (\n\t\theader       *types.Header\n\t\tblockBody    *BlockBody\n\t\tblockBodyRlp rlp.RawValue\n\t\ttxs          []*types.Transaction\n\t\ttxRlps       []rlp.RawValue\n\t\thashes       []common.Hash\n\t\treceipts     []*types.Receipt\n\t\treceiptsRlp  rlp.RawValue\n\n\t\terr error\n\t)\n\theader = &types.Header{\n\t\tDifficulty: big.NewInt(2222),\n\t\tNumber:     big.NewInt(3333),\n\t\tGasLimit:   4444,\n\t\tGasUsed:    5555,\n\t\tTime:       6666,\n\t\tExtra:      []byte{0x77, 0x88},\n\t}\n\t// Init the transactions, taken from a different test\n\t{\n\t\tfor _, hexrlp := range []string{\n\t\t\t\"f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10\",\n\t\t\t\"f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb\",\n\t\t} {\n\t\t\tvar tx *types.Transaction\n\t\t\trlpdata := common.FromHex(hexrlp)\n\t\t\tif err := rlp.DecodeBytes(rlpdata, &tx); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\ttxs = append(txs, tx)\n\t\t\ttxRlps = append(txRlps, rlpdata)\n\t\t}\n\t}\n\t// init the block body data, both object and rlp form\n\tblockBody = &BlockBody{\n\t\tTransactions: txs,\n\t\tUncles:       []*types.Header{header},\n\t}\n\tblockBodyRlp, err = rlp.EncodeToBytes(blockBody)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\thashes = []common.Hash{\n\t\tcommon.HexToHash(\"deadc0de\"),\n\t\tcommon.HexToHash(\"feedbeef\"),\n\t}\n\tbyteSlices := [][]byte{\n\t\tcommon.FromHex(\"deadc0de\"),\n\t\tcommon.FromHex(\"feedbeef\"),\n\t}\n\t// init the receipts\n\t{\n\t\treceipts = []*types.Receipt{\n\t\t\t{\n\t\t\t\tStatus:            types.ReceiptStatusFailed,\n\t\t\t\tCumulativeGasUsed: 1,\n\t\t\t\tLogs: []*types.Log{\n\t\t\t\t\t{\n\t\t\t\t\t\tAddress: 
common.BytesToAddress([]byte{0x11}),\n\t\t\t\t\t\tTopics:  []common.Hash{common.HexToHash(\"dead\"), common.HexToHash(\"beef\")},\n\t\t\t\t\t\tData:    []byte{0x01, 0x00, 0xff},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tTxHash:          hashes[0],\n\t\t\t\tContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}),\n\t\t\t\tGasUsed:         111111,\n\t\t\t},\n\t\t}\n\t\trlpData, err := rlp.EncodeToBytes(receipts)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\treceiptsRlp = rlpData\n\t}\n\n\tfor i, tc := range []struct {\n\t\tmessage interface{}\n\t\twant    []byte\n\t}{\n\t\t{\n\t\t\tGetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{hashes[0], 0}, 5, 5, false}},\n\t\t\tcommon.FromHex(\"e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580\"),\n\t\t},\n\t\t{\n\t\t\tGetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}},\n\t\t\tcommon.FromHex(\"ca820457c682270f050580\"),\n\t\t},\n\t\t{\n\t\t\tBlockHeadersPacket66{1111, BlockHeadersPacket{header}},\n\t\t\tcommon.FromHex(\"f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000\"),\n\t\t},\n\t\t{\n\t\t\tGetBlockBodiesPacket66{1111, GetBlockBodiesPacket(hashes)},\n\t\t\tcommon.FromHex(\"f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef\"),\n\t\t},\n\t\t{\n\t\t\tBlockBodiesPacket66{1111, 
BlockBodiesPacket([]*BlockBody{blockBody})},\n\t\t\tcommon.FromHex(\"f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000\"),\n\t\t},\n\t\t{ // Identical to non-rlp-shortcut version\n\t\t\tBlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})},\n\t\t\tcommon.FromHex(\"f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000\"),\n\t\t},\n\t\t{\n\t\t\tGetNodeDataPacket66{1111, GetNodeDataPacket(hashes)},\n\t\t\tcommon.FromHex(\"f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef\"),\n\t\t},\n\t\t{\n\t\t\tNodeDataPacket66{1111, 
NodeDataPacket(byteSlices)},\n\t\t\tcommon.FromHex(\"ce820457ca84deadc0de84feedbeef\"),\n\t\t},\n\t\t{\n\t\t\tGetReceiptsPacket66{1111, GetReceiptsPacket(hashes)},\n\t\t\tcommon.FromHex(\"f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef\"),\n\t\t},\n\t\t{\n\t\t\tReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{receipts})},\n\t\t\tcommon.FromHex(\"f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff\"),\n\t\t},\n\t\t{\n\t\t\tReceiptsRLPPacket66{1111, ReceiptsRLPPacket([]rlp.RawValue{receiptsRlp})},\n\t\t\tcommon.FromHex(\"f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff\"),\n\t\t},\n\t\t{\n\t\t\tGetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket(hashes)},\n\t\t\tcommon.FromHex(\"f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef\"),\n\t\t},\n\t\t{\n\t\t\tPooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)},\n\t\t\tcommon.FromHex(\"f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb\"),\n\t\t},\n\t\t{\n\t\t\tPooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket(txRlps)},\n\t\t\tcommon.FromHex(\"f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb\"),\n\t\t},\n\t} {\n\t\tif have, _ := rlp.EncodeToBytes(tc.message); !bytes.Equal(have, tc.want) 
{\n\t\t\tt.Errorf(\"test %d, type %T, have\\n\\t%x\\nwant\\n\\t%x\", i, tc.message, have, tc.want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "eth/protocols/snap/discovery.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage snap\n\nimport (\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\n// enrEntry is the ENR entry which advertises `snap` protocol on the discovery.\ntype enrEntry struct {\n\t// Ignore additional fields (for forward compatibility).\n\tRest []rlp.RawValue `rlp:\"tail\"`\n}\n\n// ENRKey implements enr.Entry.\nfunc (e enrEntry) ENRKey() string {\n\treturn \"snap\"\n}\n"
  },
  {
    "path": "eth/protocols/snap/handler.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage snap\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/metrics\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\nconst (\n\t// softResponseLimit is the target maximum size of replies to data retrievals.\n\tsoftResponseLimit = 2 * 1024 * 1024\n\n\t// maxCodeLookups is the maximum number of bytecodes to serve. This number is\n\t// there to limit the number of disk lookups.\n\tmaxCodeLookups = 1024\n\n\t// stateLookupSlack defines the ratio by how much a state response can exceed\n\t// the requested limit in order to try and avoid breaking up contracts into\n\t// multiple packages and proving them.\n\tstateLookupSlack = 0.1\n\n\t// maxTrieNodeLookups is the maximum number of state trie nodes to serve. This\n\t// number is there to limit the number of disk lookups.\n\tmaxTrieNodeLookups = 1024\n\n\t// maxTrieNodeTimeSpent is the maximum time we should spend on looking up trie nodes.\n\t// If we spend too much time, then it's a fairly high chance of timing out\n\t// at the remote side, which means all the work is in vain.\n\tmaxTrieNodeTimeSpent = 5 * time.Second\n)\n\n// Handler is a callback to invoke from an outside runner after the boilerplate\n// exchanges have passed.\ntype Handler func(peer *Peer) error\n\n// Backend defines the data retrieval methods to serve remote requests and the\n// callback methods to invoke on remote deliveries.\ntype Backend interface {\n\t// Chain retrieves the blockchain object to serve data.\n\tChain() *core.BlockChain\n\n\t// RunPeer is invoked when a peer joins on the `eth` protocol. The handler\n\t// should do any peer maintenance work, handshakes and validations. If all\n\t// is passed, control should be given back to the `handler` to process the\n\t// inbound messages going forward.\n\tRunPeer(peer *Peer, handler Handler) error\n\n\t// PeerInfo retrieves all known `snap` information about a peer.\n\tPeerInfo(id enode.ID) interface{}\n\n\t// Handle is a callback to be invoked when a data packet is received from\n\t// the remote peer. 
Only packets not consumed by the protocol handler will\n\t// be forwarded to the backend.\n\tHandle(peer *Peer, packet Packet) error\n}\n\n// MakeProtocols constructs the P2P protocol definitions for `snap`.\nfunc MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {\n\tprotocols := make([]p2p.Protocol, len(ProtocolVersions))\n\tfor i, version := range ProtocolVersions {\n\t\tversion := version // Closure\n\n\t\tprotocols[i] = p2p.Protocol{\n\t\t\tName:    ProtocolName,\n\t\t\tVersion: version,\n\t\t\tLength:  protocolLengths[version],\n\t\t\tRun: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {\n\t\t\t\treturn backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error {\n\t\t\t\t\treturn handle(backend, peer)\n\t\t\t\t})\n\t\t\t},\n\t\t\tNodeInfo: func() interface{} {\n\t\t\t\treturn nodeInfo(backend.Chain())\n\t\t\t},\n\t\t\tPeerInfo: func(id enode.ID) interface{} {\n\t\t\t\treturn backend.PeerInfo(id)\n\t\t\t},\n\t\t\tAttributes:     []enr.Entry{&enrEntry{}},\n\t\t\tDialCandidates: dnsdisc,\n\t\t}\n\t}\n\treturn protocols\n}\n\n// handle is the callback invoked to manage the life cycle of a `snap` peer.\n// When this function terminates, the peer is disconnected.\nfunc handle(backend Backend, peer *Peer) error {\n\tfor {\n\t\tif err := handleMessage(backend, peer); err != nil {\n\t\t\tpeer.Log().Debug(\"Message handling failed in `snap`\", \"err\", err)\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n// handleMessage is invoked whenever an inbound message is received from a\n// remote peer on the `snap` protocol. The remote connection is torn down upon\n// returning any error.\nfunc handleMessage(backend Backend, peer *Peer) error {\n\t// Read the next message from the remote peer, and ensure it's fully consumed\n\tmsg, err := peer.rw.ReadMsg()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif msg.Size > maxMessageSize {\n\t\treturn fmt.Errorf(\"%w: %v > %v\", errMsgTooLarge, msg.Size, maxMessageSize)\n\t}\n\tdefer msg.Discard()\n\tstart := time.Now()\n\t// Track the amount of time it takes to serve the request and run the handler\n\tif metrics.Enabled {\n\t\th := fmt.Sprintf(\"%s/%s/%d/%#02x\", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)\n\t\tdefer func(start time.Time) {\n\t\t\tsampler := func() metrics.Sample {\n\t\t\t\treturn metrics.ResettingSample(\n\t\t\t\t\tmetrics.NewExpDecaySample(1028, 0.015),\n\t\t\t\t)\n\t\t\t}\n\t\t\tmetrics.GetOrRegisterHistogramLazy(h, nil, sampler).Update(time.Since(start).Microseconds())\n\t\t}(start)\n\t}\n\t// Handle the message depending on its contents\n\tswitch {\n\tcase msg.Code == GetAccountRangeMsg:\n\t\t// Decode the account retrieval request\n\t\tvar req GetAccountRangePacket\n\t\tif err := msg.Decode(&req); err != nil {\n\t\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t\t}\n\t\tif req.Bytes > softResponseLimit {\n\t\t\treq.Bytes = softResponseLimit\n\t\t}\n\t\t// Retrieve the requested state and bail out if non-existent\n\t\ttr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())\n\t\tif err != nil {\n\t\t\treturn p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})\n\t\t}\n\t\tit, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin)\n\t\tif err != nil {\n\t\t\treturn p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})\n\t\t}\n\t\t// Iterate over the requested range and pile accounts up\n\t\tvar (\n\t\t\taccounts []*AccountData\n\t\t\tsize     uint64\n\t\t\tlast     common.Hash\n\t\t)\n\t\tfor it.Next() && size < req.Bytes 
{\n\t\t\thash, account := it.Hash(), common.CopyBytes(it.Account())\n\n\t\t\t// Track the returned interval for the Merkle proofs\n\t\t\tlast = hash\n\n\t\t\t// Assemble the reply item\n\t\t\tsize += uint64(common.HashLength + len(account))\n\t\t\taccounts = append(accounts, &AccountData{\n\t\t\t\tHash: hash,\n\t\t\t\tBody: account,\n\t\t\t})\n\t\t\t// If we've exceeded the request threshold, abort\n\t\t\tif bytes.Compare(hash[:], req.Limit[:]) >= 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tit.Release()\n\n\t\t// Generate the Merkle proofs for the first and last account\n\t\tproof := light.NewNodeSet()\n\t\tif err := tr.Prove(req.Origin[:], 0, proof); err != nil {\n\t\t\tlog.Warn(\"Failed to prove account range\", \"origin\", req.Origin, \"err\", err)\n\t\t\treturn p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})\n\t\t}\n\t\tif last != (common.Hash{}) {\n\t\t\tif err := tr.Prove(last[:], 0, proof); err != nil {\n\t\t\t\tlog.Warn(\"Failed to prove account range\", \"last\", last, \"err\", err)\n\t\t\t\treturn p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})\n\t\t\t}\n\t\t}\n\t\tvar proofs [][]byte\n\t\tfor _, blob := range proof.NodeList() {\n\t\t\tproofs = append(proofs, blob)\n\t\t}\n\t\t// Send back anything accumulated\n\t\treturn p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{\n\t\t\tID:       req.ID,\n\t\t\tAccounts: accounts,\n\t\t\tProof:    proofs,\n\t\t})\n\n\tcase msg.Code == AccountRangeMsg:\n\t\t// A range of accounts arrived to one of our previous requests\n\t\tres := new(AccountRangePacket)\n\t\tif err := msg.Decode(res); err != nil {\n\t\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t\t}\n\t\t// Ensure the range is monotonically increasing\n\t\tfor i := 1; i < len(res.Accounts); i++ {\n\t\t\tif bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 {\n\t\t\t\treturn fmt.Errorf(\"accounts not monotonically increasing: #%d [%x] vs #%d [%x]\", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:])\n\t\t\t}\n\t\t}\n\t\treturn backend.Handle(peer, res)\n\n\tcase msg.Code == GetStorageRangesMsg:\n\t\t// Decode the storage retrieval request\n\t\tvar req GetStorageRangesPacket\n\t\tif err := msg.Decode(&req); err != nil {\n\t\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t\t}\n\t\tif req.Bytes > softResponseLimit {\n\t\t\treq.Bytes = softResponseLimit\n\t\t}\n\t\t// TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set?\n\t\t// TODO(karalabe):   - Logging locally is not ideal as remote faults annoy the local user\n\t\t// TODO(karalabe):   - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional)\n\n\t\t// Calculate the hard limit at which to abort, even if mid storage trie\n\t\thardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack))\n\n\t\t// Retrieve storage ranges until the packet limit is reached\n\t\tvar (\n\t\t\tslots  [][]*StorageData\n\t\t\tproofs [][]byte\n\t\t\tsize   uint64\n\t\t)\n\t\tfor _, account := range req.Accounts {\n\t\t\t// If we've exceeded the requested data limit, abort without opening\n\t\t\t// a new storage range (that we'd need to prove due to exceeded size)\n\t\t\tif size >= req.Bytes {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// The first account might start from a different origin and end sooner\n\t\t\tvar origin common.Hash\n\t\t\tif len(req.Origin) > 0 {\n\t\t\t\torigin, req.Origin = common.BytesToHash(req.Origin), nil\n\t\t\t}\n\t\t\tvar limit = 
common.HexToHash(\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\")\n\t\t\tif len(req.Limit) > 0 {\n\t\t\t\tlimit, req.Limit = common.BytesToHash(req.Limit), nil\n\t\t\t}\n\t\t\t// Retrieve the requested state and bail out if non existent\n\t\t\tit, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin)\n\t\t\tif err != nil {\n\t\t\t\treturn p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})\n\t\t\t}\n\t\t\t// Iterate over the requested range and pile slots up\n\t\t\tvar (\n\t\t\t\tstorage []*StorageData\n\t\t\t\tlast    common.Hash\n\t\t\t\tabort   bool\n\t\t\t)\n\t\t\tfor it.Next() {\n\t\t\t\tif size >= hardLimit {\n\t\t\t\t\tabort = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\thash, slot := it.Hash(), common.CopyBytes(it.Slot())\n\n\t\t\t\t// Track the returned interval for the Merkle proofs\n\t\t\t\tlast = hash\n\n\t\t\t\t// Assemble the reply item\n\t\t\t\tsize += uint64(common.HashLength + len(slot))\n\t\t\t\tstorage = append(storage, &StorageData{\n\t\t\t\t\tHash: hash,\n\t\t\t\t\tBody: slot,\n\t\t\t\t})\n\t\t\t\t// If we've exceeded the request threshold, abort\n\t\t\t\tif bytes.Compare(hash[:], limit[:]) >= 0 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tslots = append(slots, storage)\n\t\t\tit.Release()\n\n\t\t\t// Generate the Merkle proofs for the first and last storage slot, but\n\t\t\t// only if the response was capped. If the entire storage trie included\n\t\t\t// in the response, no need for any proofs.\n\t\t\tif origin != (common.Hash{}) || abort {\n\t\t\t\t// Request started at a non-zero hash or was capped prematurely, add\n\t\t\t\t// the endpoint Merkle proofs\n\t\t\t\taccTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})\n\t\t\t\t}\n\t\t\t\tvar acc state.Account\n\t\t\t\tif err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil {\n\t\t\t\t\treturn p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})\n\t\t\t\t}\n\t\t\t\tstTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})\n\t\t\t\t}\n\t\t\t\tproof := light.NewNodeSet()\n\t\t\t\tif err := stTrie.Prove(origin[:], 0, proof); err != nil {\n\t\t\t\t\tlog.Warn(\"Failed to prove storage range\", \"origin\", req.Origin, \"err\", err)\n\t\t\t\t\treturn p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})\n\t\t\t\t}\n\t\t\t\tif last != (common.Hash{}) {\n\t\t\t\t\tif err := stTrie.Prove(last[:], 0, proof); err != nil {\n\t\t\t\t\t\tlog.Warn(\"Failed to prove storage range\", \"last\", last, \"err\", err)\n\t\t\t\t\t\treturn p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor _, blob := range proof.NodeList() {\n\t\t\t\t\tproofs = append(proofs, blob)\n\t\t\t\t}\n\t\t\t\t// Proof terminates the reply as proofs are only added if a node\n\t\t\t\t// refuses to serve more data (exception when a contract fetch is\n\t\t\t\t// finishing, but that's that).\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// Send back anything accumulated\n\t\treturn p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{\n\t\t\tID:    req.ID,\n\t\t\tSlots: slots,\n\t\t\tProof: proofs,\n\t\t})\n\n\tcase msg.Code == StorageRangesMsg:\n\t\t// A range of storage slots arrived to one of our previous requests\n\t\tres := new(StorageRangesPacket)\n\t\tif 
err := msg.Decode(res); err != nil {\n\t\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t\t}\n\t\t// Ensure the ranges are monotonically increasing\n\t\tfor i, slots := range res.Slots {\n\t\t\tfor j := 1; j < len(slots); j++ {\n\t\t\t\tif bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 {\n\t\t\t\t\treturn fmt.Errorf(\"storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]\", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn backend.Handle(peer, res)\n\n\tcase msg.Code == GetByteCodesMsg:\n\t\t// Decode bytecode retrieval request\n\t\tvar req GetByteCodesPacket\n\t\tif err := msg.Decode(&req); err != nil {\n\t\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t\t}\n\t\tif req.Bytes > softResponseLimit {\n\t\t\treq.Bytes = softResponseLimit\n\t\t}\n\t\tif len(req.Hashes) > maxCodeLookups {\n\t\t\treq.Hashes = req.Hashes[:maxCodeLookups]\n\t\t}\n\t\t// Retrieve bytecodes until the packet size limit is reached\n\t\tvar (\n\t\t\tcodes [][]byte\n\t\t\tbytes uint64\n\t\t)\n\t\tfor _, hash := range req.Hashes {\n\t\t\tif hash == emptyCode {\n\t\t\t\t// Peers should not request the empty code, but if they do, at\n\t\t\t\t// least send them back a correct response without db lookups\n\t\t\t\tcodes = append(codes, []byte{})\n\t\t\t} else if blob, err := backend.Chain().ContractCode(hash); err == nil {\n\t\t\t\tcodes = append(codes, blob)\n\t\t\t\tbytes += uint64(len(blob))\n\t\t\t}\n\t\t\tif bytes > req.Bytes {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// Send back anything accumulated\n\t\treturn p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{\n\t\t\tID:    req.ID,\n\t\t\tCodes: codes,\n\t\t})\n\n\tcase msg.Code == ByteCodesMsg:\n\t\t// A batch of byte codes arrived to one of our previous requests\n\t\tres := new(ByteCodesPacket)\n\t\tif err := msg.Decode(res); err != nil {\n\t\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t\t}\n\t\treturn backend.Handle(peer, res)\n\n\tcase msg.Code == GetTrieNodesMsg:\n\t\t// Decode trie node retrieval request\n\t\tvar req GetTrieNodesPacket\n\t\tif err := msg.Decode(&req); err != nil {\n\t\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t\t}\n\t\tif req.Bytes > softResponseLimit {\n\t\t\treq.Bytes = softResponseLimit\n\t\t}\n\t\t// Make sure we have the state associated with the request\n\t\ttriedb := backend.Chain().StateCache().TrieDB()\n\n\t\taccTrie, err := trie.NewSecure(req.Root, triedb)\n\t\tif err != nil {\n\t\t\t// We don't have the requested state available, bail out\n\t\t\treturn p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})\n\t\t}\n\t\tsnap := backend.Chain().Snapshots().Snapshot(req.Root)\n\t\tif snap == nil {\n\t\t\t// We don't have the requested state snapshotted yet, bail out.\n\t\t\t// In reality we could still serve using the account and storage\n\t\t\t// tries only, but let's protect the node a bit while it's doing\n\t\t\t// snapshot generation.\n\t\t\treturn p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID})\n\t\t}\n\t\t// Retrieve trie nodes until the packet size limit is reached\n\t\tvar (\n\t\t\tnodes [][]byte\n\t\t\tbytes uint64\n\t\t\tloads int // Trie hash expansions to count database reads\n\t\t)\n\t\tfor _, pathset := range req.Paths {\n\t\t\tswitch len(pathset) {\n\t\t\tcase 0:\n\t\t\t\t// Ensure we penalize invalid requests\n\t\t\t\treturn fmt.Errorf(\"%w: zero-item pathset requested\", errBadRequest)\n\n\t\t\tcase 1:\n\t\t\t\t// If we're only retrieving an 
account trie node, fetch it directly\n\t\t\t\tblob, resolved, err := accTrie.TryGetNode(pathset[0])\n\t\t\t\tloads += resolved // always account database reads, even for failures\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tnodes = append(nodes, blob)\n\t\t\t\tbytes += uint64(len(blob))\n\n\t\t\tdefault:\n\t\t\t\t// Storage slots requested, open the storage trie and retrieve from there\n\t\t\t\taccount, err := snap.Account(common.BytesToHash(pathset[0]))\n\t\t\t\tloads++ // always account database reads, even for failures\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tstTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb)\n\t\t\t\tloads++ // always account database reads, even for failures\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tfor _, path := range pathset[1:] {\n\t\t\t\t\tblob, resolved, err := stTrie.TryGetNode(path)\n\t\t\t\t\tloads += resolved // always account database reads, even for failures\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tnodes = append(nodes, blob)\n\t\t\t\t\tbytes += uint64(len(blob))\n\n\t\t\t\t\t// Sanity check limits to avoid DoS on the store trie loads\n\t\t\t\t\tif bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Abort request processing if we've exceeded our limits\n\t\t\tif bytes > req.Bytes || loads > maxTrieNodeLookups || time.Since(start) > maxTrieNodeTimeSpent {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// Send back anything accumulated\n\t\treturn p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{\n\t\t\tID:    req.ID,\n\t\t\tNodes: nodes,\n\t\t})\n\n\tcase msg.Code == TrieNodesMsg:\n\t\t// A batch of trie nodes arrived to one of our previous requests\n\t\tres := new(TrieNodesPacket)\n\t\tif err := msg.Decode(res); err != nil {\n\t\t\treturn fmt.Errorf(\"%w: message %v: %v\", errDecode, msg, err)\n\t\t}\n\t\treturn backend.Handle(peer, res)\n\n\tdefault:\n\t\treturn fmt.Errorf(\"%w: %v\", errInvalidMsgCode, msg.Code)\n\t}\n}\n\n// NodeInfo represents a short summary of the `snap` sub-protocol metadata\n// known about the host peer.\ntype NodeInfo struct{}\n\n// nodeInfo retrieves some `snap` protocol metadata about the running host node.\nfunc nodeInfo(chain *core.BlockChain) *NodeInfo {\n\treturn &NodeInfo{}\n}\n"
  },
  {
    "path": "eth/protocols/snap/peer.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage snap\n\nimport (\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n)\n\n// Peer is a collection of relevant information we have about a `snap` peer.\ntype Peer struct {\n\tid string // Unique ID for the peer, cached\n\n\t*p2p.Peer                   // The embedded P2P package peer\n\trw        p2p.MsgReadWriter // Input/output streams for snap\n\tversion   uint              // Protocol version negotiated\n\n\tlogger log.Logger // Contextual logger with the peer id injected\n}\n\n// newPeer create a wrapper for a network connection and negotiated  protocol\n// version.\nfunc newPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {\n\tid := p.ID().String()\n\treturn &Peer{\n\t\tid:      id,\n\t\tPeer:    p,\n\t\trw:      rw,\n\t\tversion: version,\n\t\tlogger:  log.New(\"peer\", id[:8]),\n\t}\n}\n\n// ID retrieves the peer's unique identifier.\nfunc (p *Peer) ID() string {\n\treturn p.id\n}\n\n// Version retrieves the peer's negoatiated `snap` protocol version.\nfunc (p *Peer) Version() uint {\n\treturn p.version\n}\n\n// Log overrides the P2P logget with the higher level one containing only the id.\nfunc (p *Peer) Log() log.Logger {\n\treturn p.logger\n}\n\n// RequestAccountRange fetches a batch of accounts rooted in a specific account\n// trie, starting with the origin.\nfunc (p *Peer) RequestAccountRange(id uint64, root common.Hash, origin, limit common.Hash, bytes uint64) error {\n\tp.logger.Trace(\"Fetching range of accounts\", \"reqid\", id, \"root\", root, \"origin\", origin, \"limit\", limit, \"bytes\", common.StorageSize(bytes))\n\treturn p2p.Send(p.rw, GetAccountRangeMsg, &GetAccountRangePacket{\n\t\tID:     id,\n\t\tRoot:   root,\n\t\tOrigin: origin,\n\t\tLimit:  limit,\n\t\tBytes:  bytes,\n\t})\n}\n\n// RequestStorageRange fetches a batch of storage slots belonging to one or more\n// accounts. 
If slots from only one account are requested, an origin marker may also\n// be used to retrieve from there.\nfunc (p *Peer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {\n\tif len(accounts) == 1 && origin != nil {\n\t\tp.logger.Trace(\"Fetching range of large storage slots\", \"reqid\", id, \"root\", root, \"account\", accounts[0], \"origin\", common.BytesToHash(origin), \"limit\", common.BytesToHash(limit), \"bytes\", common.StorageSize(bytes))\n\t} else {\n\t\tp.logger.Trace(\"Fetching ranges of small storage slots\", \"reqid\", id, \"root\", root, \"accounts\", len(accounts), \"first\", accounts[0], \"bytes\", common.StorageSize(bytes))\n\t}\n\treturn p2p.Send(p.rw, GetStorageRangesMsg, &GetStorageRangesPacket{\n\t\tID:       id,\n\t\tRoot:     root,\n\t\tAccounts: accounts,\n\t\tOrigin:   origin,\n\t\tLimit:    limit,\n\t\tBytes:    bytes,\n\t})\n}\n\n// RequestByteCodes fetches a batch of bytecodes by hash.\nfunc (p *Peer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {\n\tp.logger.Trace(\"Fetching set of byte codes\", \"reqid\", id, \"hashes\", len(hashes), \"bytes\", common.StorageSize(bytes))\n\treturn p2p.Send(p.rw, GetByteCodesMsg, &GetByteCodesPacket{\n\t\tID:     id,\n\t\tHashes: hashes,\n\t\tBytes:  bytes,\n\t})\n}\n\n// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in\n// a specific state trie.\nfunc (p *Peer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {\n\tp.logger.Trace(\"Fetching set of trie nodes\", \"reqid\", id, \"root\", root, \"pathsets\", len(paths), \"bytes\", common.StorageSize(bytes))\n\treturn p2p.Send(p.rw, GetTrieNodesMsg, &GetTrieNodesPacket{\n\t\tID:    id,\n\t\tRoot:  root,\n\t\tPaths: paths,\n\t\tBytes: bytes,\n\t})\n}\n"
  },
  {
    "path": "eth/protocols/snap/protocol.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage snap\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/state/snapshot\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\n// Constants to match up protocol versions and messages\nconst (\n\tsnap1 = 1\n)\n\n// ProtocolName is the official short name of the `snap` protocol used during\n// devp2p capability negotiation.\nconst ProtocolName = \"snap\"\n\n// ProtocolVersions are the supported versions of the `snap` protocol (first\n// is primary).\nvar ProtocolVersions = []uint{snap1}\n\n// protocolLengths are the number of implemented message corresponding to\n// different protocol versions.\nvar protocolLengths = map[uint]uint64{snap1: 8}\n\n// maxMessageSize is the maximum cap on the size of a protocol message.\nconst maxMessageSize = 10 * 1024 * 1024\n\nconst (\n\tGetAccountRangeMsg  = 0x00\n\tAccountRangeMsg     = 0x01\n\tGetStorageRangesMsg = 0x02\n\tStorageRangesMsg    = 0x03\n\tGetByteCodesMsg     = 0x04\n\tByteCodesMsg        = 0x05\n\tGetTrieNodesMsg     = 0x06\n\tTrieNodesMsg        = 0x07\n)\n\nvar (\n\terrMsgTooLarge    = errors.New(\"message too long\")\n\terrDecode         = errors.New(\"invalid message\")\n\terrInvalidMsgCode = errors.New(\"invalid message code\")\n\terrBadRequest     = errors.New(\"bad request\")\n)\n\n// Packet represents a p2p message in the `snap` protocol.\ntype Packet interface {\n\tName() string // Name returns a string corresponding to the message type.\n\tKind() byte   // Kind returns the message type.\n}\n\n// GetAccountRangePacket represents an account query.\ntype GetAccountRangePacket struct {\n\tID     uint64      // Request ID to match up responses with\n\tRoot   common.Hash // Root hash of the account trie to serve\n\tOrigin common.Hash // Hash of the first account to retrieve\n\tLimit  common.Hash // Hash of the last account to retrieve\n\tBytes  uint64      // Soft limit at which to stop returning data\n}\n\n// AccountRangePacket represents an account query response.\ntype AccountRangePacket struct {\n\tID       uint64         // ID of the request this is a response for\n\tAccounts []*AccountData // List of consecutive accounts from the trie\n\tProof    [][]byte       // List of trie nodes proving the account range\n}\n\n// AccountData represents a single account in a query response.\ntype AccountData struct {\n\tHash common.Hash  // Hash of the account\n\tBody rlp.RawValue // Account body in slim format\n}\n\n// Unpack retrieves the accounts from the range packet and converts from slim\n// wire representation to consensus format. 
The returned data is RLP encoded\n// since it's expected to be serialized to disk without further interpretation.\n//\n// Note, this method does a round of RLP decoding and reencoding, so only use it\n// once and cache the results if need be. Ideally discard the packet afterwards\n// to not double the memory use.\nfunc (p *AccountRangePacket) Unpack() ([]common.Hash, [][]byte, error) {\n\tvar (\n\t\thashes   = make([]common.Hash, len(p.Accounts))\n\t\taccounts = make([][]byte, len(p.Accounts))\n\t)\n\tfor i, acc := range p.Accounts {\n\t\tval, err := snapshot.FullAccountRLP(acc.Body)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"invalid account %x: %v\", acc.Body, err)\n\t\t}\n\t\thashes[i], accounts[i] = acc.Hash, val\n\t}\n\treturn hashes, accounts, nil\n}\n\n// GetStorageRangesPacket represents a storage slot query.\ntype GetStorageRangesPacket struct {\n\tID       uint64        // Request ID to match up responses with\n\tRoot     common.Hash   // Root hash of the account trie to serve\n\tAccounts []common.Hash // Account hashes of the storage tries to serve\n\tOrigin   []byte        // Hash of the first storage slot to retrieve (large contract mode)\n\tLimit    []byte        // Hash of the last storage slot to retrieve (large contract mode)\n\tBytes    uint64        // Soft limit at which to stop returning data\n}\n\n// StorageRangesPacket represents a storage slot query response.\ntype StorageRangesPacket struct {\n\tID    uint64           // ID of the request this is a response for\n\tSlots [][]*StorageData // Lists of consecutive storage slots for the requested accounts\n\tProof [][]byte         // Merkle proofs for the *last* slot range, if it's incomplete\n}\n\n// StorageData represents a single storage slot in a query response.\ntype StorageData struct {\n\tHash common.Hash // Hash of the storage slot\n\tBody []byte      // Data content of the slot\n}\n\n// Unpack retrieves the storage slots from the range packet and returns them in\n// a split flat format that's more consistent with the internal data structures.\nfunc (p *StorageRangesPacket) Unpack() ([][]common.Hash, [][][]byte) {\n\tvar (\n\t\thashset = make([][]common.Hash, len(p.Slots))\n\t\tslotset = make([][][]byte, len(p.Slots))\n\t)\n\tfor i, slots := range p.Slots {\n\t\thashset[i] = make([]common.Hash, len(slots))\n\t\tslotset[i] = make([][]byte, len(slots))\n\t\tfor j, slot := range slots {\n\t\t\thashset[i][j] = slot.Hash\n\t\t\tslotset[i][j] = slot.Body\n\t\t}\n\t}\n\treturn hashset, slotset\n}\n\n// GetByteCodesPacket represents a contract bytecode query.\ntype GetByteCodesPacket struct {\n\tID     uint64        // Request ID to match up responses with\n\tHashes []common.Hash // Code hashes to retrieve the code for\n\tBytes  uint64        // Soft limit at which to stop returning data\n}\n\n// ByteCodesPacket represents a contract bytecode query response.\ntype ByteCodesPacket struct {\n\tID    uint64   // ID of the request this is a response for\n\tCodes [][]byte // Requested contract bytecodes\n}\n\n// GetTrieNodesPacket represents a state trie node query.\ntype GetTrieNodesPacket struct {\n\tID    uint64            // Request ID to match up responses with\n\tRoot  common.Hash       // Root hash of the account trie to serve\n\tPaths []TrieNodePathSet // Trie node hashes to retrieve the nodes for\n\tBytes uint64            // Soft limit at which to stop returning data\n}\n\n// TrieNodePathSet is a list of trie node paths to retrieve. 
A naive way to\n// represent trie nodes would be a simple list of `account || storage` path\n// segments concatenated, but that would be very wasteful on the network.\n//\n// Instead, this array special cases the first element as the path in the\n// account trie and the remaining elements as paths in the storage trie. To\n// address an account node, the slice should have a length of 1 consisting\n// of only the account path. There's no need to be able to address both an\n// account node and a storage node in the same request as it cannot happen\n// that a slot is accessed before the account path is fully expanded.\ntype TrieNodePathSet [][]byte\n\n// TrieNodesPacket represents a state trie node query response.\ntype TrieNodesPacket struct {\n\tID    uint64   // ID of the request this is a response for\n\tNodes [][]byte // Requested state trie nodes\n}\n\nfunc (*GetAccountRangePacket) Name() string { return \"GetAccountRange\" }\nfunc (*GetAccountRangePacket) Kind() byte   { return GetAccountRangeMsg }\n\nfunc (*AccountRangePacket) Name() string { return \"AccountRange\" }\nfunc (*AccountRangePacket) Kind() byte   { return AccountRangeMsg }\n\nfunc (*GetStorageRangesPacket) Name() string { return \"GetStorageRanges\" }\nfunc (*GetStorageRangesPacket) Kind() byte   { return GetStorageRangesMsg }\n\nfunc (*StorageRangesPacket) Name() string { return \"StorageRanges\" }\nfunc (*StorageRangesPacket) Kind() byte   { return StorageRangesMsg }\n\nfunc (*GetByteCodesPacket) Name() string { return \"GetByteCodes\" }\nfunc (*GetByteCodesPacket) Kind() byte   { return GetByteCodesMsg }\n\nfunc (*ByteCodesPacket) Name() string { return \"ByteCodes\" }\nfunc (*ByteCodesPacket) Kind() byte   { return ByteCodesMsg }\n\nfunc (*GetTrieNodesPacket) Name() string { return \"GetTrieNodes\" }\nfunc (*GetTrieNodesPacket) Kind() byte   { return GetTrieNodesMsg }\n\nfunc (*TrieNodesPacket) Name() string { return \"TrieNodes\" }\nfunc (*TrieNodesPacket) Kind() byte   { return TrieNodesMsg }\n"
  },
  {
    "path": "eth/protocols/snap/sync.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage snap\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"math/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n\t\"golang.org/x/crypto/sha3\"\n)\n\nvar (\n\t// emptyRoot is the known root hash of an empty trie.\n\temptyRoot = common.HexToHash(\"56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\")\n\n\t// emptyCode is the known hash of the empty EVM bytecode.\n\temptyCode = crypto.Keccak256Hash(nil)\n)\n\nconst (\n\t// maxRequestSize is the maximum number of bytes to request from a remote peer.\n\tmaxRequestSize = 512 * 1024\n\n\t// maxStorageSetRequestCountis th maximum number of contracts to request the\n\t// storage of in a single query. If this number is too low, we're not filling\n\t// responses fully and waste round trip times. If it's too high, we're capping\n\t// responses and waste bandwidth.\n\tmaxStorageSetRequestCount = maxRequestSize / 1024\n\n\t// maxCodeRequestCount is the maximum number of bytecode blobs to request in a\n\t// single query. If this number is too low, we're not filling responses fully\n\t// and waste round trip times. If it's too high, we're capping responses and\n\t// waste bandwidth.\n\t//\n\t// Depoyed bytecodes are currently capped at 24KB, so the minimum request\n\t// size should be maxRequestSize / 24K. Assuming that most contracts do not\n\t// come close to that, requesting 4x should be a good approximation.\n\tmaxCodeRequestCount = maxRequestSize / (24 * 1024) * 4\n\n\t// maxTrieRequestCount is the maximum number of trie node blobs to request in\n\t// a single query. If this number is too low, we're not filling responses fully\n\t// and waste round trip times. 
If it's too high, we're capping responses and\n\t// waste bandwidth.\n\tmaxTrieRequestCount = 512\n\n\t// accountConcurrency is the number of chunks to split the account trie into\n\t// to allow concurrent retrievals.\n\taccountConcurrency = 16\n\n\t// storageConcurrency is the number of chunks to split a large contract\n\t// storage trie into to allow concurrent retrievals.\n\tstorageConcurrency = 16\n)\n\nvar (\n\t// requestTimeout is the maximum time a peer is allowed to spend on serving\n\t// a single network request.\n\trequestTimeout = 15 * time.Second // TODO(karalabe): Make it dynamic ala fast-sync?\n)\n\n// ErrCancelled is returned from snap syncing if the operation was prematurely\n// terminated.\nvar ErrCancelled = errors.New(\"sync cancelled\")\n\n// accountRequest tracks a pending account range request to ensure responses are\n// to actual requests and to validate any security constraints.\n//\n// Concurrency note: account requests and responses are handled concurrently from\n// the main runloop to allow Merkle proof verifications on the peer's thread and\n// to drop on invalid response. The request struct must contain all the data to\n// construct the response without accessing runloop internals (i.e. task). That\n// is only included to allow the runloop to match a response to the task being\n// synced without having yet another set of maps.\ntype accountRequest struct {\n\tpeer string // Peer to which this request is assigned\n\tid   uint64 // Request ID of this request\n\n\tcancel  chan struct{} // Channel to track sync cancellation\n\ttimeout *time.Timer   // Timer to track delivery timeout\n\tstale   chan struct{} // Channel to signal the request was dropped\n\n\torigin common.Hash // First account requested to allow continuation checks\n\tlimit  common.Hash // Last account requested to allow non-overlapping chunking\n\n\ttask *accountTask // Task which this request is filling (only access fields through the runloop!!)\n}\n\n// accountResponse is an already Merkle-verified remote response to an account\n// range request. It contains the subtrie for the requested account range and\n// the database that's going to be filled with the internal nodes on commit.\ntype accountResponse struct {\n\ttask *accountTask // Task which this request is filling\n\n\thashes   []common.Hash    // Account hashes in the returned range\n\taccounts []*state.Account // Expanded accounts in the returned range\n\n\tnodes ethdb.KeyValueStore // Database containing the reconstructed trie nodes\n\ttrie  *trie.Trie          // Reconstructed trie to reject incomplete account paths\n\n\tbounds   map[common.Hash]struct{} // Boundary nodes to avoid persisting incomplete accounts\n\toverflow *light.NodeSet           // Overflow nodes to avoid persisting across chunk boundaries\n\n\tcont bool // Whether the account range has a continuation\n}\n\n// bytecodeRequest tracks a pending bytecode request to ensure responses are to\n// actual requests and to validate any security constraints.\n//\n// Concurrency note: bytecode requests and responses are handled concurrently from\n// the main runloop to allow Keccak256 hash verifications on the peer's thread and\n// to drop on invalid response. The request struct must contain all the data to\n// construct the response without accessing runloop internals (i.e. task). 
That\n// is only included to allow the runloop to match a response to the task being\n// synced without having yet another set of maps.\ntype bytecodeRequest struct {\n\tpeer string // Peer to which this request is assigned\n\tid   uint64 // Request ID of this request\n\n\tcancel  chan struct{} // Channel to track sync cancellation\n\ttimeout *time.Timer   // Timer to track delivery timeout\n\tstale   chan struct{} // Channel to signal the request was dropped\n\n\thashes []common.Hash // Bytecode hashes to validate responses\n\ttask   *accountTask  // Task which this request is filling (only access fields through the runloop!!)\n}\n\n// bytecodeResponse is an already verified remote response to a bytecode request.\ntype bytecodeResponse struct {\n\ttask *accountTask // Task which this request is filling\n\n\thashes []common.Hash // Hashes of the bytecode to avoid double hashing\n\tcodes  [][]byte      // Actual bytecodes to store into the database (nil = missing)\n}\n\n// storageRequest tracks a pending storage ranges request to ensure responses are\n// to actual requests and to validate any security constraints.\n//\n// Concurrency note: storage requests and responses are handled concurrently from\n// the main runloop to allow Merkle proof verifications on the peer's thread and\n// to drop on invalid response. The request struct must contain all the data to\n// construct the response without accessing runloop internals (i.e. tasks). That\n// is only included to allow the runloop to match a response to the task being\n// synced without having yet another set of maps.\ntype storageRequest struct {\n\tpeer string // Peer to which this request is assigned\n\tid   uint64 // Request ID of this request\n\n\tcancel  chan struct{} // Channel to track sync cancellation\n\ttimeout *time.Timer   // Timer to track delivery timeout\n\tstale   chan struct{} // Channel to signal the request was dropped\n\n\taccounts []common.Hash // Account hashes to validate responses\n\troots    []common.Hash // Storage roots to validate responses\n\n\torigin common.Hash // First storage slot requested to allow continuation checks\n\tlimit  common.Hash // Last storage slot requested to allow non-overlapping chunking\n\n\tmainTask *accountTask // Task which this response belongs to (only access fields through the runloop!!)\n\tsubTask  *storageTask // Task which this response is filling (only access fields through the runloop!!)\n}\n\n// storageResponse is an already Merkle-verified remote response to a storage\n// range request. 
It contains the subtries for the requested storage ranges and\n// the databases that are going to be filled with the internal nodes on commit.\ntype storageResponse struct {\n\tmainTask *accountTask // Task which this response belongs to\n\tsubTask  *storageTask // Task which this response is filling\n\n\taccounts []common.Hash // Account hashes requested, may be only partially filled\n\troots    []common.Hash // Storage roots requested, may be only partially filled\n\n\thashes [][]common.Hash       // Storage slot hashes in the returned range\n\tslots  [][][]byte            // Storage slot values in the returned range\n\tnodes  []ethdb.KeyValueStore // Database containing the reconstructed trie nodes\n\ttries  []*trie.Trie          // Reconstructed tries to reject overflown slots\n\n\t// Fields relevant for the last account only\n\tbounds   map[common.Hash]struct{} // Boundary nodes to avoid persisting (incomplete)\n\toverflow *light.NodeSet           // Overflow nodes to avoid persisting across chunk boundaries\n\tcont     bool                     // Whether the last storage range has a continuation\n}\n\n// trienodeHealRequest tracks a pending state trie request to ensure responses\n// are to actual requests and to validate any security constraints.\n//\n// Concurrency note: trie node requests and responses are handled concurrently from\n// the main runloop to allow Keccak256 hash verifications on the peer's thread and\n// to drop on invalid response. The request struct must contain all the data to\n// construct the response without accessing runloop internals (i.e. task). That\n// is only included to allow the runloop to match a response to the task being\n// synced without having yet another set of maps.\ntype trienodeHealRequest struct {\n\tpeer string // Peer to which this request is assigned\n\tid   uint64 // Request ID of this request\n\n\tcancel  chan struct{} // Channel to track sync cancellation\n\ttimeout *time.Timer   // Timer to track delivery timeout\n\tstale   chan struct{} // Channel to signal the request was dropped\n\n\thashes []common.Hash   // Trie node hashes to validate responses\n\tpaths  []trie.SyncPath // Trie node paths requested for rescheduling\n\n\ttask *healTask // Task which this request is filling (only access fields through the runloop!!)\n}\n\n// trienodeHealResponse is an already verified remote response to a trie node request.\ntype trienodeHealResponse struct {\n\ttask *healTask // Task which this request is filling\n\n\thashes []common.Hash   // Hashes of the trie nodes to avoid double hashing\n\tpaths  []trie.SyncPath // Trie node paths requested for rescheduling missing ones\n\tnodes  [][]byte        // Actual trie nodes to store into the database (nil = missing)\n}\n\n// bytecodeHealRequest tracks a pending bytecode request to ensure responses are to\n// actual requests and to validate any security constraints.\n//\n// Concurrency note: bytecode requests and responses are handled concurrently from\n// the main runloop to allow Keccak256 hash verifications on the peer's thread and\n// to drop on invalid response. The request struct must contain all the data to\n// construct the response without accessing runloop internals (i.e. task). 
That\n// is only included to allow the runloop to match a response to the task being\n// synced without having yet another set of maps.\ntype bytecodeHealRequest struct {\n\tpeer string // Peer to which this request is assigned\n\tid   uint64 // Request ID of this request\n\n\tcancel  chan struct{} // Channel to track sync cancellation\n\ttimeout *time.Timer   // Timer to track delivery timeout\n\tstale   chan struct{} // Channel to signal the request was dropped\n\n\thashes []common.Hash // Bytecode hashes to validate responses\n\ttask   *healTask     // Task which this request is filling (only access fields through the runloop!!)\n}\n\n// bytecodeHealResponse is an already verified remote response to a bytecode request.\ntype bytecodeHealResponse struct {\n\ttask *healTask // Task which this request is filling\n\n\thashes []common.Hash // Hashes of the bytecode to avoid double hashing\n\tcodes  [][]byte      // Actual bytecodes to store into the database (nil = missing)\n}\n\n// accountTask represents the sync task for a chunk of the account snapshot.\ntype accountTask struct {\n\t// These fields get serialized to leveldb on shutdown\n\tNext     common.Hash                    // Next account to sync in this interval\n\tLast     common.Hash                    // Last account to sync in this interval\n\tSubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts\n\n\t// These fields are internals used during runtime\n\treq  *accountRequest  // Pending request to fill this task\n\tres  *accountResponse // Validated response filling this task\n\tpend int              // Number of pending subtasks for this round\n\n\tneedCode  []bool // Flags whether the filling accounts need code retrieval\n\tneedState []bool // Flags whether the filling accounts need storage retrieval\n\tneedHeal  []bool // Flags whether the filling accounts' state was chunked and needs healing\n\n\tcodeTasks  map[common.Hash]struct{}    // Code hashes that need retrieval\n\tstateTasks map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval\n\n\tdone bool // Flag whether the task can be removed\n}\n\n// storageTask represents the sync task for a chunk of the storage snapshot.\ntype storageTask struct {\n\tNext common.Hash // Next account to sync in this interval\n\tLast common.Hash // Last account to sync in this interval\n\n\t// These fields are internals used during runtime\n\troot common.Hash     // Storage root hash for this instance\n\treq  *storageRequest // Pending request to fill this task\n\tdone bool            // Flag whether the task can be removed\n}\n\n// healTask represents the sync task for healing the snap-synced chunk boundaries.\ntype healTask struct {\n\tscheduler *trie.Sync // State trie sync scheduler defining the tasks\n\n\ttrieTasks map[common.Hash]trie.SyncPath // Set of trie node tasks currently queued for retrieval\n\tcodeTasks map[common.Hash]struct{}      // Set of byte code tasks currently queued for retrieval\n}\n\n// syncProgress is a database entry to allow suspending and resuming a snapshot state\n// sync. 
As opposed to full and fast sync, there is no way to restart a suspended\n// snap sync without prior knowledge of the suspension point.\ntype syncProgress struct {\n\tTasks []*accountTask // The suspended account tasks (contract tasks within)\n\n\t// Status report during syncing phase\n\tAccountSynced  uint64             // Number of accounts downloaded\n\tAccountBytes   common.StorageSize // Number of account trie bytes persisted to disk\n\tBytecodeSynced uint64             // Number of bytecodes downloaded\n\tBytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded\n\tStorageSynced  uint64             // Number of storage slots downloaded\n\tStorageBytes   common.StorageSize // Number of storage trie bytes persisted to disk\n\n\t// Status report during healing phase\n\tTrienodeHealSynced uint64             // Number of state trie nodes downloaded\n\tTrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk\n\tTrienodeHealDups   uint64             // Number of state trie nodes already processed\n\tTrienodeHealNops   uint64             // Number of state trie nodes not requested\n\tBytecodeHealSynced uint64             // Number of bytecodes downloaded\n\tBytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk\n\tBytecodeHealDups   uint64             // Number of bytecodes already processed\n\tBytecodeHealNops   uint64             // Number of bytecodes not requested\n}\n\n// SyncPeer abstracts out the methods required for a peer to be synced against\n// with the goal of allowing the construction of mock peers without the full\n// blown networking.\ntype SyncPeer interface {\n\t// ID retrieves the peer's unique identifier.\n\tID() string\n\n\t// RequestAccountRange fetches a batch of accounts rooted in a specific account\n\t// trie, starting with the origin.\n\tRequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error\n\n\t// RequestStorageRanges fetches a batch of storage slots belonging to one or\n\t// more accounts. If slots from only one account are requested, an origin marker\n\t// may also be used to retrieve from there.\n\tRequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error\n\n\t// RequestByteCodes fetches a batch of bytecodes by hash.\n\tRequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error\n\n\t// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in\n\t// a specific state trie.\n\tRequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error\n\n\t// Log retrieves the peer's own contextual logger.\n\tLog() log.Logger\n}\n\n// Syncer is an Ethereum account and storage trie syncer based on snapshots and\n// the snap protocol. 
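A minimal usage sketch (the database, peer\n// and root wiring here are hypothetical, for illustration only):\n//\n//\ts := NewSyncer(db)\n//\tif err := s.Register(peer); err != nil {\n//\t\treturn err\n//\t}\n//\tdefer s.Unregister(peer.ID())\n//\n//\tcancel := make(chan struct{})\n//\terr := s.Sync(root, cancel)\n//\n// 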
Its purpose is to download all the accounts and storage\n// slots from remote peers and reassemble chunks of the state trie, on top of\n// which a state sync can be run to fix any gaps / overlaps.\n//\n// Every network request has a variety of failure events:\n//   - The peer disconnects after task assignment, failing to send the request\n//   - The peer disconnects after sending the request, before delivering on it\n//   - The peer remains connected, but does not deliver a response in time\n//   - The peer delivers a stale response after a previous timeout\n//   - The peer delivers a refusal to serve the requested state\ntype Syncer struct {\n\tdb ethdb.KeyValueStore // Database to store the trie nodes into (and dedup)\n\n\troot    common.Hash    // Current state trie root being synced\n\ttasks   []*accountTask // Current account task set being synced\n\tsnapped bool           // Flag to signal that snap phase is done\n\thealer  *healTask      // Current state healing task being executed\n\tupdate  chan struct{}  // Notification channel for possible sync progression\n\n\tpeers    map[string]SyncPeer // Currently active peers to download from\n\tpeerJoin *event.Feed         // Event feed to react to peers joining\n\tpeerDrop *event.Feed         // Event feed to react to peers dropping\n\n\t// Request tracking during syncing phase\n\tstatelessPeers map[string]struct{} // Peers that failed to deliver state data\n\taccountIdlers  map[string]struct{} // Peers that aren't serving account requests\n\tbytecodeIdlers map[string]struct{} // Peers that aren't serving bytecode requests\n\tstorageIdlers  map[string]struct{} // Peers that aren't serving storage requests\n\n\taccountReqs  map[uint64]*accountRequest  // Account requests currently running\n\tbytecodeReqs map[uint64]*bytecodeRequest // Bytecode requests currently running\n\tstorageReqs  map[uint64]*storageRequest  // Storage requests currently running\n\n\taccountReqFails  chan *accountRequest  // Failed account range requests to revert\n\tbytecodeReqFails chan *bytecodeRequest // Failed bytecode requests to revert\n\tstorageReqFails  chan *storageRequest  // Failed storage requests to revert\n\n\taccountResps  chan *accountResponse  // Account sub-tries to integrate into the database\n\tbytecodeResps chan *bytecodeResponse // Bytecodes to integrate into the database\n\tstorageResps  chan *storageResponse  // Storage sub-tries to integrate into the database\n\n\taccountSynced  uint64             // Number of accounts downloaded\n\taccountBytes   common.StorageSize // Number of account trie bytes persisted to disk\n\tbytecodeSynced uint64             // Number of bytecodes downloaded\n\tbytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded\n\tstorageSynced  uint64             // Number of storage slots downloaded\n\tstorageBytes   common.StorageSize // Number of storage trie bytes persisted to disk\n\n\t// Request tracking during healing phase\n\ttrienodeHealIdlers map[string]struct{} // Peers that aren't serving trie node requests\n\tbytecodeHealIdlers map[string]struct{} // Peers that aren't serving bytecode requests\n\n\ttrienodeHealReqs map[uint64]*trienodeHealRequest // Trie node requests currently running\n\tbytecodeHealReqs map[uint64]*bytecodeHealRequest // Bytecode requests currently running\n\n\ttrienodeHealReqFails chan *trienodeHealRequest // Failed trienode requests to revert\n\tbytecodeHealReqFails chan *bytecodeHealRequest // Failed bytecode requests to revert\n\n\ttrienodeHealResps chan 
*trienodeHealResponse // Trie nodes to integrate into the database\n\tbytecodeHealResps chan *bytecodeHealResponse // Bytecodes to integrate into the database\n\n\ttrienodeHealSynced uint64             // Number of state trie nodes downloaded\n\ttrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk\n\ttrienodeHealDups   uint64             // Number of state trie nodes already processed\n\ttrienodeHealNops   uint64             // Number of state trie nodes not requested\n\tbytecodeHealSynced uint64             // Number of bytecodes downloaded\n\tbytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk\n\tbytecodeHealDups   uint64             // Number of bytecodes already processed\n\tbytecodeHealNops   uint64             // Number of bytecodes not requested\n\n\tstartTime time.Time   // Time instance when snapshot sync started\n\tstartAcc  common.Hash // Account hash where sync started from\n\tlogTime   time.Time   // Time instance when status was last reported\n\n\tpend sync.WaitGroup // Tracks network request goroutines for graceful shutdown\n\tlock sync.RWMutex   // Protects fields that can change outside of sync (peers, reqs, root)\n}\n\n// NewSyncer creates a new snapshot syncer to download the Ethereum state over the\n// snap protocol.\nfunc NewSyncer(db ethdb.KeyValueStore) *Syncer {\n\treturn &Syncer{\n\t\tdb: db,\n\n\t\tpeers:    make(map[string]SyncPeer),\n\t\tpeerJoin: new(event.Feed),\n\t\tpeerDrop: new(event.Feed),\n\t\tupdate:   make(chan struct{}, 1),\n\n\t\taccountIdlers:  make(map[string]struct{}),\n\t\tstorageIdlers:  make(map[string]struct{}),\n\t\tbytecodeIdlers: make(map[string]struct{}),\n\n\t\taccountReqs:      make(map[uint64]*accountRequest),\n\t\tstorageReqs:      make(map[uint64]*storageRequest),\n\t\tbytecodeReqs:     make(map[uint64]*bytecodeRequest),\n\t\taccountReqFails:  make(chan *accountRequest),\n\t\tstorageReqFails:  make(chan *storageRequest),\n\t\tbytecodeReqFails: make(chan *bytecodeRequest),\n\t\taccountResps:     make(chan *accountResponse),\n\t\tstorageResps:     make(chan *storageResponse),\n\t\tbytecodeResps:    make(chan *bytecodeResponse),\n\n\t\ttrienodeHealIdlers: make(map[string]struct{}),\n\t\tbytecodeHealIdlers: make(map[string]struct{}),\n\n\t\ttrienodeHealReqs:     make(map[uint64]*trienodeHealRequest),\n\t\tbytecodeHealReqs:     make(map[uint64]*bytecodeHealRequest),\n\t\ttrienodeHealReqFails: make(chan *trienodeHealRequest),\n\t\tbytecodeHealReqFails: make(chan *bytecodeHealRequest),\n\t\ttrienodeHealResps:    make(chan *trienodeHealResponse),\n\t\tbytecodeHealResps:    make(chan *bytecodeHealResponse),\n\t}\n}\n\n// Register injects a new data source into the syncer's peerset.\nfunc (s *Syncer) Register(peer SyncPeer) error {\n\t// Make sure the peer is not registered yet\n\tid := peer.ID()\n\n\ts.lock.Lock()\n\tif _, ok := s.peers[id]; ok {\n\t\tlog.Error(\"Snap peer already registered\", \"id\", id)\n\n\t\ts.lock.Unlock()\n\t\treturn errors.New(\"already registered\")\n\t}\n\ts.peers[id] = peer\n\n\t// Mark the peer as idle, even if no sync is running\n\ts.accountIdlers[id] = struct{}{}\n\ts.storageIdlers[id] = struct{}{}\n\ts.bytecodeIdlers[id] = struct{}{}\n\ts.trienodeHealIdlers[id] = struct{}{}\n\ts.bytecodeHealIdlers[id] = struct{}{}\n\ts.lock.Unlock()\n\n\t// Notify any active syncs that a new peer can be assigned data\n\ts.peerJoin.Send(id)\n\treturn nil\n}\n\n// Unregister severs a data source from the syncer's peerset.\nfunc (s *Syncer) Unregister(id string) error 
{\n\t// Remove all traces of the peer from the registry\n\ts.lock.Lock()\n\tif _, ok := s.peers[id]; !ok {\n\t\tlog.Error(\"Snap peer not registered\", \"id\", id)\n\n\t\ts.lock.Unlock()\n\t\treturn errors.New(\"not registered\")\n\t}\n\tdelete(s.peers, id)\n\n\t// Remove status markers, even if no sync is running\n\tdelete(s.statelessPeers, id)\n\n\tdelete(s.accountIdlers, id)\n\tdelete(s.storageIdlers, id)\n\tdelete(s.bytecodeIdlers, id)\n\tdelete(s.trienodeHealIdlers, id)\n\tdelete(s.bytecodeHealIdlers, id)\n\ts.lock.Unlock()\n\n\t// Notify any active syncs that pending requests need to be reverted\n\ts.peerDrop.Send(id)\n\treturn nil\n}\n\n// Sync starts (or resumes a previous) sync cycle to iterate over a state trie\n// with the given root and reconstruct the nodes based on the snapshot leaves.\n// Previously downloaded segments will not be redownloaded or fixed; rather, any\n// errors will be healed after the leaves are fully accumulated.\nfunc (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error {\n\t// Move the trie root from any previous value, revert stateless markers for\n\t// any peers and initialize the syncer if it was not yet run\n\ts.lock.Lock()\n\ts.root = root\n\ts.healer = &healTask{\n\t\tscheduler: state.NewStateSync(root, s.db, nil),\n\t\ttrieTasks: make(map[common.Hash]trie.SyncPath),\n\t\tcodeTasks: make(map[common.Hash]struct{}),\n\t}\n\ts.statelessPeers = make(map[string]struct{})\n\ts.lock.Unlock()\n\n\tif s.startTime == (time.Time{}) {\n\t\ts.startTime = time.Now()\n\t}\n\t// Retrieve the previous sync status from LevelDB and abort if already synced\n\ts.loadSyncStatus()\n\tif len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {\n\t\tlog.Debug(\"Snapshot sync already completed\")\n\t\treturn nil\n\t}\n\tdefer func() { // Persist any progress, independent of failure\n\t\tfor _, task := range s.tasks {\n\t\t\ts.forwardAccountTask(task)\n\t\t}\n\t\ts.cleanAccountTasks()\n\t\ts.saveSyncStatus()\n\t}()\n\n\tlog.Debug(\"Starting snapshot sync cycle\", \"root\", root)\n\tdefer s.report(true)\n\n\t// Whether sync completed or not, disregard any future packets\n\tdefer func() {\n\t\tlog.Debug(\"Terminating snapshot sync cycle\", \"root\", root)\n\t\ts.lock.Lock()\n\t\ts.accountReqs = make(map[uint64]*accountRequest)\n\t\ts.storageReqs = make(map[uint64]*storageRequest)\n\t\ts.bytecodeReqs = make(map[uint64]*bytecodeRequest)\n\t\ts.trienodeHealReqs = make(map[uint64]*trienodeHealRequest)\n\t\ts.bytecodeHealReqs = make(map[uint64]*bytecodeHealRequest)\n\t\ts.lock.Unlock()\n\t}()\n\t// Keep scheduling sync tasks\n\tpeerJoin := make(chan string, 16)\n\tpeerJoinSub := s.peerJoin.Subscribe(peerJoin)\n\tdefer peerJoinSub.Unsubscribe()\n\n\tpeerDrop := make(chan string, 16)\n\tpeerDropSub := s.peerDrop.Subscribe(peerDrop)\n\tdefer peerDropSub.Unsubscribe()\n\n\tfor {\n\t\t// Remove all completed tasks and terminate sync if everything's done\n\t\ts.cleanStorageTasks()\n\t\ts.cleanAccountTasks()\n\t\tif len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 {\n\t\t\treturn nil\n\t\t}\n\t\t// Assign all the data retrieval tasks to any free peers\n\t\ts.assignAccountTasks(cancel)\n\t\ts.assignBytecodeTasks(cancel)\n\t\ts.assignStorageTasks(cancel)\n\n\t\tif len(s.tasks) == 0 {\n\t\t\t// Sync phase done, run heal phase\n\t\t\ts.assignTrienodeHealTasks(cancel)\n\t\t\ts.assignBytecodeHealTasks(cancel)\n\t\t}\n\t\t// Wait for something to happen\n\t\tselect {\n\t\tcase <-s.update:\n\t\t\t// Something happened (new peer, delivery, timeout), recheck tasks\n\t\tcase 
<-peerJoin:\n\t\t\t// A new peer joined, try to schedule it new tasks\n\t\tcase id := <-peerDrop:\n\t\t\ts.revertRequests(id)\n\t\tcase <-cancel:\n\t\t\treturn ErrCancelled\n\n\t\tcase req := <-s.accountReqFails:\n\t\t\ts.revertAccountRequest(req)\n\t\tcase req := <-s.bytecodeReqFails:\n\t\t\ts.revertBytecodeRequest(req)\n\t\tcase req := <-s.storageReqFails:\n\t\t\ts.revertStorageRequest(req)\n\t\tcase req := <-s.trienodeHealReqFails:\n\t\t\ts.revertTrienodeHealRequest(req)\n\t\tcase req := <-s.bytecodeHealReqFails:\n\t\t\ts.revertBytecodeHealRequest(req)\n\n\t\tcase res := <-s.accountResps:\n\t\t\ts.processAccountResponse(res)\n\t\tcase res := <-s.bytecodeResps:\n\t\t\ts.processBytecodeResponse(res)\n\t\tcase res := <-s.storageResps:\n\t\t\ts.processStorageResponse(res)\n\t\tcase res := <-s.trienodeHealResps:\n\t\t\ts.processTrienodeHealResponse(res)\n\t\tcase res := <-s.bytecodeHealResps:\n\t\t\ts.processBytecodeHealResponse(res)\n\t\t}\n\t\t// Report stats if something meaningful happened\n\t\ts.report(false)\n\t}\n}\n\n// loadSyncStatus retrieves a previously aborted sync status from the database,\n// or generates a fresh one if none is available.\nfunc (s *Syncer) loadSyncStatus() {\n\tvar progress syncProgress\n\n\tif status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil {\n\t\tif err := json.Unmarshal(status, &progress); err != nil {\n\t\t\tlog.Error(\"Failed to decode snap sync status\", \"err\", err)\n\t\t} else {\n\t\t\tfor _, task := range progress.Tasks {\n\t\t\t\tlog.Debug(\"Scheduled account sync task\", \"from\", task.Next, \"last\", task.Last)\n\t\t\t}\n\t\t\ts.tasks = progress.Tasks\n\t\t\ts.snapped = len(s.tasks) == 0\n\n\t\t\ts.accountSynced = progress.AccountSynced\n\t\t\ts.accountBytes = progress.AccountBytes\n\t\t\ts.bytecodeSynced = progress.BytecodeSynced\n\t\t\ts.bytecodeBytes = progress.BytecodeBytes\n\t\t\ts.storageSynced = progress.StorageSynced\n\t\t\ts.storageBytes = progress.StorageBytes\n\n\t\t\ts.trienodeHealSynced = progress.TrienodeHealSynced\n\t\t\ts.trienodeHealBytes = progress.TrienodeHealBytes\n\t\t\ts.bytecodeHealSynced = progress.BytecodeHealSynced\n\t\t\ts.bytecodeHealBytes = progress.BytecodeHealBytes\n\t\t\treturn\n\t\t}\n\t}\n\t// Either we've failed to decode the previous state, or there was none.\n\t// Start a fresh sync by chunking up the account range and scheduling\n\t// the chunks for retrieval.\n\ts.tasks = nil\n\ts.accountSynced, s.accountBytes = 0, 0\n\ts.bytecodeSynced, s.bytecodeBytes = 0, 0\n\ts.storageSynced, s.storageBytes = 0, 0\n\ts.trienodeHealSynced, s.trienodeHealBytes = 0, 0\n\ts.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0\n\n\tvar next common.Hash\n\tstep := new(big.Int).Sub(\n\t\tnew(big.Int).Div(\n\t\t\tnew(big.Int).Exp(common.Big2, common.Big256, nil),\n\t\t\tbig.NewInt(accountConcurrency),\n\t\t), common.Big1,\n\t)\n\tfor i := 0; i < accountConcurrency; i++ {\n\t\tlast := common.BigToHash(new(big.Int).Add(next.Big(), step))\n\t\tif i == accountConcurrency-1 {\n\t\t\t// Make sure we don't overflow if the step is not a proper divisor\n\t\t\tlast = common.HexToHash(\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\")\n\t\t}\n\t\ts.tasks = append(s.tasks, &accountTask{\n\t\t\tNext:     next,\n\t\t\tLast:     last,\n\t\t\tSubTasks: make(map[common.Hash][]*storageTask),\n\t\t})\n\t\tlog.Debug(\"Created account sync task\", \"from\", next, \"last\", last)\n\t\tnext = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))\n\t}\n}\n\n// saveSyncStatus marshals the remaining sync tasks into leveldb.\nfunc 
(s *Syncer) saveSyncStatus() {\n\tprogress := &syncProgress{\n\t\tTasks:              s.tasks,\n\t\tAccountSynced:      s.accountSynced,\n\t\tAccountBytes:       s.accountBytes,\n\t\tBytecodeSynced:     s.bytecodeSynced,\n\t\tBytecodeBytes:      s.bytecodeBytes,\n\t\tStorageSynced:      s.storageSynced,\n\t\tStorageBytes:       s.storageBytes,\n\t\tTrienodeHealSynced: s.trienodeHealSynced,\n\t\tTrienodeHealBytes:  s.trienodeHealBytes,\n\t\tBytecodeHealSynced: s.bytecodeHealSynced,\n\t\tBytecodeHealBytes:  s.bytecodeHealBytes,\n\t}\n\tstatus, err := json.Marshal(progress)\n\tif err != nil {\n\t\tpanic(err) // This can only fail during implementation\n\t}\n\trawdb.WriteSnapshotSyncStatus(s.db, status)\n}\n\n// cleanAccountTasks removes account range retrieval tasks that have already been\n// completed.\nfunc (s *Syncer) cleanAccountTasks() {\n\tfor i := 0; i < len(s.tasks); i++ {\n\t\tif s.tasks[i].done {\n\t\t\ts.tasks = append(s.tasks[:i], s.tasks[i+1:]...)\n\t\t\ti--\n\t\t}\n\t}\n\tif len(s.tasks) == 0 {\n\t\ts.lock.Lock()\n\t\ts.snapped = true\n\t\ts.lock.Unlock()\n\t}\n}\n\n// cleanStorageTasks iterates over all the account tasks and storage sub-tasks\n// within, cleaning any that have been completed.\nfunc (s *Syncer) cleanStorageTasks() {\n\tfor _, task := range s.tasks {\n\t\tfor account, subtasks := range task.SubTasks {\n\t\t\t// Remove storage range retrieval tasks that completed\n\t\t\tfor j := 0; j < len(subtasks); j++ {\n\t\t\t\tif subtasks[j].done {\n\t\t\t\t\tsubtasks = append(subtasks[:j], subtasks[j+1:]...)\n\t\t\t\t\tj--\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(subtasks) > 0 {\n\t\t\t\ttask.SubTasks[account] = subtasks\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// If all storage chunks are done, mark the account as done too\n\t\t\tfor j, hash := range task.res.hashes {\n\t\t\t\tif hash == account {\n\t\t\t\t\ttask.needState[j] = false\n\t\t\t\t}\n\t\t\t}\n\t\t\tdelete(task.SubTasks, account)\n\t\t\ttask.pend--\n\n\t\t\t// If this was the last pending task, forward the account task\n\t\t\tif task.pend == 0 {\n\t\t\t\ts.forwardAccountTask(task)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// assignAccountTasks attempts to match idle peers to pending account range\n// retrievals.\nfunc (s *Syncer) assignAccountTasks(cancel chan struct{}) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\t// If there are no idle peers, short circuit assignment\n\tif len(s.accountIdlers) == 0 {\n\t\treturn\n\t}\n\t// Iterate over all the tasks and try to find a pending one\n\tfor _, task := range s.tasks {\n\t\t// Skip any tasks already filling\n\t\tif task.req != nil || task.res != nil {\n\t\t\tcontinue\n\t\t}\n\t\t// Task pending retrieval, try to find an idle peer. 
If no such peer\n\t\t// exists, we probably assigned tasks for all (or they are stateless).\n\t\t// Abort the entire assignment mechanism.\n\t\tvar idle string\n\t\tfor id := range s.accountIdlers {\n\t\t\t// If the peer rejected a query in this sync cycle, don't bother asking\n\t\t\t// again for anything, it's either out of sync or already pruned\n\t\t\tif _, ok := s.statelessPeers[id]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tidle = id\n\t\t\tbreak\n\t\t}\n\t\tif idle == \"\" {\n\t\t\treturn\n\t\t}\n\t\tpeer := s.peers[idle]\n\n\t\t// Matched a pending task to an idle peer, allocate a unique request id\n\t\tvar reqid uint64\n\t\tfor {\n\t\t\treqid = uint64(rand.Int63())\n\t\t\tif reqid == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := s.accountReqs[reqid]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\t// Generate the network query and send it to the peer\n\t\treq := &accountRequest{\n\t\t\tpeer:   idle,\n\t\t\tid:     reqid,\n\t\t\tcancel: cancel,\n\t\t\tstale:  make(chan struct{}),\n\t\t\torigin: task.Next,\n\t\t\tlimit:  task.Last,\n\t\t\ttask:   task,\n\t\t}\n\t\treq.timeout = time.AfterFunc(requestTimeout, func() {\n\t\t\tpeer.Log().Debug(\"Account range request timed out\", \"reqid\", reqid)\n\t\t\ts.scheduleRevertAccountRequest(req)\n\t\t})\n\t\ts.accountReqs[reqid] = req\n\t\tdelete(s.accountIdlers, idle)\n\n\t\ts.pend.Add(1)\n\t\tgo func(root common.Hash) {\n\t\t\tdefer s.pend.Done()\n\n\t\t\t// Attempt to send the remote request and revert if it fails\n\t\t\tif err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, maxRequestSize); err != nil {\n\t\t\t\tpeer.Log().Debug(\"Failed to request account range\", \"err\", err)\n\t\t\t\ts.scheduleRevertAccountRequest(req)\n\t\t\t}\n\t\t}(s.root)\n\n\t\t// Inject the request into the task to block further assignments\n\t\ttask.req = req\n\t}\n}\n\n// assignBytecodeTasks attempts to match idle peers to pending code retrievals.\nfunc (s *Syncer) assignBytecodeTasks(cancel chan struct{}) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\t// If there are no idle peers, short circuit assignment\n\tif len(s.bytecodeIdlers) == 0 {\n\t\treturn\n\t}\n\t// Iterate over all the tasks and try to find a pending one\n\tfor _, task := range s.tasks {\n\t\t// Skip any tasks not in the bytecode retrieval phase\n\t\tif task.res == nil {\n\t\t\tcontinue\n\t\t}\n\t\t// Skip tasks that are already retrieving (or done with) all codes\n\t\tif len(task.codeTasks) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// Task pending retrieval, try to find an idle peer. 
If no such peer\n\t\t// exists, we probably assigned tasks for all (or they are stateless).\n\t\t// Abort the entire assignment mechanism.\n\t\tvar idle string\n\t\tfor id := range s.bytecodeIdlers {\n\t\t\t// If the peer rejected a query in this sync cycle, don't bother asking\n\t\t\t// again for anything, it's either out of sync or already pruned\n\t\t\tif _, ok := s.statelessPeers[id]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tidle = id\n\t\t\tbreak\n\t\t}\n\t\tif idle == \"\" {\n\t\t\treturn\n\t\t}\n\t\tpeer := s.peers[idle]\n\n\t\t// Matched a pending task to an idle peer, allocate a unique request id\n\t\tvar reqid uint64\n\t\tfor {\n\t\t\treqid = uint64(rand.Int63())\n\t\t\tif reqid == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := s.bytecodeReqs[reqid]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\t// Generate the network query and send it to the peer\n\t\thashes := make([]common.Hash, 0, maxCodeRequestCount)\n\t\tfor hash := range task.codeTasks {\n\t\t\tdelete(task.codeTasks, hash)\n\t\t\thashes = append(hashes, hash)\n\t\t\tif len(hashes) >= maxCodeRequestCount {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treq := &bytecodeRequest{\n\t\t\tpeer:   idle,\n\t\t\tid:     reqid,\n\t\t\tcancel: cancel,\n\t\t\tstale:  make(chan struct{}),\n\t\t\thashes: hashes,\n\t\t\ttask:   task,\n\t\t}\n\t\treq.timeout = time.AfterFunc(requestTimeout, func() {\n\t\t\tpeer.Log().Debug(\"Bytecode request timed out\", \"reqid\", reqid)\n\t\t\ts.scheduleRevertBytecodeRequest(req)\n\t\t})\n\t\ts.bytecodeReqs[reqid] = req\n\t\tdelete(s.bytecodeIdlers, idle)\n\n\t\ts.pend.Add(1)\n\t\tgo func() {\n\t\t\tdefer s.pend.Done()\n\n\t\t\t// Attempt to send the remote request and revert if it fails\n\t\t\tif err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {\n\t\t\t\tlog.Debug(\"Failed to request bytecodes\", \"err\", err)\n\t\t\t\ts.scheduleRevertBytecodeRequest(req)\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// assignStorageTasks attempts to match idle peers to pending storage range\n// retrievals.\nfunc (s *Syncer) assignStorageTasks(cancel chan struct{}) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\t// If there are no idle peers, short circuit assignment\n\tif len(s.storageIdlers) == 0 {\n\t\treturn\n\t}\n\t// Iterate over all the tasks and try to find a pending one\n\tfor _, task := range s.tasks {\n\t\t// Skip any tasks not in the storage retrieval phase\n\t\tif task.res == nil {\n\t\t\tcontinue\n\t\t}\n\t\t// Skip tasks that are already retrieving (or done with) all small states\n\t\tif len(task.SubTasks) == 0 && len(task.stateTasks) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// Task pending retrieval, try to find an idle peer. If no such peer\n\t\t// exists, we probably assigned tasks for all (or they are stateless).\n\t\t// Abort the entire assignment mechanism.\n\t\tvar idle string\n\t\tfor id := range s.storageIdlers {\n\t\t\t// If the peer rejected a query in this sync cycle, don't bother asking\n\t\t\t// again for anything, it's either out of sync or already pruned\n\t\t\tif _, ok := s.statelessPeers[id]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tidle = id\n\t\t\tbreak\n\t\t}\n\t\tif idle == \"\" {\n\t\t\treturn\n\t\t}\n\t\tpeer := s.peers[idle]\n\n\t\t// Matched a pending task to an idle peer, allocate a unique request id\n\t\tvar reqid uint64\n\t\tfor {\n\t\t\treqid = uint64(rand.Int63())\n\t\t\tif reqid == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := s.storageReqs[reqid]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\t// Generate the network query and send it to the peer. 
If there are\n\t\t// large contract tasks pending, complete those before diving into\n\t\t// even more new contracts.\n\t\tvar (\n\t\t\taccounts = make([]common.Hash, 0, maxStorageSetRequestCount)\n\t\t\troots    = make([]common.Hash, 0, maxStorageSetRequestCount)\n\t\t\tsubtask  *storageTask\n\t\t)\n\t\tfor account, subtasks := range task.SubTasks {\n\t\t\tfor _, st := range subtasks {\n\t\t\t\t// Skip any subtasks already filling\n\t\t\t\tif st.req != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// Found an incomplete storage chunk, schedule it\n\t\t\t\taccounts = append(accounts, account)\n\t\t\t\troots = append(roots, st.root)\n\t\t\t\tsubtask = st\n\t\t\t\tbreak // Large contract chunks are downloaded individually\n\t\t\t}\n\t\t\tif subtask != nil {\n\t\t\t\tbreak // Large contract chunks are downloaded individually\n\t\t\t}\n\t\t}\n\t\tif subtask == nil {\n\t\t\t// No large contract requires retrieval, but small ones are available\n\t\t\tfor account, root := range task.stateTasks {\n\t\t\t\tdelete(task.stateTasks, account)\n\n\t\t\t\taccounts = append(accounts, account)\n\t\t\t\troots = append(roots, root)\n\n\t\t\t\tif len(accounts) >= maxStorageSetRequestCount {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// If nothing was found, it means this task is actually already fully\n\t\t// retrieving, but large contracts are hard to detect. Skip to the next.\n\t\tif len(accounts) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\treq := &storageRequest{\n\t\t\tpeer:     idle,\n\t\t\tid:       reqid,\n\t\t\tcancel:   cancel,\n\t\t\tstale:    make(chan struct{}),\n\t\t\taccounts: accounts,\n\t\t\troots:    roots,\n\t\t\tmainTask: task,\n\t\t\tsubTask:  subtask,\n\t\t}\n\t\tif subtask != nil {\n\t\t\treq.origin = subtask.Next\n\t\t\treq.limit = subtask.Last\n\t\t}\n\t\treq.timeout = time.AfterFunc(requestTimeout, func() {\n\t\t\tpeer.Log().Debug(\"Storage request timed out\", \"reqid\", reqid)\n\t\t\ts.scheduleRevertStorageRequest(req)\n\t\t})\n\t\ts.storageReqs[reqid] = req\n\t\tdelete(s.storageIdlers, idle)\n\n\t\ts.pend.Add(1)\n\t\tgo func(root common.Hash) {\n\t\t\tdefer s.pend.Done()\n\n\t\t\t// Attempt to send the remote request and revert if it fails\n\t\t\tvar origin, limit []byte\n\t\t\tif subtask != nil {\n\t\t\t\torigin, limit = req.origin[:], req.limit[:]\n\t\t\t}\n\t\t\tif err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, maxRequestSize); err != nil {\n\t\t\t\tlog.Debug(\"Failed to request storage\", \"err\", err)\n\t\t\t\ts.scheduleRevertStorageRequest(req)\n\t\t\t}\n\t\t}(s.root)\n\n\t\t// Inject the request into the subtask to block further assignments\n\t\tif subtask != nil {\n\t\t\tsubtask.req = req\n\t\t}\n\t}\n}\n\n// assignTrienodeHealTasks attempts to match idle peers to trie node requests to\n// heal any trie errors caused by the snap sync's chunked retrieval model.\nfunc (s *Syncer) assignTrienodeHealTasks(cancel chan struct{}) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\t// If there are no idle peers, short circuit assignment\n\tif len(s.trienodeHealIdlers) == 0 {\n\t\treturn\n\t}\n\t// Iterate over pending tasks and try to find a peer to retrieve with\n\tfor len(s.healer.trieTasks) > 0 || s.healer.scheduler.Pending() > 0 {\n\t\t// If there are not enough trie tasks queued to fully assign, fill the\n\t\t// queue from the state sync scheduler. 
The trie sync schedules these\n\t\t// together with bytecodes, so we need to queue them combined.\n\t\tvar (\n\t\t\thave = len(s.healer.trieTasks) + len(s.healer.codeTasks)\n\t\t\twant = maxTrieRequestCount + maxCodeRequestCount\n\t\t)\n\t\tif have < want {\n\t\t\tnodes, paths, codes := s.healer.scheduler.Missing(want - have)\n\t\t\tfor i, hash := range nodes {\n\t\t\t\ts.healer.trieTasks[hash] = paths[i]\n\t\t\t}\n\t\t\tfor _, hash := range codes {\n\t\t\t\ts.healer.codeTasks[hash] = struct{}{}\n\t\t\t}\n\t\t}\n\t\t// If all the heal tasks are bytecodes or already downloading, bail\n\t\tif len(s.healer.trieTasks) == 0 {\n\t\t\treturn\n\t\t}\n\t\t// Task pending retrieval, try to find an idle peer. If no such peer\n\t\t// exists, we probably assigned tasks for all (or they are stateless).\n\t\t// Abort the entire assignment mechanism.\n\t\tvar idle string\n\t\tfor id := range s.trienodeHealIdlers {\n\t\t\t// If the peer rejected a query in this sync cycle, don't bother asking\n\t\t\t// again for anything, it's either out of sync or already pruned\n\t\t\tif _, ok := s.statelessPeers[id]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tidle = id\n\t\t\tbreak\n\t\t}\n\t\tif idle == \"\" {\n\t\t\treturn\n\t\t}\n\t\tpeer := s.peers[idle]\n\n\t\t// Matched a pending task to an idle peer, allocate a unique request id\n\t\tvar reqid uint64\n\t\tfor {\n\t\t\treqid = uint64(rand.Int63())\n\t\t\tif reqid == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := s.trienodeHealReqs[reqid]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\t// Generate the network query and send it to the peer\n\t\tvar (\n\t\t\thashes   = make([]common.Hash, 0, maxTrieRequestCount)\n\t\t\tpaths    = make([]trie.SyncPath, 0, maxTrieRequestCount)\n\t\t\tpathsets = make([]TrieNodePathSet, 0, maxTrieRequestCount)\n\t\t)\n\t\tfor hash, pathset := range s.healer.trieTasks {\n\t\t\tdelete(s.healer.trieTasks, hash)\n\n\t\t\thashes = append(hashes, hash)\n\t\t\tpaths = append(paths, pathset)\n\t\t\tpathsets = append(pathsets, [][]byte(pathset)) // TODO(karalabe): group requests by account hash\n\n\t\t\tif len(hashes) >= maxTrieRequestCount {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treq := &trienodeHealRequest{\n\t\t\tpeer:   idle,\n\t\t\tid:     reqid,\n\t\t\tcancel: cancel,\n\t\t\tstale:  make(chan struct{}),\n\t\t\thashes: hashes,\n\t\t\tpaths:  paths,\n\t\t\ttask:   s.healer,\n\t\t}\n\t\treq.timeout = time.AfterFunc(requestTimeout, func() {\n\t\t\tpeer.Log().Debug(\"Trienode heal request timed out\", \"reqid\", reqid)\n\t\t\ts.scheduleRevertTrienodeHealRequest(req)\n\t\t})\n\t\ts.trienodeHealReqs[reqid] = req\n\t\tdelete(s.trienodeHealIdlers, idle)\n\n\t\ts.pend.Add(1)\n\t\tgo func(root common.Hash) {\n\t\t\tdefer s.pend.Done()\n\n\t\t\t// Attempt to send the remote request and revert if it fails\n\t\t\tif err := peer.RequestTrieNodes(reqid, root, pathsets, maxRequestSize); err != nil {\n\t\t\t\tlog.Debug(\"Failed to request trienode healers\", \"err\", err)\n\t\t\t\ts.scheduleRevertTrienodeHealRequest(req)\n\t\t\t}\n\t\t}(s.root)\n\t}\n}\n\n// assignBytecodeHealTasks attempts to match idle peers to bytecode requests to\n// heal any trie errors caused by the snap sync's chunked retrieval model.\nfunc (s *Syncer) assignBytecodeHealTasks(cancel chan struct{}) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\t// If there are no idle peers, short circuit assignment\n\tif len(s.bytecodeHealIdlers) == 0 {\n\t\treturn\n\t}\n\t// Iterate over pending tasks and try to find a peer to retrieve with\n\tfor len(s.healer.codeTasks) > 0 || 
s.healer.scheduler.Pending() > 0 {\n\t\t// If there are not enough bytecode tasks queued to fully assign, fill the\n\t\t// queue from the state sync scheduler. The trie sync schedules these\n\t\t// together with trie nodes, so we need to queue them combined.\n\t\tvar (\n\t\t\thave = len(s.healer.trieTasks) + len(s.healer.codeTasks)\n\t\t\twant = maxTrieRequestCount + maxCodeRequestCount\n\t\t)\n\t\tif have < want {\n\t\t\tnodes, paths, codes := s.healer.scheduler.Missing(want - have)\n\t\t\tfor i, hash := range nodes {\n\t\t\t\ts.healer.trieTasks[hash] = paths[i]\n\t\t\t}\n\t\t\tfor _, hash := range codes {\n\t\t\t\ts.healer.codeTasks[hash] = struct{}{}\n\t\t\t}\n\t\t}\n\t\t// If all the heal tasks are trienodes or already downloading, bail\n\t\tif len(s.healer.codeTasks) == 0 {\n\t\t\treturn\n\t\t}\n\t\t// Task pending retrieval, try to find an idle peer. If no such peer\n\t\t// exists, we probably assigned tasks for all (or they are stateless).\n\t\t// Abort the entire assignment mechanism.\n\t\tvar idle string\n\t\tfor id := range s.bytecodeHealIdlers {\n\t\t\t// If the peer rejected a query in this sync cycle, don't bother asking\n\t\t\t// again for anything, it's either out of sync or already pruned\n\t\t\tif _, ok := s.statelessPeers[id]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tidle = id\n\t\t\tbreak\n\t\t}\n\t\tif idle == \"\" {\n\t\t\treturn\n\t\t}\n\t\tpeer := s.peers[idle]\n\n\t\t// Matched a pending task to an idle peer, allocate a unique request id\n\t\tvar reqid uint64\n\t\tfor {\n\t\t\treqid = uint64(rand.Int63())\n\t\t\tif reqid == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, ok := s.bytecodeHealReqs[reqid]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\t// Generate the network query and send it to the peer\n\t\thashes := make([]common.Hash, 0, maxCodeRequestCount)\n\t\tfor hash := range s.healer.codeTasks {\n\t\t\tdelete(s.healer.codeTasks, hash)\n\n\t\t\thashes = append(hashes, hash)\n\t\t\tif len(hashes) >= maxCodeRequestCount {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treq := &bytecodeHealRequest{\n\t\t\tpeer:   idle,\n\t\t\tid:     reqid,\n\t\t\tcancel: cancel,\n\t\t\tstale:  make(chan struct{}),\n\t\t\thashes: hashes,\n\t\t\ttask:   s.healer,\n\t\t}\n\t\treq.timeout = time.AfterFunc(requestTimeout, func() {\n\t\t\tpeer.Log().Debug(\"Bytecode heal request timed out\", \"reqid\", reqid)\n\t\t\ts.scheduleRevertBytecodeHealRequest(req)\n\t\t})\n\t\ts.bytecodeHealReqs[reqid] = req\n\t\tdelete(s.bytecodeHealIdlers, idle)\n\n\t\ts.pend.Add(1)\n\t\tgo func() {\n\t\t\tdefer s.pend.Done()\n\n\t\t\t// Attempt to send the remote request and revert if it fails\n\t\t\tif err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil {\n\t\t\t\tlog.Debug(\"Failed to request bytecode healers\", \"err\", err)\n\t\t\t\ts.scheduleRevertBytecodeHealRequest(req)\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// revertRequests locates all the currently pending requests from a particular\n// peer and reverts them, rescheduling for others to fulfill.\nfunc (s *Syncer) revertRequests(peer string) {\n\t// Gather the requests first, reverting them needs the lock too\n\ts.lock.Lock()\n\tvar accountReqs []*accountRequest\n\tfor _, req := range s.accountReqs {\n\t\tif req.peer == peer {\n\t\t\taccountReqs = append(accountReqs, req)\n\t\t}\n\t}\n\tvar bytecodeReqs []*bytecodeRequest\n\tfor _, req := range s.bytecodeReqs {\n\t\tif req.peer == peer {\n\t\t\tbytecodeReqs = append(bytecodeReqs, req)\n\t\t}\n\t}\n\tvar storageReqs []*storageRequest\n\tfor _, req := range s.storageReqs {\n\t\tif req.peer == peer 
{\n\t\t\tstorageReqs = append(storageReqs, req)\n\t\t}\n\t}\n\tvar trienodeHealReqs []*trienodeHealRequest\n\tfor _, req := range s.trienodeHealReqs {\n\t\tif req.peer == peer {\n\t\t\ttrienodeHealReqs = append(trienodeHealReqs, req)\n\t\t}\n\t}\n\tvar bytecodeHealReqs []*bytecodeHealRequest\n\tfor _, req := range s.bytecodeHealReqs {\n\t\tif req.peer == peer {\n\t\t\tbytecodeHealReqs = append(bytecodeHealReqs, req)\n\t\t}\n\t}\n\ts.lock.Unlock()\n\n\t// Revert all the requests matching the peer\n\tfor _, req := range accountReqs {\n\t\ts.revertAccountRequest(req)\n\t}\n\tfor _, req := range bytecodeReqs {\n\t\ts.revertBytecodeRequest(req)\n\t}\n\tfor _, req := range storageReqs {\n\t\ts.revertStorageRequest(req)\n\t}\n\tfor _, req := range trienodeHealReqs {\n\t\ts.revertTrienodeHealRequest(req)\n\t}\n\tfor _, req := range bytecodeHealReqs {\n\t\ts.revertBytecodeHealRequest(req)\n\t}\n}\n\n// scheduleRevertAccountRequest asks the event loop to clean up an account range\n// request and return all failed retrieval tasks to the scheduler for reassignment.\nfunc (s *Syncer) scheduleRevertAccountRequest(req *accountRequest) {\n\tselect {\n\tcase s.accountReqFails <- req:\n\t\t// Sync event loop notified\n\tcase <-req.cancel:\n\t\t// Sync cycle got cancelled\n\tcase <-req.stale:\n\t\t// Request already reverted\n\t}\n}\n\n// revertAccountRequest cleans up an account range request and returns all failed\n// retrieval tasks to the scheduler for reassignment.\n//\n// Note, this needs to run on the event runloop thread to reschedule to idle peers.\n// On peer threads, use scheduleRevertAccountRequest.\nfunc (s *Syncer) revertAccountRequest(req *accountRequest) {\n\tlog.Debug(\"Reverting account request\", \"peer\", req.peer, \"reqid\", req.id)\n\tselect {\n\tcase <-req.stale:\n\t\tlog.Trace(\"Account request already reverted\", \"peer\", req.peer, \"reqid\", req.id)\n\t\treturn\n\tdefault:\n\t}\n\tclose(req.stale)\n\n\t// Remove the request from the tracked set\n\ts.lock.Lock()\n\tdelete(s.accountReqs, req.id)\n\ts.lock.Unlock()\n\n\t// If there's a timeout timer still running, abort it and mark the account\n\t// task as not-pending, ready for rescheduling\n\treq.timeout.Stop()\n\tif req.task.req == req {\n\t\treq.task.req = nil\n\t}\n}\n\n// scheduleRevertBytecodeRequest asks the event loop to clean up a bytecode request\n// and return all failed retrieval tasks to the scheduler for reassignment.\nfunc (s *Syncer) scheduleRevertBytecodeRequest(req *bytecodeRequest) {\n\tselect {\n\tcase s.bytecodeReqFails <- req:\n\t\t// Sync event loop notified\n\tcase <-req.cancel:\n\t\t// Sync cycle got cancelled\n\tcase <-req.stale:\n\t\t// Request already reverted\n\t}\n}\n\n// revertBytecodeRequest cleans up a bytecode request and returns all failed\n// retrieval tasks to the scheduler for reassignment.\n//\n// Note, this needs to run on the event runloop thread to reschedule to idle peers.\n// On peer threads, use scheduleRevertBytecodeRequest.\nfunc (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) {\n\tlog.Debug(\"Reverting bytecode request\", \"peer\", req.peer)\n\tselect {\n\tcase <-req.stale:\n\t\tlog.Trace(\"Bytecode request already reverted\", \"peer\", req.peer, \"reqid\", req.id)\n\t\treturn\n\tdefault:\n\t}\n\tclose(req.stale)\n\n\t// Remove the request from the tracked set\n\ts.lock.Lock()\n\tdelete(s.bytecodeReqs, req.id)\n\ts.lock.Unlock()\n\n\t// If there's a timeout timer still running, abort it and mark the code\n\t// retrievals as not-pending, ready for 
rescheduling\n\treq.timeout.Stop()\n\tfor _, hash := range req.hashes {\n\t\treq.task.codeTasks[hash] = struct{}{}\n\t}\n}\n\n// scheduleRevertStorageRequest asks the event loop to clean up a storage range\n// request and return all failed retrieval tasks to the scheduler for reassignment.\nfunc (s *Syncer) scheduleRevertStorageRequest(req *storageRequest) {\n\tselect {\n\tcase s.storageReqFails <- req:\n\t\t// Sync event loop notified\n\tcase <-req.cancel:\n\t\t// Sync cycle got cancelled\n\tcase <-req.stale:\n\t\t// Request already reverted\n\t}\n}\n\n// revertStorageRequest cleans up a storage range request and returns all failed\n// retrieval tasks to the scheduler for reassignment.\n//\n// Note, this needs to run on the event runloop thread to reschedule to idle peers.\n// On peer threads, use scheduleRevertStorageRequest.\nfunc (s *Syncer) revertStorageRequest(req *storageRequest) {\n\tlog.Debug(\"Reverting storage request\", \"peer\", req.peer)\n\tselect {\n\tcase <-req.stale:\n\t\tlog.Trace(\"Storage request already reverted\", \"peer\", req.peer, \"reqid\", req.id)\n\t\treturn\n\tdefault:\n\t}\n\tclose(req.stale)\n\n\t// Remove the request from the tracked set\n\ts.lock.Lock()\n\tdelete(s.storageReqs, req.id)\n\ts.lock.Unlock()\n\n\t// If there's a timeout timer still running, abort it and mark the storage\n\t// task as not-pending, ready for rescheduling\n\treq.timeout.Stop()\n\tif req.subTask != nil {\n\t\treq.subTask.req = nil\n\t} else {\n\t\tfor i, account := range req.accounts {\n\t\t\treq.mainTask.stateTasks[account] = req.roots[i]\n\t\t}\n\t}\n}\n\n// scheduleRevertTrienodeHealRequest asks the event loop to clean up a trienode heal\n// request and return all failed retrieval tasks to the scheduler for reassignment.\nfunc (s *Syncer) scheduleRevertTrienodeHealRequest(req *trienodeHealRequest) {\n\tselect {\n\tcase s.trienodeHealReqFails <- req:\n\t\t// Sync event loop notified\n\tcase <-req.cancel:\n\t\t// Sync cycle got cancelled\n\tcase <-req.stale:\n\t\t// Request already reverted\n\t}\n}\n\n// revertTrienodeHealRequest cleans up a trienode heal request and returns all\n// failed retrieval tasks to the scheduler for reassignment.\n//\n// Note, this needs to run on the event runloop thread to reschedule to idle peers.\n// On peer threads, use scheduleRevertTrienodeHealRequest.\nfunc (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) {\n\tlog.Debug(\"Reverting trienode heal request\", \"peer\", req.peer)\n\tselect {\n\tcase <-req.stale:\n\t\tlog.Trace(\"Trienode heal request already reverted\", \"peer\", req.peer, \"reqid\", req.id)\n\t\treturn\n\tdefault:\n\t}\n\tclose(req.stale)\n\n\t// Remove the request from the tracked set\n\ts.lock.Lock()\n\tdelete(s.trienodeHealReqs, req.id)\n\ts.lock.Unlock()\n\n\t// If there's a timeout timer still running, abort it and mark the trie node\n\t// retrievals as not-pending, ready for rescheduling\n\treq.timeout.Stop()\n\tfor i, hash := range req.hashes {\n\t\treq.task.trieTasks[hash] = req.paths[i]\n\t}\n}\n\n// scheduleRevertBytecodeHealRequest asks the event loop to clean up a bytecode heal\n// request and return all failed retrieval tasks to the scheduler for reassignment.\nfunc (s *Syncer) scheduleRevertBytecodeHealRequest(req *bytecodeHealRequest) {\n\tselect {\n\tcase s.bytecodeHealReqFails <- req:\n\t\t// Sync event loop notified\n\tcase <-req.cancel:\n\t\t// Sync cycle got cancelled\n\tcase <-req.stale:\n\t\t// Request already reverted\n\t}\n}\n\n// revertBytecodeHealRequest cleans up a bytecode heal request and 
returns all\n// failed retrieval tasks to the scheduler for reassignment.\n//\n// Note, this needs to run on the event runloop thread to reschedule to idle peers.\n// On peer threads, use scheduleRevertBytecodeHealRequest.\nfunc (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) {\n\tlog.Debug(\"Reverting bytecode heal request\", \"peer\", req.peer)\n\tselect {\n\tcase <-req.stale:\n\t\tlog.Trace(\"Bytecode heal request already reverted\", \"peer\", req.peer, \"reqid\", req.id)\n\t\treturn\n\tdefault:\n\t}\n\tclose(req.stale)\n\n\t// Remove the request from the tracked set\n\ts.lock.Lock()\n\tdelete(s.bytecodeHealReqs, req.id)\n\ts.lock.Unlock()\n\n\t// If there's a timeout timer still running, abort it and mark the code\n\t// retrievals as not-pending, ready for rescheduling\n\treq.timeout.Stop()\n\tfor _, hash := range req.hashes {\n\t\treq.task.codeTasks[hash] = struct{}{}\n\t}\n}\n\n// processAccountResponse integrates an already validated account range response\n// into the account tasks.\nfunc (s *Syncer) processAccountResponse(res *accountResponse) {\n\t// Switch the task from pending to filling\n\tres.task.req = nil\n\tres.task.res = res\n\n\t// Ensure that the response doesn't overflow into the subsequent task\n\tlast := res.task.Last.Big()\n\tfor i, hash := range res.hashes {\n\t\t// Mark the range complete if the last is already included.\n\t\t// Keep iterating to delete any extra states.\n\t\tcmp := hash.Big().Cmp(last)\n\t\tif cmp == 0 {\n\t\t\tres.cont = false\n\t\t\tcontinue\n\t\t}\n\t\tif cmp > 0 {\n\t\t\t// Chunk overflowed, cut off excess, but also update the boundary nodes\n\t\t\tfor j := i; j < len(res.hashes); j++ {\n\t\t\t\tif err := res.trie.Prove(res.hashes[j][:], 0, res.overflow); err != nil {\n\t\t\t\t\tpanic(err) // Account range was already proven, what happened\n\t\t\t\t}\n\t\t\t}\n\t\t\tres.hashes = res.hashes[:i]\n\t\t\tres.accounts = res.accounts[:i]\n\t\t\tres.cont = false // Mark range completed\n\t\t\tbreak\n\t\t}\n\t}\n\t// Iterate over all the accounts and assemble which ones need further sub-\n\t// filling before the entire account range can be persisted.\n\tres.task.needCode = make([]bool, len(res.accounts))\n\tres.task.needState = make([]bool, len(res.accounts))\n\tres.task.needHeal = make([]bool, len(res.accounts))\n\n\tres.task.codeTasks = make(map[common.Hash]struct{})\n\tres.task.stateTasks = make(map[common.Hash]common.Hash)\n\n\tresumed := make(map[common.Hash]struct{})\n\n\tres.task.pend = 0\n\tfor i, account := range res.accounts {\n\t\t// Check if the account is a contract with an unknown code\n\t\tif !bytes.Equal(account.CodeHash, emptyCode[:]) {\n\t\t\tif code := rawdb.ReadCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)); code == nil {\n\t\t\t\tres.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{}\n\t\t\t\tres.task.needCode[i] = true\n\t\t\t\tres.task.pend++\n\t\t\t}\n\t\t}\n\t\t// Check if the account is a contract with an unknown storage trie\n\t\tif account.Root != emptyRoot {\n\t\t\tif node, err := s.db.Get(account.Root[:]); err != nil || node == nil {\n\t\t\t\t// If there was a previous large state retrieval in progress,\n\t\t\t\t// don't restart it from scratch. This happens if a sync cycle\n\t\t\t\t// is interrupted and resumed later. 
However, *do* update the\n\t\t\t\t// previous root hash.\n\t\t\t\tif subtasks, ok := res.task.SubTasks[res.hashes[i]]; ok {\n\t\t\t\t\tlog.Debug(\"Resuming large storage retrieval\", \"account\", res.hashes[i], \"root\", account.Root)\n\t\t\t\t\tfor _, subtask := range subtasks {\n\t\t\t\t\t\tsubtask.root = account.Root\n\t\t\t\t\t}\n\t\t\t\t\tres.task.needHeal[i] = true\n\t\t\t\t\tresumed[res.hashes[i]] = struct{}{}\n\t\t\t\t} else {\n\t\t\t\t\tres.task.stateTasks[res.hashes[i]] = account.Root\n\t\t\t\t}\n\t\t\t\tres.task.needState[i] = true\n\t\t\t\tres.task.pend++\n\t\t\t}\n\t\t}\n\t}\n\t// Delete any subtasks that have been aborted but not resumed. This may undo\n\t// some progress if a new peer gives us fewer accounts than an old one, but for\n\t// now we have to live with that.\n\tfor hash := range res.task.SubTasks {\n\t\tif _, ok := resumed[hash]; !ok {\n\t\t\tlog.Debug(\"Aborting suspended storage retrieval\", \"account\", hash)\n\t\t\tdelete(res.task.SubTasks, hash)\n\t\t}\n\t}\n\t// If the account range contained no contracts, or all have been fully filled\n\t// beforehand, short circuit storage filling and forward to the next task\n\tif res.task.pend == 0 {\n\t\ts.forwardAccountTask(res.task)\n\t\treturn\n\t}\n\t// Some accounts are incomplete, leave as is for the storage and contract\n\t// task assigners to pick up and fill.\n}\n\n// processBytecodeResponse integrates an already validated bytecode response\n// into the account tasks.\nfunc (s *Syncer) processBytecodeResponse(res *bytecodeResponse) {\n\tbatch := s.db.NewBatch()\n\n\tvar (\n\t\tcodes uint64\n\t\tbytes common.StorageSize\n\t)\n\tfor i, hash := range res.hashes {\n\t\tcode := res.codes[i]\n\n\t\t// If the bytecode was not delivered, reschedule it\n\t\tif code == nil {\n\t\t\tres.task.codeTasks[hash] = struct{}{}\n\t\t\tcontinue\n\t\t}\n\t\t// Code was delivered, mark it not needed any more\n\t\tfor j, account := range res.task.res.accounts {\n\t\t\tif res.task.needCode[j] && hash == common.BytesToHash(account.CodeHash) {\n\t\t\t\tres.task.needCode[j] = false\n\t\t\t\tres.task.pend--\n\t\t\t}\n\t\t}\n\t\t// Push the bytecode into a database batch\n\t\ts.bytecodeSynced++\n\t\ts.bytecodeBytes += common.StorageSize(len(code))\n\n\t\tcodes++\n\t\tbytes += common.StorageSize(len(code))\n\n\t\trawdb.WriteCode(batch, hash, code)\n\t}\n\tif err := batch.Write(); err != nil {\n\t\tlog.Crit(\"Failed to persist bytecodes\", \"err\", err)\n\t}\n\tlog.Debug(\"Persisted set of bytecodes\", \"count\", codes, \"bytes\", bytes)\n\n\t// If this delivery completed the last pending task, forward the account task\n\t// to the next chunk\n\tif res.task.pend == 0 {\n\t\ts.forwardAccountTask(res.task)\n\t\treturn\n\t}\n\t// Some accounts are still incomplete, leave as is for the storage and contract\n\t// task assigners to pick up and fill.\n}\n\n// processStorageResponse integrates an already validated storage response\n// into the account tasks.\nfunc (s *Syncer) processStorageResponse(res *storageResponse) {\n\t// Switch the subtask from pending to idle\n\tif res.subTask != nil {\n\t\tres.subTask.req = nil\n\t}\n\tbatch := s.db.NewBatch()\n\n\tvar (\n\t\tslots   int\n\t\tnodes   int\n\t\tskipped int\n\t\tbytes   common.StorageSize\n\t)\n\t// Iterate over all the accounts and reconstruct their storage tries from the\n\t// delivered slots\n\tfor i, account := range res.accounts {\n\t\t// If the account was not delivered, reschedule it\n\t\tif i >= len(res.hashes) {\n\t\t\tres.mainTask.stateTasks[account] = 
res.roots[i]\n\t\t\tcontinue\n\t\t}\n\t\t// State was delivered, if complete mark as not needed any more, otherwise\n\t\t// mark the account as needing healing\n\t\tfor j, hash := range res.mainTask.res.hashes {\n\t\t\tif account != hash {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tacc := res.mainTask.res.accounts[j]\n\n\t\t\t// If the packet contains multiple contract storage slots, all\n\t\t\t// but the last are surely complete. The last contract may be\n\t\t\t// chunked, so check its continuation flag.\n\t\t\tif res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) {\n\t\t\t\tres.mainTask.needState[j] = false\n\t\t\t\tres.mainTask.pend--\n\t\t\t}\n\t\t\t// If the last contract was chunked, mark it as needing healing\n\t\t\t// to avoid writing it out to disk prematurely.\n\t\t\tif res.subTask == nil && !res.mainTask.needHeal[j] && i == len(res.hashes)-1 && res.cont {\n\t\t\t\tres.mainTask.needHeal[j] = true\n\t\t\t}\n\t\t\t// If the last contract was chunked, we need to switch to large\n\t\t\t// contract handling mode\n\t\t\tif res.subTask == nil && i == len(res.hashes)-1 && res.cont {\n\t\t\t\t// If we haven't yet started a large-contract retrieval, create\n\t\t\t\t// the subtasks for it within the main account task\n\t\t\t\tif tasks, ok := res.mainTask.SubTasks[account]; !ok {\n\t\t\t\t\tvar (\n\t\t\t\t\t\tnext common.Hash\n\t\t\t\t\t)\n\t\t\t\t\tstep := new(big.Int).Sub(\n\t\t\t\t\t\tnew(big.Int).Div(\n\t\t\t\t\t\t\tnew(big.Int).Exp(common.Big2, common.Big256, nil),\n\t\t\t\t\t\t\tbig.NewInt(storageConcurrency),\n\t\t\t\t\t\t), common.Big1,\n\t\t\t\t\t)\n\t\t\t\t\tfor k := 0; k < storageConcurrency; k++ {\n\t\t\t\t\t\tlast := common.BigToHash(new(big.Int).Add(next.Big(), step))\n\t\t\t\t\t\tif k == storageConcurrency-1 {\n\t\t\t\t\t\t\t// Make sure we don't overflow if the step is not a proper divisor\n\t\t\t\t\t\t\tlast = common.HexToHash(\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttasks = append(tasks, &storageTask{\n\t\t\t\t\t\t\tNext: next,\n\t\t\t\t\t\t\tLast: last,\n\t\t\t\t\t\t\troot: acc.Root,\n\t\t\t\t\t\t})\n\t\t\t\t\t\tlog.Debug(\"Created storage sync task\", \"account\", account, \"root\", acc.Root, \"from\", next, \"last\", last)\n\t\t\t\t\t\tnext = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))\n\t\t\t\t\t}\n\t\t\t\t\tres.mainTask.SubTasks[account] = tasks\n\n\t\t\t\t\t// Since we've just created the sub-tasks, this response\n\t\t\t\t\t// is surely for the first one (zero origin)\n\t\t\t\t\tres.subTask = tasks[0]\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If we're in large contract delivery mode, forward the subtask\n\t\t\tif res.subTask != nil {\n\t\t\t\t// Ensure the response doesn't overflow into the subsequent task\n\t\t\t\tlast := res.subTask.Last.Big()\n\t\t\t\tfor k, hash := range res.hashes[i] {\n\t\t\t\t\t// Mark the range complete if the last is already included.\n\t\t\t\t\t// Keep iterating to delete any extra states.\n\t\t\t\t\tcmp := hash.Big().Cmp(last)\n\t\t\t\t\tif cmp == 0 {\n\t\t\t\t\t\tres.cont = false\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif cmp > 0 {\n\t\t\t\t\t\t// Chunk overflowed, cut off excess, but also update the boundary\n\t\t\t\t\t\tfor l := k; l < len(res.hashes[i]); l++ {\n\t\t\t\t\t\t\tif err := res.tries[i].Prove(res.hashes[i][l][:], 0, res.overflow); err != nil {\n\t\t\t\t\t\t\t\tpanic(err) // Account range was already proven, what happened\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tres.hashes[i] = 
res.hashes[i][:k]\n\t\t\t\t\t\tres.slots[i] = res.slots[i][:k]\n\t\t\t\t\t\tres.cont = false // Mark range completed\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// Forward the relevant storage chunk (even if created just now)\n\t\t\t\tif res.cont {\n\t\t\t\t\tres.subTask.Next = common.BigToHash(new(big.Int).Add(res.hashes[i][len(res.hashes[i])-1].Big(), big.NewInt(1)))\n\t\t\t\t} else {\n\t\t\t\t\tres.subTask.done = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Iterate over all the reconstructed trie nodes and push them to disk\n\t\tslots += len(res.hashes[i])\n\n\t\tit := res.nodes[i].NewIterator(nil, nil)\n\t\tfor it.Next() {\n\t\t\t// Boundary nodes are not written for the last result, since they are incomplete\n\t\t\tif i == len(res.hashes)-1 && res.subTask != nil {\n\t\t\t\tif _, ok := res.bounds[common.BytesToHash(it.Key())]; ok {\n\t\t\t\t\tskipped++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif _, err := res.overflow.Get(it.Key()); err == nil {\n\t\t\t\t\tskipped++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Node is not a boundary, persist to disk\n\t\t\tbatch.Put(it.Key(), it.Value())\n\n\t\t\tbytes += common.StorageSize(common.HashLength + len(it.Value()))\n\t\t\tnodes++\n\t\t}\n\t\tit.Release()\n\t}\n\tif err := batch.Write(); err != nil {\n\t\tlog.Crit(\"Failed to persist storage slots\", \"err\", err)\n\t}\n\ts.storageSynced += uint64(slots)\n\ts.storageBytes += bytes\n\n\tlog.Debug(\"Persisted set of storage slots\", \"accounts\", len(res.hashes), \"slots\", slots, \"nodes\", nodes, \"skipped\", skipped, \"bytes\", bytes)\n\n\t// If this delivery completed the last pending task, forward the account task\n\t// to the next chunk\n\tif res.mainTask.pend == 0 {\n\t\ts.forwardAccountTask(res.mainTask)\n\t\treturn\n\t}\n\t// Some accounts are still incomplete, leave as is for the storage and contract\n\t// task assigners to pick up and fill.\n}\n\n// processTrienodeHealResponse integrates an already validated trienode response\n// into the healer tasks.\nfunc (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) {\n\tfor i, hash := range res.hashes {\n\t\tnode := res.nodes[i]\n\n\t\t// If the trie node was not delivered, reschedule it\n\t\tif node == nil {\n\t\t\tres.task.trieTasks[hash] = res.paths[i]\n\t\t\tcontinue\n\t\t}\n\t\t// Push the trie node into the state syncer\n\t\ts.trienodeHealSynced++\n\t\ts.trienodeHealBytes += common.StorageSize(len(node))\n\n\t\terr := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})\n\t\tswitch err {\n\t\tcase nil:\n\t\tcase trie.ErrAlreadyProcessed:\n\t\t\ts.trienodeHealDups++\n\t\tcase trie.ErrNotRequested:\n\t\t\ts.trienodeHealNops++\n\t\tdefault:\n\t\t\tlog.Error(\"Invalid trienode processed\", \"hash\", hash, \"err\", err)\n\t\t}\n\t}\n\tbatch := s.db.NewBatch()\n\tif err := s.healer.scheduler.Commit(batch); err != nil {\n\t\tlog.Error(\"Failed to commit healing data\", \"err\", err)\n\t}\n\tif err := batch.Write(); err != nil {\n\t\tlog.Crit(\"Failed to persist healing data\", \"err\", err)\n\t}\n\tlog.Debug(\"Persisted set of healing data\", \"type\", \"trienodes\", \"bytes\", common.StorageSize(batch.ValueSize()))\n}\n\n// processBytecodeHealResponse integrates an already validated bytecode response\n// into the healer tasks.\nfunc (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) {\n\tfor i, hash := range res.hashes {\n\t\tcode := res.codes[i]\n\n\t\t// If the bytecode was not delivered, reschedule it\n\t\tif code == nil {\n\t\t\tres.task.codeTasks[hash] = 
struct{}{}\n\t\t\tcontinue\n\t\t}\n\t\t// Push the bytecode into the state syncer\n\t\ts.bytecodeHealSynced++\n\t\ts.bytecodeHealBytes += common.StorageSize(len(node))\n\n\t\terr := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})\n\t\tswitch err {\n\t\tcase nil:\n\t\tcase trie.ErrAlreadyProcessed:\n\t\t\ts.bytecodeHealDups++\n\t\tcase trie.ErrNotRequested:\n\t\t\ts.bytecodeHealNops++\n\t\tdefault:\n\t\t\tlog.Error(\"Invalid bytecode processed\", \"hash\", hash, \"err\", err)\n\t\t}\n\t}\n\tbatch := s.db.NewBatch()\n\tif err := s.healer.scheduler.Commit(batch); err != nil {\n\t\tlog.Error(\"Failed to commit healing data\", \"err\", err)\n\t}\n\tif err := batch.Write(); err != nil {\n\t\tlog.Crit(\"Failed to persist healing data\", \"err\", err)\n\t}\n\tlog.Debug(\"Persisted set of healing data\", \"type\", \"bytecode\", \"bytes\", common.StorageSize(batch.ValueSize()))\n}\n\n// forwardAccountTask takes a filled account task and persists anything available\n// into the database, after which it forwards the next account marker so that the\n// task's next chunk may be filled.\nfunc (s *Syncer) forwardAccountTask(task *accountTask) {\n\t// Remove any pending delivery\n\tres := task.res\n\tif res == nil {\n\t\treturn // nothing to forward\n\t}\n\ttask.res = nil\n\n\t// Iterate over all the accounts and gather all the incomplete trie nodes. A\n\t// node is incomplete if we haven't yet filled it (sync was interrupted), or\n\t// if we filled it in multiple chunks (storage trie), in which case the few\n\t// nodes on the chunk boundaries are missing.\n\tincompletes := light.NewNodeSet()\n\tfor i := range res.accounts {\n\t\t// If the filling was interrupted, mark everything after as incomplete\n\t\tif task.needCode[i] || task.needState[i] {\n\t\t\tfor j := i; j < len(res.accounts); j++ {\n\t\t\t\tif err := res.trie.Prove(res.hashes[j][:], 0, incompletes); err != nil {\n\t\t\t\t\tpanic(err) // Account range was already proven, what happened\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\t// Filling was not interrupted up to this point, mark the account incomplete\n\t\t// if it needs healing\n\t\tif task.needHeal[i] {\n\t\t\tif err := res.trie.Prove(res.hashes[i][:], 0, incompletes); err != nil {\n\t\t\t\tpanic(err) // Account range was already proven, what happened\n\t\t\t}\n\t\t}\n\t}\n\t// Persist every finalized trie node that's not on the boundary\n\tbatch := s.db.NewBatch()\n\n\tvar (\n\t\tnodes   int\n\t\tskipped int\n\t\tbytes   common.StorageSize\n\t)\n\tit := res.nodes.NewIterator(nil, nil)\n\tfor it.Next() {\n\t\t// Boundary nodes are not written, since they are incomplete\n\t\tif _, ok := res.bounds[common.BytesToHash(it.Key())]; ok {\n\t\t\tskipped++\n\t\t\tcontinue\n\t\t}\n\t\t// Overflow nodes are not written, since they mess with another task\n\t\tif _, err := res.overflow.Get(it.Key()); err == nil {\n\t\t\tskipped++\n\t\t\tcontinue\n\t\t}\n\t\t// Accounts with split storage requests are incomplete\n\t\tif _, err := incompletes.Get(it.Key()); err == nil {\n\t\t\tskipped++\n\t\t\tcontinue\n\t\t}\n\t\t// Node is neither a boundary nor an incomplete account, persist to disk\n\t\tbatch.Put(it.Key(), it.Value())\n\n\t\tbytes += common.StorageSize(common.HashLength + len(it.Value()))\n\t\tnodes++\n\t}\n\tit.Release()\n\n\tif err := batch.Write(); err != nil {\n\t\tlog.Crit(\"Failed to persist accounts\", \"err\", err)\n\t}\n\ts.accountBytes += bytes\n\ts.accountSynced += uint64(len(res.accounts))\n\n\tlog.Debug(\"Persisted range of accounts\", \"accounts\", len(res.accounts), \"nodes\", nodes, 
\"skipped\", skipped, \"bytes\", bytes)\n\n\t// Task filling persisted, push it the chunk marker forward to the first\n\t// account still missing data.\n\tfor i, hash := range res.hashes {\n\t\tif task.needCode[i] || task.needState[i] {\n\t\t\treturn\n\t\t}\n\t\ttask.Next = common.BigToHash(new(big.Int).Add(hash.Big(), big.NewInt(1)))\n\t}\n\t// All accounts marked as complete, track if the entire task is done\n\ttask.done = !res.cont\n}\n\n// OnAccounts is a callback method to invoke when a range of accounts are\n// received from a remote peer.\nfunc (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, accounts [][]byte, proof [][]byte) error {\n\tsize := common.StorageSize(len(hashes) * common.HashLength)\n\tfor _, account := range accounts {\n\t\tsize += common.StorageSize(len(account))\n\t}\n\tfor _, node := range proof {\n\t\tsize += common.StorageSize(len(node))\n\t}\n\tlogger := peer.Log().New(\"reqid\", id)\n\tlogger.Trace(\"Delivering range of accounts\", \"hashes\", len(hashes), \"accounts\", len(accounts), \"proofs\", len(proof), \"bytes\", size)\n\n\t// Whether or not the response is valid, we can mark the peer as idle and\n\t// notify the scheduler to assign a new task. If the response is invalid,\n\t// we'll drop the peer in a bit.\n\ts.lock.Lock()\n\tif _, ok := s.peers[peer.ID()]; ok {\n\t\ts.accountIdlers[peer.ID()] = struct{}{}\n\t}\n\tselect {\n\tcase s.update <- struct{}{}:\n\tdefault:\n\t}\n\t// Ensure the response is for a valid request\n\treq, ok := s.accountReqs[id]\n\tif !ok {\n\t\t// Request stale, perhaps the peer timed out but came through in the end\n\t\tlogger.Warn(\"Unexpected account range packet\")\n\t\ts.lock.Unlock()\n\t\treturn nil\n\t}\n\tdelete(s.accountReqs, id)\n\n\t// Clean up the request timeout timer, we'll see how to proceed further based\n\t// on the actual delivered content\n\tif !req.timeout.Stop() {\n\t\t// The timeout is already triggered, and this request will be reverted+rescheduled\n\t\ts.lock.Unlock()\n\t\treturn nil\n\t}\n\n\t// Response is valid, but check if peer is signalling that it does not have\n\t// the requested data. 
For account range queries that means the state being\n\t// retrieved was either already pruned remotely, or the peer is not yet\n\t// synced to our head.\n\tif len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 {\n\t\tlogger.Debug(\"Peer rejected account range request\", \"root\", s.root)\n\t\ts.statelessPeers[peer.ID()] = struct{}{}\n\t\ts.lock.Unlock()\n\n\t\t// Signal this request as failed, and ready for rescheduling\n\t\ts.scheduleRevertAccountRequest(req)\n\t\treturn nil\n\t}\n\troot := s.root\n\ts.lock.Unlock()\n\n\t// Reconstruct a partial trie from the response and verify it\n\tkeys := make([][]byte, len(hashes))\n\tfor i, key := range hashes {\n\t\tkeys[i] = common.CopyBytes(key[:])\n\t}\n\tnodes := make(light.NodeList, len(proof))\n\tfor i, node := range proof {\n\t\tnodes[i] = node\n\t}\n\tproofdb := nodes.NodeSet()\n\n\tvar end []byte\n\tif len(keys) > 0 {\n\t\tend = keys[len(keys)-1]\n\t}\n\tdb, tr, notary, cont, err := trie.VerifyRangeProof(root, req.origin[:], end, keys, accounts, proofdb)\n\tif err != nil {\n\t\tlogger.Warn(\"Account range failed proof\", \"err\", err)\n\t\t// Signal this request as failed, and ready for rescheduling\n\t\ts.scheduleRevertAccountRequest(req)\n\t\treturn err\n\t}\n\t// Partial trie reconstructed, send it to the scheduler for storage filling\n\tbounds := make(map[common.Hash]struct{})\n\n\tit := notary.Accessed().NewIterator(nil, nil)\n\tfor it.Next() {\n\t\tbounds[common.BytesToHash(it.Key())] = struct{}{}\n\t}\n\tit.Release()\n\n\taccs := make([]*state.Account, len(accounts))\n\tfor i, account := range accounts {\n\t\tacc := new(state.Account)\n\t\tif err := rlp.DecodeBytes(account, acc); err != nil {\n\t\t\tpanic(err) // We created these blobs, we must be able to decode them\n\t\t}\n\t\taccs[i] = acc\n\t}\n\tresponse := &accountResponse{\n\t\ttask:     req.task,\n\t\thashes:   hashes,\n\t\taccounts: accs,\n\t\tnodes:    db,\n\t\ttrie:     tr,\n\t\tbounds:   bounds,\n\t\toverflow: light.NewNodeSet(),\n\t\tcont:     cont,\n\t}\n\tselect {\n\tcase s.accountResps <- response:\n\tcase <-req.cancel:\n\tcase <-req.stale:\n\t}\n\treturn nil\n}\n\n// OnByteCodes is a callback method to invoke when a batch of contract\n// byte codes are received from a remote peer.\nfunc (s *Syncer) OnByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {\n\ts.lock.RLock()\n\tsyncing := !s.snapped\n\ts.lock.RUnlock()\n\n\tif syncing {\n\t\treturn s.onByteCodes(peer, id, bytecodes)\n\t}\n\treturn s.onHealByteCodes(peer, id, bytecodes)\n}\n\n// onByteCodes is a callback method to invoke when a batch of contract\n// byte codes are received from a remote peer in the syncing phase.\nfunc (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {\n\tvar size common.StorageSize\n\tfor _, code := range bytecodes {\n\t\tsize += common.StorageSize(len(code))\n\t}\n\tlogger := peer.Log().New(\"reqid\", id)\n\tlogger.Trace(\"Delivering set of bytecodes\", \"bytecodes\", len(bytecodes), \"bytes\", size)\n\n\t// Whether or not the response is valid, we can mark the peer as idle and\n\t// notify the scheduler to assign a new task. 
If the response is invalid,\n\t// we'll drop the peer in a bit.\n\ts.lock.Lock()\n\tif _, ok := s.peers[peer.ID()]; ok {\n\t\ts.bytecodeIdlers[peer.ID()] = struct{}{}\n\t}\n\tselect {\n\tcase s.update <- struct{}{}:\n\tdefault:\n\t}\n\t// Ensure the response is for a valid request\n\treq, ok := s.bytecodeReqs[id]\n\tif !ok {\n\t\t// Request stale, perhaps the peer timed out but came through in the end\n\t\tlogger.Warn(\"Unexpected bytecode packet\")\n\t\ts.lock.Unlock()\n\t\treturn nil\n\t}\n\tdelete(s.bytecodeReqs, id)\n\n\t// Clean up the request timeout timer, we'll see how to proceed further based\n\t// on the actual delivered content\n\tif !req.timeout.Stop() {\n\t\t// The timeout is already triggered, and this request will be reverted+rescheduled\n\t\ts.lock.Unlock()\n\t\treturn nil\n\t}\n\n\t// Response is valid, but check if peer is signalling that it does not have\n\t// the requested data. For bytecode range queries that means the peer is not\n\t// yet synced.\n\tif len(bytecodes) == 0 {\n\t\tlogger.Debug(\"Peer rejected bytecode request\")\n\t\ts.statelessPeers[peer.ID()] = struct{}{}\n\t\ts.lock.Unlock()\n\n\t\t// Signal this request as failed, and ready for rescheduling\n\t\ts.scheduleRevertBytecodeRequest(req)\n\t\treturn nil\n\t}\n\ts.lock.Unlock()\n\n\t// Cross reference the requested bytecodes with the response to find gaps\n\t// that the serving node is missing\n\thasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)\n\thash := make([]byte, 32)\n\n\tcodes := make([][]byte, len(req.hashes))\n\tfor i, j := 0, 0; i < len(bytecodes); i++ {\n\t\t// Find the next hash that we've been served, leaving misses with nils\n\t\thasher.Reset()\n\t\thasher.Write(bytecodes[i])\n\t\thasher.Read(hash)\n\n\t\tfor j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {\n\t\t\tj++\n\t\t}\n\t\tif j < len(req.hashes) {\n\t\t\tcodes[j] = bytecodes[i]\n\t\t\tj++\n\t\t\tcontinue\n\t\t}\n\t\t// We've either run out of hashes, or got unrequested data\n\t\tlogger.Warn(\"Unexpected bytecodes\", \"count\", len(bytecodes)-i)\n\t\t// Signal this request as failed, and ready for rescheduling\n\t\ts.scheduleRevertBytecodeRequest(req)\n\t\treturn errors.New(\"unexpected bytecode\")\n\t}\n\t// Response validated, send it to the scheduler for filling\n\tresponse := &bytecodeResponse{\n\t\ttask:   req.task,\n\t\thashes: req.hashes,\n\t\tcodes:  codes,\n\t}\n\tselect {\n\tcase s.bytecodeResps <- response:\n\tcase <-req.cancel:\n\tcase <-req.stale:\n\t}\n\treturn nil\n}\n\n// OnStorage is a callback method to invoke when ranges of storage slots\n// are received from a remote peer.\nfunc (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slots [][][]byte, proof [][]byte) error {\n\t// Gather some trace stats to aid in debugging issues\n\tvar (\n\t\thashCount int\n\t\tslotCount int\n\t\tsize      common.StorageSize\n\t)\n\tfor _, hashset := range hashes {\n\t\tsize += common.StorageSize(common.HashLength * len(hashset))\n\t\thashCount += len(hashset)\n\t}\n\tfor _, slotset := range slots {\n\t\tfor _, slot := range slotset {\n\t\t\tsize += common.StorageSize(len(slot))\n\t\t}\n\t\tslotCount += len(slotset)\n\t}\n\tfor _, node := range proof {\n\t\tsize += common.StorageSize(len(node))\n\t}\n\tlogger := peer.Log().New(\"reqid\", id)\n\tlogger.Trace(\"Delivering ranges of storage slots\", \"accounts\", len(hashes), \"hashes\", hashCount, \"slots\", slotCount, \"proofs\", len(proof), \"size\", size)\n\n\t// Whether or not the response is valid, we can mark the peer as idle and\n\t// 
notify the scheduler to assign a new task. If the response is invalid,\n\t// we'll drop the peer in a bit.\n\ts.lock.Lock()\n\tif _, ok := s.peers[peer.ID()]; ok {\n\t\ts.storageIdlers[peer.ID()] = struct{}{}\n\t}\n\tselect {\n\tcase s.update <- struct{}{}:\n\tdefault:\n\t}\n\t// Ensure the response is for a valid request\n\treq, ok := s.storageReqs[id]\n\tif !ok {\n\t\t// Request stale, perhaps the peer timed out but came through in the end\n\t\tlogger.Warn(\"Unexpected storage ranges packet\")\n\t\ts.lock.Unlock()\n\t\treturn nil\n\t}\n\tdelete(s.storageReqs, id)\n\n\t// Clean up the request timeout timer, we'll see how to proceed further based\n\t// on the actual delivered content\n\tif !req.timeout.Stop() {\n\t\t// The timeout is already triggered, and this request will be reverted+rescheduled\n\t\ts.lock.Unlock()\n\t\treturn nil\n\t}\n\n\t// Reject the response if the hash sets and slot sets don't match, or if the\n\t// peer sent more data than requested.\n\tif len(hashes) != len(slots) {\n\t\ts.lock.Unlock()\n\t\ts.scheduleRevertStorageRequest(req) // reschedule request\n\t\tlogger.Warn(\"Hash and slot set size mismatch\", \"hashset\", len(hashes), \"slotset\", len(slots))\n\t\treturn errors.New(\"hash and slot set size mismatch\")\n\t}\n\tif len(hashes) > len(req.accounts) {\n\t\ts.lock.Unlock()\n\t\ts.scheduleRevertStorageRequest(req) // reschedule request\n\t\tlogger.Warn(\"Hash set larger than requested\", \"hashset\", len(hashes), \"requested\", len(req.accounts))\n\t\treturn errors.New(\"hash set larger than requested\")\n\t}\n\t// Response is valid, but check if peer is signalling that it does not have\n\t// the requested data. For storage range queries that means the state being\n\t// retrieved was either already pruned remotely, or the peer is not yet\n\t// synced to our head.\n\tif len(hashes) == 0 {\n\t\tlogger.Debug(\"Peer rejected storage request\")\n\t\ts.statelessPeers[peer.ID()] = struct{}{}\n\t\ts.lock.Unlock()\n\t\ts.scheduleRevertStorageRequest(req) // reschedule request\n\t\treturn nil\n\t}\n\ts.lock.Unlock()\n\n\t// Reconstruct the partial tries from the response and verify them\n\tvar (\n\t\tdbs    = make([]ethdb.KeyValueStore, len(hashes))\n\t\ttries  = make([]*trie.Trie, len(hashes))\n\t\tnotary *trie.KeyValueNotary\n\t\tcont   bool\n\t)\n\tfor i := 0; i < len(hashes); i++ {\n\t\t// Convert the keys and proofs into an internal format\n\t\tkeys := make([][]byte, len(hashes[i]))\n\t\tfor j, key := range hashes[i] {\n\t\t\tkeys[j] = common.CopyBytes(key[:])\n\t\t}\n\t\tnodes := make(light.NodeList, 0, len(proof))\n\t\tif i == len(hashes)-1 {\n\t\t\tfor _, node := range proof {\n\t\t\t\tnodes = append(nodes, node)\n\t\t\t}\n\t\t}\n\t\tvar err error\n\t\tif len(nodes) == 0 {\n\t\t\t// No proof has been attached, the response must cover the entire key\n\t\t\t// space and hash to the origin root.\n\t\t\tdbs[i], tries[i], _, _, err = trie.VerifyRangeProof(req.roots[i], nil, nil, keys, slots[i], nil)\n\t\t\tif err != nil {\n\t\t\t\ts.scheduleRevertStorageRequest(req) // reschedule request\n\t\t\t\tlogger.Warn(\"Storage slots failed proof\", \"err\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\t// A proof was attached, the response is only partial, check that the\n\t\t\t// returned data is indeed part of the storage trie\n\t\t\tproofdb := nodes.NodeSet()\n\n\t\t\tvar end []byte\n\t\t\tif len(keys) > 0 {\n\t\t\t\tend = keys[len(keys)-1]\n\t\t\t}\n\t\t\tdbs[i], tries[i], notary, cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], end, keys, slots[i], 
proofdb)\n\t\t\tif err != nil {\n\t\t\t\ts.scheduleRevertStorageRequest(req) // reschedule request\n\t\t\t\tlogger.Warn(\"Storage range failed proof\", \"err\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\t// Partial tries reconstructed, send them to the scheduler for storage filling\n\tbounds := make(map[common.Hash]struct{})\n\n\tif notary != nil { // if all contract storages are delivered in full, no notary will be created\n\t\tit := notary.Accessed().NewIterator(nil, nil)\n\t\tfor it.Next() {\n\t\t\tbounds[common.BytesToHash(it.Key())] = struct{}{}\n\t\t}\n\t\tit.Release()\n\t}\n\tresponse := &storageResponse{\n\t\tmainTask: req.mainTask,\n\t\tsubTask:  req.subTask,\n\t\taccounts: req.accounts,\n\t\troots:    req.roots,\n\t\thashes:   hashes,\n\t\tslots:    slots,\n\t\tnodes:    dbs,\n\t\ttries:    tries,\n\t\tbounds:   bounds,\n\t\toverflow: light.NewNodeSet(),\n\t\tcont:     cont,\n\t}\n\tselect {\n\tcase s.storageResps <- response:\n\tcase <-req.cancel:\n\tcase <-req.stale:\n\t}\n\treturn nil\n}\n\n// OnTrieNodes is a callback method to invoke when a batch of trie nodes\n// are received from a remote peer.\nfunc (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error {\n\tvar size common.StorageSize\n\tfor _, node := range trienodes {\n\t\tsize += common.StorageSize(len(node))\n\t}\n\tlogger := peer.Log().New(\"reqid\", id)\n\tlogger.Trace(\"Delivering set of healing trienodes\", \"trienodes\", len(trienodes), \"bytes\", size)\n\n\t// Whether or not the response is valid, we can mark the peer as idle and\n\t// notify the scheduler to assign a new task. If the response is invalid,\n\t// we'll drop the peer in a bit.\n\ts.lock.Lock()\n\tif _, ok := s.peers[peer.ID()]; ok {\n\t\ts.trienodeHealIdlers[peer.ID()] = struct{}{}\n\t}\n\tselect {\n\tcase s.update <- struct{}{}:\n\tdefault:\n\t}\n\t// Ensure the response is for a valid request\n\treq, ok := s.trienodeHealReqs[id]\n\tif !ok {\n\t\t// Request stale, perhaps the peer timed out but came through in the end\n\t\tlogger.Warn(\"Unexpected trienode heal packet\")\n\t\ts.lock.Unlock()\n\t\treturn nil\n\t}\n\tdelete(s.trienodeHealReqs, id)\n\n\t// Clean up the request timeout timer, we'll see how to proceed further based\n\t// on the actual delivered content\n\tif !req.timeout.Stop() {\n\t\t// The timeout is already triggered, and this request will be reverted+rescheduled\n\t\ts.lock.Unlock()\n\t\treturn nil\n\t}\n\n\t// Response is valid, but check if peer is signalling that it does not have\n\t// the requested data. 
For trie node queries that means the peer is not\n\t// yet synced.\n\tif len(trienodes) == 0 {\n\t\tlogger.Debug(\"Peer rejected trienode heal request\")\n\t\ts.statelessPeers[peer.ID()] = struct{}{}\n\t\ts.lock.Unlock()\n\n\t\t// Signal this request as failed, and ready for rescheduling\n\t\ts.scheduleRevertTrienodeHealRequest(req)\n\t\treturn nil\n\t}\n\ts.lock.Unlock()\n\n\t// Cross reference the requested trienodes with the response to find gaps\n\t// that the serving node is missing\n\thasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)\n\thash := make([]byte, 32)\n\n\tnodes := make([][]byte, len(req.hashes))\n\tfor i, j := 0, 0; i < len(trienodes); i++ {\n\t\t// Find the next hash that we've been served, leaving misses with nils\n\t\thasher.Reset()\n\t\thasher.Write(trienodes[i])\n\t\thasher.Read(hash)\n\n\t\tfor j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {\n\t\t\tj++\n\t\t}\n\t\tif j < len(req.hashes) {\n\t\t\tnodes[j] = trienodes[i]\n\t\t\tj++\n\t\t\tcontinue\n\t\t}\n\t\t// We've either run out of hashes, or got unrequested data\n\t\tlogger.Warn(\"Unexpected healing trienodes\", \"count\", len(trienodes)-i)\n\t\t// Signal this request as failed, and ready for rescheduling\n\t\ts.scheduleRevertTrienodeHealRequest(req)\n\t\treturn errors.New(\"unexpected healing trienode\")\n\t}\n\t// Response validated, send it to the scheduler for filling\n\tresponse := &trienodeHealResponse{\n\t\ttask:   req.task,\n\t\thashes: req.hashes,\n\t\tpaths:  req.paths,\n\t\tnodes:  nodes,\n\t}\n\tselect {\n\tcase s.trienodeHealResps <- response:\n\tcase <-req.cancel:\n\tcase <-req.stale:\n\t}\n\treturn nil\n}\n\n// onHealByteCodes is a callback method to invoke when a batch of contract\n// byte codes are received from a remote peer in the healing phase.\nfunc (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error {\n\tvar size common.StorageSize\n\tfor _, code := range bytecodes {\n\t\tsize += common.StorageSize(len(code))\n\t}\n\tlogger := peer.Log().New(\"reqid\", id)\n\tlogger.Trace(\"Delivering set of healing bytecodes\", \"bytecodes\", len(bytecodes), \"bytes\", size)\n\n\t// Whether or not the response is valid, we can mark the peer as idle and\n\t// notify the scheduler to assign a new task. If the response is invalid,\n\t// we'll drop the peer in a bit.\n\ts.lock.Lock()\n\tif _, ok := s.peers[peer.ID()]; ok {\n\t\ts.bytecodeHealIdlers[peer.ID()] = struct{}{}\n\t}\n\tselect {\n\tcase s.update <- struct{}{}:\n\tdefault:\n\t}\n\t// Ensure the response is for a valid request\n\treq, ok := s.bytecodeHealReqs[id]\n\tif !ok {\n\t\t// Request stale, perhaps the peer timed out but came through in the end\n\t\tlogger.Warn(\"Unexpected bytecode heal packet\")\n\t\ts.lock.Unlock()\n\t\treturn nil\n\t}\n\tdelete(s.bytecodeHealReqs, id)\n\n\t// Clean up the request timeout timer, we'll see how to proceed further based\n\t// on the actual delivered content\n\tif !req.timeout.Stop() {\n\t\t// The timeout is already triggered, and this request will be reverted+rescheduled\n\t\ts.lock.Unlock()\n\t\treturn nil\n\t}\n\n\t// Response is valid, but check if peer is signalling that it does not have\n\t// the requested data. 
For bytecode range queries that means the peer is not\n\t// yet synced.\n\tif len(bytecodes) == 0 {\n\t\tlogger.Debug(\"Peer rejected bytecode heal request\")\n\t\ts.statelessPeers[peer.ID()] = struct{}{}\n\t\ts.lock.Unlock()\n\n\t\t// Signal this request as failed, and ready for rescheduling\n\t\ts.scheduleRevertBytecodeHealRequest(req)\n\t\treturn nil\n\t}\n\ts.lock.Unlock()\n\n\t// Cross reference the requested bytecodes with the response to find gaps\n\t// that the serving node is missing\n\thasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)\n\thash := make([]byte, 32)\n\n\tcodes := make([][]byte, len(req.hashes))\n\tfor i, j := 0, 0; i < len(bytecodes); i++ {\n\t\t// Find the next hash that we've been served, leaving misses with nils\n\t\thasher.Reset()\n\t\thasher.Write(bytecodes[i])\n\t\thasher.Read(hash)\n\n\t\tfor j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) {\n\t\t\tj++\n\t\t}\n\t\tif j < len(req.hashes) {\n\t\t\tcodes[j] = bytecodes[i]\n\t\t\tj++\n\t\t\tcontinue\n\t\t}\n\t\t// We've either run out of hashes, or got unrequested data\n\t\tlogger.Warn(\"Unexpected healing bytecodes\", \"count\", len(bytecodes)-i)\n\t\t// Signal this request as failed, and ready for rescheduling\n\t\ts.scheduleRevertBytecodeHealRequest(req)\n\t\treturn errors.New(\"unexpected healing bytecode\")\n\t}\n\t// Response validated, send it to the scheduler for filling\n\tresponse := &bytecodeHealResponse{\n\t\ttask:   req.task,\n\t\thashes: req.hashes,\n\t\tcodes:  codes,\n\t}\n\tselect {\n\tcase s.bytecodeHealResps <- response:\n\tcase <-req.cancel:\n\tcase <-req.stale:\n\t}\n\treturn nil\n}\n\n// hashSpace is the total size of the 256 bit hash space for accounts.\nvar hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil)\n\n// report calculates various status reports and provides them to the user.\nfunc (s *Syncer) report(force bool) {\n\tif len(s.tasks) > 0 {\n\t\ts.reportSyncProgress(force)\n\t\treturn\n\t}\n\ts.reportHealProgress(force)\n}\n\n// reportSyncProgress calculates various status reports and provides them to the user.\nfunc (s *Syncer) reportSyncProgress(force bool) {\n\t// Don't report all the events, just occasionally\n\tif !force && time.Since(s.logTime) < 3*time.Second {\n\t\treturn\n\t}\n\t// Don't report anything until we have meaningful progress\n\tsynced := s.accountBytes + s.bytecodeBytes + s.storageBytes\n\tif synced == 0 {\n\t\treturn\n\t}\n\taccountGaps := new(big.Int)\n\tfor _, task := range s.tasks {\n\t\taccountGaps.Add(accountGaps, new(big.Int).Sub(task.Last.Big(), task.Next.Big()))\n\t}\n\taccountFills := new(big.Int).Sub(hashSpace, accountGaps)\n\tif accountFills.BitLen() == 0 {\n\t\treturn\n\t}\n\ts.logTime = time.Now()\n\testBytes := float64(new(big.Int).Div(\n\t\tnew(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),\n\t\taccountFills,\n\t).Uint64())\n\n\telapsed := time.Since(s.startTime)\n\testTime := elapsed / time.Duration(synced) * time.Duration(estBytes)\n\n\t// Create a mega progress report\n\tvar (\n\t\tprogress = fmt.Sprintf(\"%.2f%%\", float64(synced)*100/estBytes)\n\t\taccounts = fmt.Sprintf(\"%d@%v\", s.accountSynced, s.accountBytes.TerminalString())\n\t\tstorage  = fmt.Sprintf(\"%d@%v\", s.storageSynced, s.storageBytes.TerminalString())\n\t\tbytecode = fmt.Sprintf(\"%d@%v\", s.bytecodeSynced, s.bytecodeBytes.TerminalString())\n\t)\n\tlog.Info(\"State sync in progress\", \"synced\", progress, \"state\", synced,\n\t\t\"accounts\", accounts, \"slots\", storage, \"codes\", bytecode, \"eta\", 
common.PrettyDuration(estTime-elapsed))\n}\n\n// reportHealProgress calculates various status reports and provides them to the user.\nfunc (s *Syncer) reportHealProgress(force bool) {\n\t// Don't report all the events, just occasionally\n\tif !force && time.Since(s.logTime) < 3*time.Second {\n\t\treturn\n\t}\n\ts.logTime = time.Now()\n\n\t// Create a mega progress report\n\tvar (\n\t\ttrienode = fmt.Sprintf(\"%d@%v\", s.trienodeHealSynced, s.trienodeHealBytes.TerminalString())\n\t\tbytecode = fmt.Sprintf(\"%d@%v\", s.bytecodeHealSynced, s.bytecodeHealBytes.TerminalString())\n\t)\n\tlog.Info(\"State heal in progress\", \"nodes\", trienode, \"codes\", bytecode,\n\t\t\"pending\", s.healer.scheduler.Pending())\n}\n"
  },
  {
    "path": "eth/protocols/snap/sync_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage snap\n\nimport (\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"sort\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n\t\"golang.org/x/crypto/sha3\"\n)\n\nfunc TestHashing(t *testing.T) {\n\tt.Parallel()\n\n\tvar bytecodes = make([][]byte, 10)\n\tfor i := 0; i < len(bytecodes); i++ {\n\t\tbuf := make([]byte, 100)\n\t\trand.Read(buf)\n\t\tbytecodes[i] = buf\n\t}\n\tvar want, got string\n\tvar old = func() {\n\t\thasher := sha3.NewLegacyKeccak256()\n\t\tfor i := 0; i < len(bytecodes); i++ {\n\t\t\thasher.Reset()\n\t\t\thasher.Write(bytecodes[i])\n\t\t\thash := hasher.Sum(nil)\n\t\t\tgot = fmt.Sprintf(\"%v\\n%v\", got, hash)\n\t\t}\n\t}\n\tvar new = func() {\n\t\thasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)\n\t\tvar hash = make([]byte, 32)\n\t\tfor i := 0; i < len(bytecodes); i++ {\n\t\t\thasher.Reset()\n\t\t\thasher.Write(bytecodes[i])\n\t\t\thasher.Read(hash)\n\t\t\twant = fmt.Sprintf(\"%v\\n%v\", want, hash)\n\t\t}\n\t}\n\told()\n\tnew()\n\tif want != got {\n\t\tt.Errorf(\"want\\n%v\\ngot\\n%v\\n\", want, got)\n\t}\n}\n\nfunc BenchmarkHashing(b *testing.B) {\n\tvar bytecodes = make([][]byte, 10000)\n\tfor i := 0; i < len(bytecodes); i++ {\n\t\tbuf := make([]byte, 100)\n\t\trand.Read(buf)\n\t\tbytecodes[i] = buf\n\t}\n\tvar old = func() {\n\t\thasher := sha3.NewLegacyKeccak256()\n\t\tfor i := 0; i < len(bytecodes); i++ {\n\t\t\thasher.Reset()\n\t\t\thasher.Write(bytecodes[i])\n\t\t\thasher.Sum(nil)\n\t\t}\n\t}\n\tvar new = func() {\n\t\thasher := sha3.NewLegacyKeccak256().(crypto.KeccakState)\n\t\tvar hash = make([]byte, 32)\n\t\tfor i := 0; i < len(bytecodes); i++ {\n\t\t\thasher.Reset()\n\t\t\thasher.Write(bytecodes[i])\n\t\t\thasher.Read(hash)\n\t\t}\n\t}\n\tb.Run(\"old\", func(b *testing.B) {\n\t\tb.ReportAllocs()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\told()\n\t\t}\n\t})\n\tb.Run(\"new\", func(b *testing.B) {\n\t\tb.ReportAllocs()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tnew()\n\t\t}\n\t})\n}\n\ntype (\n\taccountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error\n\tstorageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error\n\ttrieHandlerFunc    func(t *testPeer, requestId 
uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error\n\tcodeHandlerFunc    func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error\n)\n\ntype testPeer struct {\n\tid            string\n\ttest          *testing.T\n\tremote        *Syncer\n\tlogger        log.Logger\n\taccountTrie   *trie.Trie\n\taccountValues entrySlice\n\tstorageTries  map[common.Hash]*trie.Trie\n\tstorageValues map[common.Hash]entrySlice\n\n\taccountRequestHandler accountHandlerFunc\n\tstorageRequestHandler storageHandlerFunc\n\ttrieRequestHandler    trieHandlerFunc\n\tcodeRequestHandler    codeHandlerFunc\n\tterm                  func()\n}\n\nfunc newTestPeer(id string, t *testing.T, term func()) *testPeer {\n\tpeer := &testPeer{\n\t\tid:                    id,\n\t\ttest:                  t,\n\t\tlogger:                log.New(\"id\", id),\n\t\taccountRequestHandler: defaultAccountRequestHandler,\n\t\ttrieRequestHandler:    defaultTrieRequestHandler,\n\t\tstorageRequestHandler: defaultStorageRequestHandler,\n\t\tcodeRequestHandler:    defaultCodeRequestHandler,\n\t\tterm:                  term,\n\t}\n\t//stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true))\n\t//peer.logger.SetHandler(stderrHandler)\n\treturn peer\n}\n\nfunc (t *testPeer) ID() string      { return t.id }\nfunc (t *testPeer) Log() log.Logger { return t.logger }\n\nfunc (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error {\n\tt.logger.Trace(\"Fetching range of accounts\", \"reqid\", id, \"root\", root, \"origin\", origin, \"limit\", limit, \"bytes\", common.StorageSize(bytes))\n\tgo t.accountRequestHandler(t, id, root, origin, limit, bytes)\n\treturn nil\n}\n\nfunc (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error {\n\tt.logger.Trace(\"Fetching set of trie nodes\", \"reqid\", id, \"root\", root, \"pathsets\", len(paths), \"bytes\", common.StorageSize(bytes))\n\tgo t.trieRequestHandler(t, id, root, paths, bytes)\n\treturn nil\n}\n\nfunc (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error {\n\tif len(accounts) == 1 && origin != nil {\n\t\tt.logger.Trace(\"Fetching range of large storage slots\", \"reqid\", id, \"root\", root, \"account\", accounts[0], \"origin\", common.BytesToHash(origin), \"limit\", common.BytesToHash(limit), \"bytes\", common.StorageSize(bytes))\n\t} else {\n\t\tt.logger.Trace(\"Fetching ranges of small storage slots\", \"reqid\", id, \"root\", root, \"accounts\", len(accounts), \"first\", accounts[0], \"bytes\", common.StorageSize(bytes))\n\t}\n\tgo t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes)\n\treturn nil\n}\n\nfunc (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error {\n\tt.logger.Trace(\"Fetching set of byte codes\", \"reqid\", id, \"hashes\", len(hashes), \"bytes\", common.StorageSize(bytes))\n\tgo t.codeRequestHandler(t, id, hashes, bytes)\n\treturn nil\n}\n\n// defaultTrieRequestHandler is a well-behaving handler for trie healing requests\nfunc defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {\n\t// Pass the response\n\tvar nodes [][]byte\n\tfor _, pathset := range paths {\n\t\tswitch len(pathset) {\n\t\tcase 1:\n\t\t\tblob, _, err := t.accountTrie.TryGetNode(pathset[0])\n\t\t\tif err != nil {\n\t\t\t\tt.logger.Info(\"Error handling req\", \"error\", 
err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnodes = append(nodes, blob)\n\t\tdefault:\n\t\t\taccount := t.storageTries[(common.BytesToHash(pathset[0]))]\n\t\t\tfor _, path := range pathset[1:] {\n\t\t\t\tblob, _, err := account.TryGetNode(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.logger.Info(\"Error handling req\", \"error\", err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tnodes = append(nodes, blob)\n\t\t\t}\n\t\t}\n\t}\n\tt.remote.OnTrieNodes(t, requestId, nodes)\n\treturn nil\n}\n\n// defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests\nfunc defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {\n\tkeys, vals, proofs := createAccountRequestResponse(t, root, origin, limit, cap)\n\tif err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil {\n\t\tt.test.Errorf(\"Remote side rejected our delivery: %v\", err)\n\t\tt.term()\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) {\n\tvar size uint64\n\tif limit == (common.Hash{}) {\n\t\tlimit = common.HexToHash(\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\")\n\t}\n\tfor _, entry := range t.accountValues {\n\t\tif size > cap {\n\t\t\tbreak\n\t\t}\n\t\tif bytes.Compare(origin[:], entry.k) <= 0 {\n\t\t\tkeys = append(keys, common.BytesToHash(entry.k))\n\t\t\tvals = append(vals, entry.v)\n\t\t\tsize += uint64(32 + len(entry.v))\n\t\t}\n\t\t// If we've exceeded the request threshold, abort\n\t\tif bytes.Compare(entry.k, limit[:]) >= 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\t// Unless we send the entire trie, we need to supply proofs\n\t// Actually, we need to supply proofs either way! 
This seems to be an implementation\n\t// quirk in go-ethereum\n\tproof := light.NewNodeSet()\n\tif err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {\n\t\tt.logger.Error(\"Could not prove inexistence of origin\", \"origin\", origin, \"error\", err)\n\t}\n\tif len(keys) > 0 {\n\t\tlastK := (keys[len(keys)-1])[:]\n\t\tif err := t.accountTrie.Prove(lastK, 0, proof); err != nil {\n\t\t\tt.logger.Error(\"Could not prove last item\", \"error\", err)\n\t\t}\n\t}\n\tfor _, blob := range proof.NodeList() {\n\t\tproofs = append(proofs, blob)\n\t}\n\treturn keys, vals, proofs\n}\n\n// defaultStorageRequestHandler is a well-behaving storage request handler\nfunc defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error {\n\thashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max)\n\tif err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {\n\t\tt.test.Errorf(\"Remote side rejected our delivery: %v\", err)\n\t\tt.term()\n\t}\n\treturn nil\n}\n\nfunc defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {\n\tvar bytecodes [][]byte\n\tfor _, h := range hashes {\n\t\tbytecodes = append(bytecodes, getCodeByHash(h))\n\t}\n\tif err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {\n\t\tt.test.Errorf(\"Remote side rejected our delivery: %v\", err)\n\t\tt.term()\n\t}\n\treturn nil\n}\n\nfunc createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {\n\tvar size uint64\n\tfor _, account := range accounts {\n\t\t// The first account might start from a different origin and end sooner\n\t\tvar originHash common.Hash\n\t\tif len(origin) > 0 {\n\t\t\toriginHash = common.BytesToHash(origin)\n\t\t}\n\t\tvar limitHash = common.HexToHash(\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\")\n\t\tif len(limit) > 0 {\n\t\t\tlimitHash = common.BytesToHash(limit)\n\t\t}\n\t\tvar (\n\t\t\tkeys  []common.Hash\n\t\t\tvals  [][]byte\n\t\t\tabort bool\n\t\t)\n\t\tfor _, entry := range t.storageValues[account] {\n\t\t\tif size >= max {\n\t\t\t\tabort = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif bytes.Compare(entry.k, originHash[:]) < 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tkeys = append(keys, common.BytesToHash(entry.k))\n\t\t\tvals = append(vals, entry.v)\n\t\t\tsize += uint64(32 + len(entry.v))\n\t\t\tif bytes.Compare(entry.k, limitHash[:]) >= 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\thashes = append(hashes, keys)\n\t\tslots = append(slots, vals)\n\n\t\t// Generate the Merkle proofs for the first and last storage slot, but\n\t\t// only if the response was capped. 
If the entire storage trie is included\n\t\t// in the response, no need for any proofs.\n\t\tif originHash != (common.Hash{}) || abort {\n\t\t\t// If we're aborting, we need to prove the first and last item\n\t\t\t// This terminates the response (and thus the loop)\n\t\t\tproof := light.NewNodeSet()\n\t\t\tstTrie := t.storageTries[account]\n\n\t\t\t// Here's a potential gotcha: when constructing the proof, we cannot\n\t\t\t// use the 'origin' slice directly, but must use the full 32-byte\n\t\t\t// hash form.\n\t\t\tif err := stTrie.Prove(originHash[:], 0, proof); err != nil {\n\t\t\t\tt.logger.Error(\"Could not prove inexistence of origin\", \"origin\", originHash, \"error\", err)\n\t\t\t}\n\t\t\tif len(keys) > 0 {\n\t\t\t\tlastK := (keys[len(keys)-1])[:]\n\t\t\t\tif err := stTrie.Prove(lastK, 0, proof); err != nil {\n\t\t\t\t\tt.logger.Error(\"Could not prove last item\", \"error\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, blob := range proof.NodeList() {\n\t\t\t\tproofs = append(proofs, blob)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn hashes, slots, proofs\n}\n\n// createStorageRequestResponseAlwaysProve tests a corner case, where it always\n// supplies the proof for the last account, even if it is 'complete'.\nfunc createStorageRequestResponseAlwaysProve(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) {\n\tvar size uint64\n\tmax = max * 3 / 4\n\n\tvar origin common.Hash\n\tif len(bOrigin) > 0 {\n\t\torigin = common.BytesToHash(bOrigin)\n\t}\n\tvar exit bool\n\tfor i, account := range accounts {\n\t\tvar keys []common.Hash\n\t\tvar vals [][]byte\n\t\tfor _, entry := range t.storageValues[account] {\n\t\t\tif bytes.Compare(entry.k, origin[:]) < 0 {\n\t\t\t\texit = true\n\t\t\t}\n\t\t\tkeys = append(keys, common.BytesToHash(entry.k))\n\t\t\tvals = append(vals, entry.v)\n\t\t\tsize += uint64(32 + len(entry.v))\n\t\t\tif size > max {\n\t\t\t\texit = true\n\t\t\t}\n\t\t}\n\t\tif i == len(accounts)-1 {\n\t\t\texit = true\n\t\t}\n\t\thashes = append(hashes, keys)\n\t\tslots = append(slots, vals)\n\n\t\tif exit {\n\t\t\t// If we're aborting, we need to prove the first and last item\n\t\t\t// This terminates the response (and thus the loop)\n\t\t\tproof := light.NewNodeSet()\n\t\t\tstTrie := t.storageTries[account]\n\n\t\t\t// Here's a potential gotcha: when constructing the proof, we cannot\n\t\t\t// use the 'origin' slice directly, but must use the full 32-byte\n\t\t\t// hash form.\n\t\t\tif err := stTrie.Prove(origin[:], 0, proof); err != nil {\n\t\t\t\tt.logger.Error(\"Could not prove inexistence of origin\", \"origin\", origin,\n\t\t\t\t\t\"error\", err)\n\t\t\t}\n\t\t\tif len(keys) > 0 {\n\t\t\t\tlastK := (keys[len(keys)-1])[:]\n\t\t\t\tif err := stTrie.Prove(lastK, 0, proof); err != nil {\n\t\t\t\t\tt.logger.Error(\"Could not prove last item\", \"error\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, blob := range proof.NodeList() {\n\t\t\t\tproofs = append(proofs, blob)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn hashes, slots, proofs\n}\n\n// emptyRequestAccountRangeFn rejects AccountRangeRequests with an empty response\nfunc emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {\n\tt.remote.OnAccounts(t, requestId, nil, nil, nil)\n\treturn nil\n}\n\nfunc nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {\n\treturn nil\n}\n\nfunc 
emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {\n\tt.remote.OnTrieNodes(t, requestId, nil)\n\treturn nil\n}\n\nfunc nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error {\n\treturn nil\n}\n\nfunc emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {\n\tt.remote.OnStorage(t, requestId, nil, nil, nil)\n\treturn nil\n}\n\nfunc nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {\n\treturn nil\n}\n\nfunc proofHappyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {\n\thashes, slots, proofs := createStorageRequestResponseAlwaysProve(t, root, accounts, origin, limit, max)\n\tif err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {\n\t\tt.test.Errorf(\"Remote side rejected our delivery: %v\", err)\n\t\tt.term()\n\t}\n\treturn nil\n}\n\n//func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {\n//\tvar bytecodes [][]byte\n//\tt.remote.OnByteCodes(t, id, bytecodes)\n//\treturn nil\n//}\n\nfunc corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {\n\tvar bytecodes [][]byte\n\tfor _, h := range hashes {\n\t\t// Send back the hashes\n\t\tbytecodes = append(bytecodes, h[:])\n\t}\n\tif err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {\n\t\tt.logger.Info(\"remote error on delivery (as expected)\", \"error\", err)\n\t\t// Mimic the real-life handler, which drops a peer on errors\n\t\tt.remote.Unregister(t.id)\n\t}\n\treturn nil\n}\n\nfunc cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {\n\tvar bytecodes [][]byte\n\tfor _, h := range hashes[:1] {\n\t\tbytecodes = append(bytecodes, getCodeByHash(h))\n\t}\n\t// Missing bytecode can be retrieved again, no error expected\n\tif err := t.remote.OnByteCodes(t, id, bytecodes); err != nil {\n\t\tt.test.Errorf(\"Remote side rejected our delivery: %v\", err)\n\t\tt.term()\n\t}\n\treturn nil\n}\n\n// starvingStorageRequestHandler is somewhat well-behaving storage handler, but it caps the returned results to be very small\nfunc starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {\n\treturn defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500)\n}\n\nfunc starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {\n\treturn defaultAccountRequestHandler(t, requestId, root, origin, limit, 500)\n}\n\n//func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {\n//\treturn defaultAccountRequestHandler(t, requestId-1, root, origin, 500)\n//}\n\nfunc corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {\n\thashes, accounts, proofs := createAccountRequestResponse(t, root, origin, limit, cap)\n\tif len(proofs) > 0 {\n\t\tproofs = proofs[1:]\n\t}\n\tif err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil {\n\t\tt.logger.Info(\"remote error on delivery (as 
expected)\", \"error\", err)\n\t\t// Mimic the real-life handler, which drops a peer on errors\n\t\tt.remote.Unregister(t.id)\n\t}\n\treturn nil\n}\n\n// corruptStorageRequestHandler doesn't provide good proofs\nfunc corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {\n\thashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max)\n\tif len(proofs) > 0 {\n\t\tproofs = proofs[1:]\n\t}\n\tif err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil {\n\t\tt.logger.Info(\"remote error on delivery (as expected)\", \"error\", err)\n\t\t// Mimic the real-life handler, which drops a peer on errors\n\t\tt.remote.Unregister(t.id)\n\t}\n\treturn nil\n}\n\nfunc noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error {\n\thashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max)\n\tif err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil {\n\t\tt.logger.Info(\"remote error on delivery (as expected)\", \"error\", err)\n\t\t// Mimic the real-life handler, which drops a peer on errors\n\t\tt.remote.Unregister(t.id)\n\t}\n\treturn nil\n}\n\n// TestSyncBloatedProof tests a scenario where we provide only _one_ value, but\n// also ship the entire trie inside the proof. If the attack is successful,\n// the remote side does not do any follow-up requests\nfunc TestSyncBloatedProof(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems := makeAccountTrieNoStorage(100)\n\tsource := newTestPeer(\"source\", t, term)\n\tsource.accountTrie = sourceAccountTrie\n\tsource.accountValues = elems\n\n\tsource.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, limit common.Hash, cap uint64) error {\n\t\tvar (\n\t\t\tproofs [][]byte\n\t\t\tkeys   []common.Hash\n\t\t\tvals   [][]byte\n\t\t)\n\t\t// The values\n\t\tfor _, entry := range t.accountValues {\n\t\t\tif bytes.Compare(entry.k, origin[:]) < 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif bytes.Compare(entry.k, limit[:]) > 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tkeys = append(keys, common.BytesToHash(entry.k))\n\t\t\tvals = append(vals, entry.v)\n\t\t}\n\t\t// The proofs\n\t\tproof := light.NewNodeSet()\n\t\tif err := t.accountTrie.Prove(origin[:], 0, proof); err != nil {\n\t\t\tt.logger.Error(\"Could not prove origin\", \"origin\", origin, \"error\", err)\n\t\t}\n\t\t// The bloat: add proof of every single element\n\t\tfor _, entry := range t.accountValues {\n\t\t\tif err := t.accountTrie.Prove(entry.k, 0, proof); err != nil {\n\t\t\t\tt.logger.Error(\"Could not prove item\", \"error\", err)\n\t\t\t}\n\t\t}\n\t\t// And remove one item from the elements\n\t\tif len(keys) > 2 {\n\t\t\tkeys = append(keys[:1], keys[2:]...)\n\t\t\tvals = append(vals[:1], vals[2:]...)\n\t\t}\n\t\tfor _, blob := range proof.NodeList() {\n\t\t\tproofs = append(proofs, blob)\n\t\t}\n\t\tif err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil {\n\t\t\tt.logger.Info(\"remote error on delivery (as expected)\", \"error\", err)\n\t\t\tt.term()\n\t\t\t// This is actually correct, signal to exit the test successfully\n\t\t}\n\t\treturn nil\n\t}\n\tsyncer := setupSyncer(source)\n\tif err := 
syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil {\n\t\tt.Fatal(\"No error returned from incomplete/cancelled sync\")\n\t}\n}\n\nfunc setupSyncer(peers ...*testPeer) *Syncer {\n\tstateDb := rawdb.NewMemoryDatabase()\n\tsyncer := NewSyncer(stateDb)\n\tfor _, peer := range peers {\n\t\tsyncer.Register(peer)\n\t\tpeer.remote = syncer\n\t}\n\treturn syncer\n}\n\n// TestSync tests a basic sync with one peer\nfunc TestSync(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems := makeAccountTrieNoStorage(100)\n\n\tmkSource := func(name string) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\treturn source\n\t}\n\tsyncer := setupSyncer(mkSource(\"source\"))\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. This previously caused a\n// panic within the prover\nfunc TestSyncTinyTriePanic(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems := makeAccountTrieNoStorage(1)\n\n\tmkSource := func(name string) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\treturn source\n\t}\n\tsyncer := setupSyncer(mkSource(\"source\"))\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestMultiSync tests a basic sync with multiple peers\nfunc TestMultiSync(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems := makeAccountTrieNoStorage(100)\n\n\tmkSource := func(name string) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\treturn source\n\t}\n\tsyncer := setupSyncer(mkSource(\"sourceA\"), mkSource(\"sourceB\"))\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestSyncWithStorage tests a basic sync using accounts + storage + code\nfunc TestSyncWithStorage(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true, false)\n\n\tmkSource := func(name string) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.storageTries = storageTries\n\t\tsource.storageValues = storageElems\n\t\treturn source\n\t}\n\tsyncer := setupSyncer(mkSource(\"sourceA\"))\n\tdone := checkStall(t, term)\n\tif err := 
syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all\nfunc TestMultiSyncManyUseless(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)\n\n\tmkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.storageTries = storageTries\n\t\tsource.storageValues = storageElems\n\n\t\tif !noAccount {\n\t\t\tsource.accountRequestHandler = emptyRequestAccountRangeFn\n\t\t}\n\t\tif !noStorage {\n\t\t\tsource.storageRequestHandler = emptyStorageRequestHandler\n\t\t}\n\t\tif !noTrieNode {\n\t\t\tsource.trieRequestHandler = emptyTrieRequestHandler\n\t\t}\n\t\treturn source\n\t}\n\n\tsyncer := setupSyncer(\n\t\tmkSource(\"full\", true, true, true),\n\t\tmkSource(\"noAccounts\", false, true, true),\n\t\tmkSource(\"noStorage\", true, false, true),\n\t\tmkSource(\"noTrie\", true, true, false),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which don't return anything valuable at all\nfunc TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {\n\t// We're setting the timeout very low, to increase the chance of the timeout\n\t// being triggered. 
This previously caused a panic when a response\n\t// arrived at the same time as a timeout was triggered.\n\tdefer func(old time.Duration) { requestTimeout = old }(requestTimeout)\n\trequestTimeout = time.Millisecond\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)\n\n\tmkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.storageTries = storageTries\n\t\tsource.storageValues = storageElems\n\n\t\tif !noAccount {\n\t\t\tsource.accountRequestHandler = emptyRequestAccountRangeFn\n\t\t}\n\t\tif !noStorage {\n\t\t\tsource.storageRequestHandler = emptyStorageRequestHandler\n\t\t}\n\t\tif !noTrieNode {\n\t\t\tsource.trieRequestHandler = emptyTrieRequestHandler\n\t\t}\n\t\treturn source\n\t}\n\n\tsyncer := setupSyncer(\n\t\tmkSource(\"full\", true, true, true),\n\t\tmkSource(\"noAccounts\", false, true, true),\n\t\tmkSource(\"noStorage\", true, false, true),\n\t\tmkSource(\"noTrie\", true, true, false),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestMultiSyncManyUnresponsive contains one good peer, and many which don't respond at all\nfunc TestMultiSyncManyUnresponsive(t *testing.T) {\n\t// We're setting the timeout very low, to make the test run a bit faster\n\tdefer func(old time.Duration) { requestTimeout = old }(requestTimeout)\n\trequestTimeout = time.Millisecond\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)\n\n\tmkSource := func(name string, noAccount, noStorage, noTrieNode bool) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.storageTries = storageTries\n\t\tsource.storageValues = storageElems\n\n\t\tif !noAccount {\n\t\t\tsource.accountRequestHandler = nonResponsiveRequestAccountRangeFn\n\t\t}\n\t\tif !noStorage {\n\t\t\tsource.storageRequestHandler = nonResponsiveStorageRequestHandler\n\t\t}\n\t\tif !noTrieNode {\n\t\t\tsource.trieRequestHandler = nonResponsiveTrieRequestHandler\n\t\t}\n\t\treturn source\n\t}\n\n\tsyncer := setupSyncer(\n\t\tmkSource(\"full\", true, true, true),\n\t\tmkSource(\"noAccounts\", false, true, true),\n\t\tmkSource(\"noStorage\", true, false, true),\n\t\tmkSource(\"noTrie\", true, true, false),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\nfunc checkStall(t *testing.T, term func()) chan struct{} {\n\ttestDone := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much\n\t\t\tt.Log(\"Sync stalled\")\n\t\t\tterm()\n\t\tcase <-testDone:\n\t\t\treturn\n\t\t}\n\t}()\n\treturn testDone\n}\n\n// TestSyncBoundaryAccountTrie 
tests sync against a few normal peers, but the\n// account trie has a few boundary elements.\nfunc TestSyncBoundaryAccountTrie(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems := makeBoundaryAccountTrie(3000)\n\n\tmkSource := func(name string) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\treturn source\n\t}\n\tsyncer := setupSyncer(\n\t\tmkSource(\"peer-a\"),\n\t\tmkSource(\"peer-b\"),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is\n// consistently returning very small results\nfunc TestSyncNoStorageAndOneCappedPeer(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems := makeAccountTrieNoStorage(3000)\n\n\tmkSource := func(name string, slow bool) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\n\t\tif slow {\n\t\t\tsource.accountRequestHandler = starvingAccountRequestHandler\n\t\t}\n\t\treturn source\n\t}\n\n\tsyncer := setupSyncer(\n\t\tmkSource(\"nice-a\", false),\n\t\tmkSource(\"nice-b\", false),\n\t\tmkSource(\"nice-c\", false),\n\t\tmkSource(\"capped\", true),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver\n// code requests properly.\nfunc TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems := makeAccountTrieNoStorage(3000)\n\n\tmkSource := func(name string, codeFn codeHandlerFunc) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.codeRequestHandler = codeFn\n\t\treturn source\n\t}\n\t// One is capped, one is corrupt. 
If we don't use a capped one, there's a 50%\n\t// chance that the full set of requested codes is sent only to the\n\t// non-corrupt peer, which delivers everything in one go, and makes the\n\t// test moot.\n\tsyncer := setupSyncer(\n\t\tmkSource(\"capped\", cappedCodeRequestHandler),\n\t\tmkSource(\"corrupt\", corruptCodeRequestHandler),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestSyncNoStorageAndOneAccountCorruptPeer has one peer which doesn't deliver\n// account requests properly.\nfunc TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems := makeAccountTrieNoStorage(3000)\n\n\tmkSource := func(name string, accFn accountHandlerFunc) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.accountRequestHandler = accFn\n\t\treturn source\n\t}\n\t// One is capped, one is corrupt. If we don't use a capped one, there's a 50%\n\t// chance that the full set of requested accounts is sent only to the\n\t// non-corrupt peer, which delivers everything in one go, and makes the\n\t// test moot.\n\tsyncer := setupSyncer(\n\t\tmkSource(\"capped\", defaultAccountRequestHandler),\n\t\tmkSource(\"corrupt\", corruptAccountRequestHandler),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes\n// one by one\nfunc TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems := makeAccountTrieNoStorage(3000)\n\n\tmkSource := func(name string, codeFn codeHandlerFunc) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.codeRequestHandler = codeFn\n\t\treturn source\n\t}\n\t// Count how many times it's invoked. Remember, there are only 8 unique hashes,\n\t// so it shouldn't be more than that\n\tvar counter int\n\tsyncer := setupSyncer(\n\t\tmkSource(\"capped\", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error {\n\t\t\tcounter++\n\t\t\treturn cappedCodeRequestHandler(t, id, hashes, max)\n\t\t}),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\t// There are only 8 unique hashes, and 3K accounts. However, the code\n\t// deduplication is per request batch. If it were a perfect global dedup,\n\t// we would expect only 8 requests. 
If there were no dedup, there would be\n\t// 3k requests.\n\t// We expect somewhere below 100 requests for these 8 unique hashes.\n\tif threshold := 100; counter > threshold {\n\t\tt.Fatalf(\"Error, expected < %d invocations, got %d\", threshold, counter)\n\t}\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestSyncBoundaryStorageTrie tests sync against a few normal peers, but the\n// storage trie has a few boundary elements.\nfunc TestSyncBoundaryStorageTrie(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(10, 1000, false, true)\n\n\tmkSource := func(name string) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.storageTries = storageTries\n\t\tsource.storageValues = storageElems\n\t\treturn source\n\t}\n\tsyncer := setupSyncer(\n\t\tmkSource(\"peer-a\"),\n\t\tmkSource(\"peer-b\"),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is\n// consistently returning very small results\nfunc TestSyncWithStorageAndOneCappedPeer(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false, false)\n\n\tmkSource := func(name string, slow bool) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.storageTries = storageTries\n\t\tsource.storageValues = storageElems\n\n\t\tif slow {\n\t\t\tsource.storageRequestHandler = starvingStorageRequestHandler\n\t\t}\n\t\treturn source\n\t}\n\n\tsyncer := setupSyncer(\n\t\tmkSource(\"nice-a\", false),\n\t\tmkSource(\"slow\", true),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is\n// sometimes sending bad proofs\nfunc TestSyncWithStorageAndCorruptPeer(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)\n\n\tmkSource := func(name string, handler storageHandlerFunc) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.storageTries = storageTries\n\t\tsource.storageValues = storageElems\n\t\tsource.storageRequestHandler = handler\n\t\treturn source\n\t}\n\n\tsyncer := setupSyncer(\n\t\tmkSource(\"nice-a\", defaultStorageRequestHandler),\n\t\tmkSource(\"nice-b\", defaultStorageRequestHandler),\n\t\tmkSource(\"nice-c\", 
defaultStorageRequestHandler),\n\t\tmkSource(\"corrupt\", corruptStorageRequestHandler),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestSyncWithStorageAndNonProvingPeer tests sync using accounts + storage, where one peer\n// delivers storage ranges without the accompanying Merkle proofs.\nfunc TestSyncWithStorageAndNonProvingPeer(t *testing.T) {\n\tt.Parallel()\n\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true, false)\n\n\tmkSource := func(name string, handler storageHandlerFunc) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.storageTries = storageTries\n\t\tsource.storageValues = storageElems\n\t\tsource.storageRequestHandler = handler\n\t\treturn source\n\t}\n\tsyncer := setupSyncer(\n\t\tmkSource(\"nice-a\", defaultStorageRequestHandler),\n\t\tmkSource(\"nice-b\", defaultStorageRequestHandler),\n\t\tmkSource(\"nice-c\", defaultStorageRequestHandler),\n\t\tmkSource(\"corrupt\", noProofStorageRequestHandler),\n\t)\n\tdone := checkStall(t, term)\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tclose(done)\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\n// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against\n// a peer who insists on delivering full storage sets _and_ proofs. This triggered\n// an error, where the recipient erroneously clipped the boundary nodes, but\n// did not mark the account for healing.\nfunc TestSyncWithStorageMisbehavingProve(t *testing.T) {\n\tt.Parallel()\n\tvar (\n\t\tonce   sync.Once\n\t\tcancel = make(chan struct{})\n\t\tterm   = func() {\n\t\t\tonce.Do(func() {\n\t\t\t\tclose(cancel)\n\t\t\t})\n\t\t}\n\t)\n\tsourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorageWithUniqueStorage(10, 30, false)\n\n\tmkSource := func(name string) *testPeer {\n\t\tsource := newTestPeer(name, t, term)\n\t\tsource.accountTrie = sourceAccountTrie\n\t\tsource.accountValues = elems\n\t\tsource.storageTries = storageTries\n\t\tsource.storageValues = storageElems\n\t\tsource.storageRequestHandler = proofHappyStorageRequestHandler\n\t\treturn source\n\t}\n\tsyncer := setupSyncer(mkSource(\"sourceA\"))\n\tif err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {\n\t\tt.Fatalf(\"sync failed: %v\", err)\n\t}\n\tverifyTrie(syncer.db, sourceAccountTrie.Hash(), t)\n}\n\ntype kv struct {\n\tk, v []byte\n}\n\n// Some helpers for sorting\ntype entrySlice []*kv\n\nfunc (p entrySlice) Len() int           { return len(p) }\nfunc (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 }\nfunc (p entrySlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }\n\nfunc key32(i uint64) []byte {\n\tkey := make([]byte, 32)\n\tbinary.LittleEndian.PutUint64(key, i)\n\treturn key\n}\n\nvar (\n\tcodehashes = []common.Hash{\n\t\tcrypto.Keccak256Hash([]byte{0}),\n\t\tcrypto.Keccak256Hash([]byte{1}),\n\t\tcrypto.Keccak256Hash([]byte{2}),\n\t\tcrypto.Keccak256Hash([]byte{3}),\n\t\tcrypto.Keccak256Hash([]byte{4}),\n\t\tcrypto.Keccak256Hash([]byte{5}),\n\t\tcrypto.Keccak256Hash([]byte{6}),\n\t\tcrypto.Keccak256Hash([]byte{7}),\n\t}\n)\n\n
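// Note that only these eight distinct bytecodes exist in the test universe,\n// which keeps the expected code-request counts in the capped-peer tests above\n// easy to reason about.\n\n// getCodeHash returns a pseudo-random code hash\nfunc getCodeHash(i uint64) []byte 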
{\n\th := codehashes[int(i)%len(codehashes)]\n\treturn common.CopyBytes(h[:])\n}\n\n// getCodeByHash is a convenience function to look up the code from the code hash\nfunc getCodeByHash(hash common.Hash) []byte {\n\tif hash == emptyCode {\n\t\treturn nil\n\t}\n\tfor i, h := range codehashes {\n\t\tif h == hash {\n\t\t\treturn []byte{byte(i)}\n\t\t}\n\t}\n\treturn nil\n}\n\n// makeAccountTrieNoStorage spits out a trie, along with the leaves\nfunc makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) {\n\tdb := trie.NewDatabase(rawdb.NewMemoryDatabase())\n\taccTrie, _ := trie.New(common.Hash{}, db)\n\tvar entries entrySlice\n\tfor i := uint64(1); i <= uint64(n); i++ {\n\t\tvalue, _ := rlp.EncodeToBytes(state.Account{\n\t\t\tNonce:    i,\n\t\t\tBalance:  big.NewInt(int64(i)),\n\t\t\tRoot:     emptyRoot,\n\t\t\tCodeHash: getCodeHash(i),\n\t\t})\n\t\tkey := key32(i)\n\t\telem := &kv{key, value}\n\t\taccTrie.Update(elem.k, elem.v)\n\t\tentries = append(entries, elem)\n\t}\n\tsort.Sort(entries)\n\taccTrie.Commit(nil)\n\treturn accTrie, entries\n}\n\n// makeBoundaryAccountTrie constructs an account trie. Instead of filling\n// accounts normally, this function will fill a few accounts which have a\n// boundary hash.\nfunc makeBoundaryAccountTrie(n int) (*trie.Trie, entrySlice) {\n\tvar (\n\t\tentries    entrySlice\n\t\tboundaries []common.Hash\n\n\t\tdb      = trie.NewDatabase(rawdb.NewMemoryDatabase())\n\t\ttrie, _ = trie.New(common.Hash{}, db)\n\t)\n\t// Initialize boundaries\n\tvar next common.Hash\n\tstep := new(big.Int).Sub(\n\t\tnew(big.Int).Div(\n\t\t\tnew(big.Int).Exp(common.Big2, common.Big256, nil),\n\t\t\tbig.NewInt(accountConcurrency),\n\t\t), common.Big1,\n\t)\n\tfor i := 0; i < accountConcurrency; i++ {\n\t\tlast := common.BigToHash(new(big.Int).Add(next.Big(), step))\n\t\tif i == accountConcurrency-1 {\n\t\t\tlast = common.HexToHash(\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\")\n\t\t}\n\t\tboundaries = append(boundaries, last)\n\t\tnext = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))\n\t}\n\t// Fill boundary accounts\n\tfor i := 0; i < len(boundaries); i++ {\n\t\tvalue, _ := rlp.EncodeToBytes(state.Account{\n\t\t\tNonce:    uint64(0),\n\t\t\tBalance:  big.NewInt(int64(i)),\n\t\t\tRoot:     emptyRoot,\n\t\t\tCodeHash: getCodeHash(uint64(i)),\n\t\t})\n\t\telem := &kv{boundaries[i].Bytes(), value}\n\t\ttrie.Update(elem.k, elem.v)\n\t\tentries = append(entries, elem)\n\t}\n\t// Fill other accounts if required\n\tfor i := uint64(1); i <= uint64(n); i++ {\n\t\tvalue, _ := rlp.EncodeToBytes(state.Account{\n\t\t\tNonce:    i,\n\t\t\tBalance:  big.NewInt(int64(i)),\n\t\t\tRoot:     emptyRoot,\n\t\t\tCodeHash: getCodeHash(i),\n\t\t})\n\t\telem := &kv{key32(i), value}\n\t\ttrie.Update(elem.k, elem.v)\n\t\tentries = append(entries, elem)\n\t}\n\tsort.Sort(entries)\n\ttrie.Commit(nil)\n\treturn trie, entries\n}\n\n// makeAccountTrieWithStorageWithUniqueStorage creates an account trie where each account\n// has a unique storage set.\nfunc makeAccountTrieWithStorageWithUniqueStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {\n\tvar (\n\t\tdb             = trie.NewDatabase(rawdb.NewMemoryDatabase())\n\t\taccTrie, _     = trie.New(common.Hash{}, db)\n\t\tentries        entrySlice\n\t\tstorageTries   = make(map[common.Hash]*trie.Trie)\n\t\tstorageEntries = make(map[common.Hash]entrySlice)\n\t)\n\t// Create n accounts in the trie\n\tfor i := uint64(1); i <= uint64(accounts); i++ {\n\t\tkey := 
key32(i)\n\t\tcodehash := emptyCode[:]\n\t\tif code {\n\t\t\tcodehash = getCodeHash(i)\n\t\t}\n\t\t// Create a storage trie\n\t\tstTrie, stEntries := makeStorageTrieWithSeed(uint64(slots), i, db)\n\t\tstRoot := stTrie.Hash()\n\t\tstTrie.Commit(nil)\n\t\tvalue, _ := rlp.EncodeToBytes(state.Account{\n\t\t\tNonce:    i,\n\t\t\tBalance:  big.NewInt(int64(i)),\n\t\t\tRoot:     stRoot,\n\t\t\tCodeHash: codehash,\n\t\t})\n\t\telem := &kv{key, value}\n\t\taccTrie.Update(elem.k, elem.v)\n\t\tentries = append(entries, elem)\n\n\t\tstorageTries[common.BytesToHash(key)] = stTrie\n\t\tstorageEntries[common.BytesToHash(key)] = stEntries\n\t}\n\tsort.Sort(entries)\n\n\taccTrie.Commit(nil)\n\treturn accTrie, entries, storageTries, storageEntries\n}\n\n// makeAccountTrieWithStorage spits out a trie, along with the leaves\nfunc makeAccountTrieWithStorage(accounts, slots int, code, boundary bool) (*trie.Trie, entrySlice, map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) {\n\tvar (\n\t\tdb             = trie.NewDatabase(rawdb.NewMemoryDatabase())\n\t\taccTrie, _     = trie.New(common.Hash{}, db)\n\t\tentries        entrySlice\n\t\tstorageTries   = make(map[common.Hash]*trie.Trie)\n\t\tstorageEntries = make(map[common.Hash]entrySlice)\n\t)\n\t// Make a storage trie which we reuse for the whole lot\n\tvar (\n\t\tstTrie    *trie.Trie\n\t\tstEntries entrySlice\n\t)\n\tif boundary {\n\t\tstTrie, stEntries = makeBoundaryStorageTrie(slots, db)\n\t} else {\n\t\tstTrie, stEntries = makeStorageTrieWithSeed(uint64(slots), 0, db)\n\t}\n\tstRoot := stTrie.Hash()\n\n\t// Create n accounts in the trie\n\tfor i := uint64(1); i <= uint64(accounts); i++ {\n\t\tkey := key32(i)\n\t\tcodehash := emptyCode[:]\n\t\tif code {\n\t\t\tcodehash = getCodeHash(i)\n\t\t}\n\t\tvalue, _ := rlp.EncodeToBytes(state.Account{\n\t\t\tNonce:    i,\n\t\t\tBalance:  big.NewInt(int64(i)),\n\t\t\tRoot:     stRoot,\n\t\t\tCodeHash: codehash,\n\t\t})\n\t\telem := &kv{key, value}\n\t\taccTrie.Update(elem.k, elem.v)\n\t\tentries = append(entries, elem)\n\t\t// we reuse the same one for all accounts\n\t\tstorageTries[common.BytesToHash(key)] = stTrie\n\t\tstorageEntries[common.BytesToHash(key)] = stEntries\n\t}\n\tsort.Sort(entries)\n\tstTrie.Commit(nil)\n\taccTrie.Commit(nil)\n\treturn accTrie, entries, storageTries, storageEntries\n}\n\n// makeStorageTrieWithSeed fills a storage trie with n items, returning the\n// committed trie and the sorted entries. The seeds can be used to ensure\n// that tries are unique.\nfunc makeStorageTrieWithSeed(n, seed uint64, db *trie.Database) (*trie.Trie, entrySlice) {\n\ttrie, _ := trie.New(common.Hash{}, db)\n\tvar entries entrySlice\n\tfor i := uint64(1); i <= n; i++ {\n\t\t// store 'x' at slot 'x'\n\t\tslotValue := key32(i + seed)\n\t\trlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))\n\n\t\tslotKey := key32(i)\n\t\tkey := crypto.Keccak256Hash(slotKey[:])\n\n\t\telem := &kv{key[:], rlpSlotValue}\n\t\ttrie.Update(elem.k, elem.v)\n\t\tentries = append(entries, elem)\n\t}\n\tsort.Sort(entries)\n\ttrie.Commit(nil)\n\treturn trie, entries\n}\n\n// makeBoundaryStorageTrie constructs a storage trie. 
Instead of filling\n// storage slots normally, this function will fill a few slots which have a\n// boundary hash.\nfunc makeBoundaryStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) {\n\tvar (\n\t\tentries    entrySlice\n\t\tboundaries []common.Hash\n\t\ttrie, _    = trie.New(common.Hash{}, db)\n\t)\n\t// Initialize boundaries\n\tvar next common.Hash\n\tstep := new(big.Int).Sub(\n\t\tnew(big.Int).Div(\n\t\t\tnew(big.Int).Exp(common.Big2, common.Big256, nil),\n\t\t\tbig.NewInt(accountConcurrency),\n\t\t), common.Big1,\n\t)\n\tfor i := 0; i < accountConcurrency; i++ {\n\t\tlast := common.BigToHash(new(big.Int).Add(next.Big(), step))\n\t\tif i == accountConcurrency-1 {\n\t\t\tlast = common.HexToHash(\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\")\n\t\t}\n\t\tboundaries = append(boundaries, last)\n\t\tnext = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1))\n\t}\n\t// Fill boundary slots\n\tfor i := 0; i < len(boundaries); i++ {\n\t\tkey := boundaries[i]\n\t\tval := []byte{0xde, 0xad, 0xbe, 0xef}\n\n\t\telem := &kv{key[:], val}\n\t\ttrie.Update(elem.k, elem.v)\n\t\tentries = append(entries, elem)\n\t}\n\t// Fill other slots if required\n\tfor i := uint64(1); i <= uint64(n); i++ {\n\t\tslotKey := key32(i)\n\t\tkey := crypto.Keccak256Hash(slotKey[:])\n\n\t\tslotValue := key32(i)\n\t\trlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:]))\n\n\t\telem := &kv{key[:], rlpSlotValue}\n\t\ttrie.Update(elem.k, elem.v)\n\t\tentries = append(entries, elem)\n\t}\n\tsort.Sort(entries)\n\ttrie.Commit(nil)\n\treturn trie, entries\n}\n\nfunc verifyTrie(db ethdb.KeyValueStore, root common.Hash, t *testing.T) {\n\tt.Helper()\n\ttriedb := trie.NewDatabase(db)\n\taccTrie, err := trie.New(root, triedb)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\taccounts, slots := 0, 0\n\taccIt := trie.NewIterator(accTrie.NodeIterator(nil))\n\tfor accIt.Next() {\n\t\tvar acc struct {\n\t\t\tNonce    uint64\n\t\t\tBalance  *big.Int\n\t\t\tRoot     common.Hash\n\t\t\tCodeHash []byte\n\t\t}\n\t\tif err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {\n\t\t\tlog.Crit(\"Invalid account encountered during verification\", \"err\", err)\n\t\t}\n\t\taccounts++\n\t\tif acc.Root != emptyRoot {\n\t\t\tstoreTrie, err := trie.NewSecure(acc.Root, triedb)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tstoreIt := trie.NewIterator(storeTrie.NodeIterator(nil))\n\t\t\tfor storeIt.Next() {\n\t\t\t\tslots++\n\t\t\t}\n\t\t\tif err := storeIt.Err; err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\tif err := accIt.Err; err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Logf(\"accounts: %d, slots: %d\", accounts, slots)\n}\n"
  },
  {
    "path": "eth/state_accessor.go",
    "content": "// Copyright 2021 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\n// stateAtBlock retrieves the state database associated with a certain block.\n// If no state is locally available for the given block, a number of blocks are\n// attempted to be reexecuted to generate the desired state.\nfunc (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64) (statedb *state.StateDB, release func(), err error) {\n\t// If we have the state fully available, use that\n\tstatedb, err = eth.blockchain.StateAt(block.Root())\n\tif err == nil {\n\t\treturn statedb, func() {}, nil\n\t}\n\t// Otherwise try to reexec blocks until we find a state or reach our limit\n\torigin := block.NumberU64()\n\tdatabase := state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16, Preimages: true})\n\n\tfor i := uint64(0); i < reexec; i++ {\n\t\tif block.NumberU64() == 0 {\n\t\t\treturn nil, nil, errors.New(\"genesis state is missing\")\n\t\t}\n\t\tparent := eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)\n\t\tif parent == nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"missing block %v %d\", block.ParentHash(), block.NumberU64()-1)\n\t\t}\n\t\tblock = parent\n\n\t\tstatedb, err = state.New(block.Root(), database, nil)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase *trie.MissingNodeError:\n\t\t\treturn nil, nil, fmt.Errorf(\"required historical state unavailable (reexec=%d)\", reexec)\n\t\tdefault:\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\t// State was available at historical point, regenerate\n\tvar (\n\t\tstart  = time.Now()\n\t\tlogged time.Time\n\t\tparent common.Hash\n\t)\n\tdefer func() {\n\t\tif err != nil && parent != (common.Hash{}) {\n\t\t\tdatabase.TrieDB().Dereference(parent)\n\t\t}\n\t}()\n\tfor block.NumberU64() < origin {\n\t\t// Print progress logs if long enough time elapsed\n\t\tif time.Since(logged) > 8*time.Second {\n\t\t\tlog.Info(\"Regenerating historical state\", \"block\", block.NumberU64()+1, \"target\", origin, \"remaining\", origin-block.NumberU64()-1, \"elapsed\", time.Since(start))\n\t\t\tlogged = time.Now()\n\t\t}\n\t\t// Retrieve the next block to regenerate and process it\n\t\tif block = eth.blockchain.GetBlockByNumber(block.NumberU64() + 1); block == nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"block #%d not found\", block.NumberU64()+1)\n\t\t}\n\t\t_, _, _, err := 
eth.blockchain.Processor().Process(block, statedb, vm.Config{})\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"processing block %d failed: %v\", block.NumberU64(), err)\n\t\t}\n\t\t// Finalize the state so any modifications are written to the trie\n\t\troot, err := statedb.Commit(eth.blockchain.Config().IsEIP158(block.Number()))\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tstatedb, err = state.New(root, database, nil)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"state reset after block %d failed: %v\", block.NumberU64(), err)\n\t\t}\n\t\tdatabase.TrieDB().Reference(root, common.Hash{})\n\t\tif parent != (common.Hash{}) {\n\t\t\tdatabase.TrieDB().Dereference(parent)\n\t\t}\n\t\tparent = root\n\t}\n\tnodes, imgs := database.TrieDB().Size()\n\tlog.Info(\"Historical state regenerated\", \"block\", block.NumberU64(), \"elapsed\", time.Since(start), \"nodes\", nodes, \"preimages\", imgs)\n\treturn statedb, func() { database.TrieDB().Dereference(parent) }, nil\n}\n\n// statesInRange retrieves a batch of state databases associated with the specific\n// block range. If no state is locally available for the given range, a number of\n// blocks are reexecuted to regenerate the ancestor states.\nfunc (eth *Ethereum) statesInRange(fromBlock, toBlock *types.Block, reexec uint64) (states []*state.StateDB, release func(), err error) {\n\tstatedb, err := eth.blockchain.StateAt(fromBlock.Root())\n\tif err != nil {\n\t\tstatedb, _, err = eth.stateAtBlock(fromBlock, reexec)\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tstates = append(states, statedb.Copy())\n\n\tvar (\n\t\tlogged   time.Time\n\t\tparent   common.Hash\n\t\tstart    = time.Now()\n\t\trefs     = []common.Hash{fromBlock.Root()}\n\t\tdatabase = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16, Preimages: true})\n\t)\n\t// Release all resources (including the states referenced by `stateAtBlock`)\n\t// if an error is returned.\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tfor _, ref := range refs {\n\t\t\t\tdatabase.TrieDB().Dereference(ref)\n\t\t\t}\n\t\t}\n\t}()\n\tfor i := fromBlock.NumberU64() + 1; i <= toBlock.NumberU64(); i++ {\n\t\t// Print progress logs if long enough time elapsed\n\t\tif time.Since(logged) > 8*time.Second {\n\t\t\tlogged = time.Now()\n\t\t\tlog.Info(\"Regenerating historical state\", \"block\", i, \"target\", toBlock.NumberU64(), \"remaining\", toBlock.NumberU64()-i, \"elapsed\", time.Since(start))\n\t\t}\n\t\t// Retrieve the next block to regenerate and process it\n\t\tblock := eth.blockchain.GetBlockByNumber(i)\n\t\tif block == nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"block #%d not found\", i)\n\t\t}\n\t\t_, _, _, err := eth.blockchain.Processor().Process(block, statedb, vm.Config{})\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"processing block %d failed: %v\", block.NumberU64(), err)\n\t\t}\n\t\t// Finalize the state so any modifications are written to the trie\n\t\troot, err := statedb.Commit(eth.blockchain.Config().IsEIP158(block.Number()))\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\t// Reset the state to the freshly committed root for the next iteration\n\t\tstatedb, err = eth.blockchain.StateAt(root)\n\t\tif err != nil {\n\t\t\treturn nil, nil, fmt.Errorf(\"state reset after block %d failed: %v\", block.NumberU64(), err)\n\t\t}\n\t\tstates = append(states, statedb.Copy())\n\n\t\t// Reference the trie twice, once for us, once for the tracer
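:\n\t\t// our own reference is dropped once we advance past the block, while the\n\t\t// tracer's reference is released by the returned release function.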
\n\t\tdatabase.TrieDB().Reference(root, common.Hash{})\n\t\tdatabase.TrieDB().Reference(root, common.Hash{})\n\t\trefs = append(refs, root)\n\n\t\t// Dereference all past tries we ourselves are done working with\n\t\tif parent != (common.Hash{}) {\n\t\t\tdatabase.TrieDB().Dereference(parent)\n\t\t}\n\t\tparent = root\n\t}\n\t// release is a handler that releases all referenced states, including\n\t// the one referenced in `stateAtBlock`.\n\trelease = func() {\n\t\tfor _, ref := range refs {\n\t\t\tdatabase.TrieDB().Dereference(ref)\n\t\t}\n\t}\n\treturn states, release, nil\n}\n\n// stateAtTransaction returns the execution environment of a certain transaction.\nfunc (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) {\n\t// Short circuit if it's the genesis block.\n\tif block.NumberU64() == 0 {\n\t\treturn nil, vm.BlockContext{}, nil, nil, errors.New(\"no transaction in genesis\")\n\t}\n\t// Create the parent state database\n\tparent := eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1)\n\tif parent == nil {\n\t\treturn nil, vm.BlockContext{}, nil, nil, fmt.Errorf(\"parent %#x not found\", block.ParentHash())\n\t}\n\tstatedb, release, err := eth.stateAtBlock(parent, reexec)\n\tif err != nil {\n\t\treturn nil, vm.BlockContext{}, nil, nil, err\n\t}\n\tif txIndex == 0 && len(block.Transactions()) == 0 {\n\t\treturn nil, vm.BlockContext{}, statedb, release, nil\n\t}\n\t// Recompute transactions up to the target index.\n\tsigner := types.MakeSigner(eth.blockchain.Config(), block.Number())\n\tfor idx, tx := range block.Transactions() {\n\t\t// Assemble the transaction call message and return if the requested index\n\t\t// has been reached\n\t\tmsg, _ := tx.AsMessage(signer)\n\t\ttxContext := core.NewEVMTxContext(msg)\n\t\tcontext := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil)\n\t\tif idx == txIndex {\n\t\t\treturn msg, context, statedb, release, nil\n\t\t}\n\t\t// Not yet the transaction being searched for, execute on top of the current state\n\t\tvmenv := vm.NewEVM(context, txContext, statedb, eth.blockchain.Config(), vm.Config{})\n\t\tstatedb.Prepare(tx.Hash(), block.Hash(), idx)\n\t\tif _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {\n\t\t\trelease()\n\t\t\treturn nil, vm.BlockContext{}, nil, nil, fmt.Errorf(\"transaction %#x failed: %v\", tx.Hash(), err)\n\t\t}\n\t\t// Ensure any modifications are committed to the state\n\t\t// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect\n\t\tstatedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))\n\t}\n\trelease()\n\treturn nil, vm.BlockContext{}, nil, nil, fmt.Errorf(\"transaction index %d out of range for block %#x\", txIndex, block.Hash())\n}\n"
  },
  {
    "path": "eth/sync.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"math/big\"\n\t\"math/rand\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/eth\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\nconst (\n\tforceSyncCycle      = 10 * time.Second // Time interval to force syncs, even if few peers are available\n\tdefaultMinSyncPeers = 5                // Amount of peers desired to start syncing\n\n\t// This is the target size for the packs of transactions sent by txsyncLoop64.\n\t// A pack can get larger than this if a single transactions exceeds this size.\n\ttxsyncPackSize = 100 * 1024\n)\n\ntype txsync struct {\n\tp   *eth.Peer\n\ttxs []*types.Transaction\n}\n\n// syncTransactions starts sending all currently pending transactions to the given peer.\nfunc (h *handler) syncTransactions(p *eth.Peer) {\n\t// Assemble the set of transaction to broadcast or announce to the remote\n\t// peer. Fun fact, this is quite an expensive operation as it needs to sort\n\t// the transactions if the sorting is not cached yet. However, with a random\n\t// order, insertions could overflow the non-executable queues and get dropped.\n\t//\n\t// TODO(karalabe): Figure out if we could get away with random order somehow\n\tvar txs types.Transactions\n\tpending, _ := h.txpool.Pending()\n\tfor _, batch := range pending {\n\t\ttxs = append(txs, batch...)\n\t}\n\tif len(txs) == 0 {\n\t\treturn\n\t}\n\t// The eth/65 protocol introduces proper transaction announcements, so instead\n\t// of dripping transactions across multiple peers, just send the entire list as\n\t// an announcement and let the remote side decide what they need (likely nothing).\n\tif p.Version() >= eth.ETH65 {\n\t\thashes := make([]common.Hash, len(txs))\n\t\tfor i, tx := range txs {\n\t\t\thashes[i] = tx.Hash()\n\t\t}\n\t\tp.AsyncSendPooledTransactionHashes(hashes)\n\t\treturn\n\t}\n\t// Out of luck, peer is running legacy protocols, drop the txs over\n\tselect {\n\tcase h.txsyncCh <- &txsync{p: p, txs: txs}:\n\tcase <-h.quitSync:\n\t}\n}\n\n// txsyncLoop64 takes care of the initial transaction sync for each new\n// connection. When a new peer appears, we relay all currently pending\n// transactions. 
In order to minimise egress bandwidth usage, we send\n// the transactions in small packs to one peer at a time.\nfunc (h *handler) txsyncLoop64() {\n\tdefer h.wg.Done()\n\n\tvar (\n\t\tpending = make(map[enode.ID]*txsync)\n\t\tsending = false               // whether a send is active\n\t\tpack    = new(txsync)         // the pack that is being sent\n\t\tdone    = make(chan error, 1) // result of the send\n\t)\n\n\t// send starts sending a pack of transactions from the sync.\n\tsend := func(s *txsync) {\n\t\tif s.p.Version() >= eth.ETH65 {\n\t\t\tpanic(\"initial transaction syncer running on eth/65+\")\n\t\t}\n\t\t// Fill pack with transactions up to the target size.\n\t\tsize := common.StorageSize(0)\n\t\tpack.p = s.p\n\t\tpack.txs = pack.txs[:0]\n\t\tfor i := 0; i < len(s.txs) && size < txsyncPackSize; i++ {\n\t\t\tpack.txs = append(pack.txs, s.txs[i])\n\t\t\tsize += s.txs[i].Size()\n\t\t}\n\t\t// Remove the transactions that will be sent.\n\t\ts.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])]\n\t\tif len(s.txs) == 0 {\n\t\t\tdelete(pending, s.p.Peer.ID())\n\t\t}\n\t\t// Send the pack in the background.\n\t\ts.p.Log().Trace(\"Sending batch of transactions\", \"count\", len(pack.txs), \"bytes\", size)\n\t\tsending = true\n\t\tgo func() { done <- pack.p.SendTransactions(pack.txs) }()\n\t}\n\t// pick chooses the next pending sync.\n\tpick := func() *txsync {\n\t\tif len(pending) == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tn := rand.Intn(len(pending)) + 1\n\t\tfor _, s := range pending {\n\t\t\tif n--; n == 0 {\n\t\t\t\treturn s\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase s := <-h.txsyncCh:\n\t\t\tpending[s.p.Peer.ID()] = s\n\t\t\tif !sending {\n\t\t\t\tsend(s)\n\t\t\t}\n\t\tcase err := <-done:\n\t\t\tsending = false\n\t\t\t// Stop tracking peers that cause send failures.\n\t\t\tif err != nil {\n\t\t\t\tpack.p.Log().Debug(\"Transaction send failed\", \"err\", err)\n\t\t\t\tdelete(pending, pack.p.Peer.ID())\n\t\t\t}\n\t\t\t// Schedule the next send.\n\t\t\tif s := pick(); s != nil {\n\t\t\t\tsend(s)\n\t\t\t}\n\t\tcase <-h.quitSync:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// chainSyncer coordinates blockchain sync components.\ntype chainSyncer struct {\n\thandler     *handler\n\tforce       *time.Timer\n\tforced      bool // true when force timer fired\n\tpeerEventCh chan struct{}\n\tdoneCh      chan error // non-nil when sync is running\n}\n\n// chainSyncOp is a scheduled sync operation.\ntype chainSyncOp struct {\n\tmode downloader.SyncMode\n\tpeer *eth.Peer\n\ttd   *big.Int\n\thead common.Hash\n}\n\n// newChainSyncer creates a chainSyncer.\nfunc newChainSyncer(handler *handler) *chainSyncer {\n\treturn &chainSyncer{\n\t\thandler:     handler,\n\t\tpeerEventCh: make(chan struct{}),\n\t}\n}\n\n// handlePeerEvent notifies the syncer about a change in the peer set.\n// This is called for new peers and every time a peer announces a new\n// chain head.\nfunc (cs *chainSyncer) handlePeerEvent(peer *eth.Peer) bool {\n\tselect {\n\tcase cs.peerEventCh <- struct{}{}:\n\t\treturn true\n\tcase <-cs.handler.quitSync:\n\t\treturn false\n\t}\n}\n\n// loop runs in its own goroutine and launches the sync when necessary.\nfunc (cs *chainSyncer) loop() {\n\tdefer cs.handler.wg.Done()\n\n\tcs.handler.blockFetcher.Start()\n\tcs.handler.txFetcher.Start()\n\tdefer cs.handler.blockFetcher.Stop()\n\tdefer cs.handler.txFetcher.Stop()\n\tdefer cs.handler.downloader.Terminate()\n\n\t// The force timer lowers the peer count threshold down to one when it fires.\n\t// This ensures we'll always start sync 
even if there aren't enough peers.\n\tcs.force = time.NewTimer(forceSyncCycle)\n\tdefer cs.force.Stop()\n\n\tfor {\n\t\tif op := cs.nextSyncOp(); op != nil {\n\t\t\tcs.startSync(op)\n\t\t}\n\t\tselect {\n\t\tcase <-cs.peerEventCh:\n\t\t\t// Peer information changed, recheck.\n\t\tcase <-cs.doneCh:\n\t\t\tcs.doneCh = nil\n\t\t\tcs.force.Reset(forceSyncCycle)\n\t\t\tcs.forced = false\n\t\tcase <-cs.force.C:\n\t\t\tcs.forced = true\n\n\t\tcase <-cs.handler.quitSync:\n\t\t\t// Disable all insertion on the blockchain. This needs to happen before\n\t\t\t// terminating the downloader because the downloader waits for blockchain\n\t\t\t// inserts, and these can take a long time to finish.\n\t\t\tcs.handler.chain.StopInsert()\n\t\t\tcs.handler.downloader.Terminate()\n\t\t\tif cs.doneCh != nil {\n\t\t\t\t<-cs.doneCh\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// nextSyncOp determines whether sync is required at this time.\nfunc (cs *chainSyncer) nextSyncOp() *chainSyncOp {\n\tif cs.doneCh != nil {\n\t\treturn nil // Sync already running.\n\t}\n\n\t// Ensure we're at minimum peer count.\n\tminPeers := defaultMinSyncPeers\n\tif cs.forced {\n\t\tminPeers = 1\n\t} else if minPeers > cs.handler.maxPeers {\n\t\tminPeers = cs.handler.maxPeers\n\t}\n\tif cs.handler.peers.len() < minPeers {\n\t\treturn nil\n\t}\n\t// We have enough peers, check TD\n\tpeer := cs.handler.peers.peerWithHighestTD()\n\tif peer == nil {\n\t\treturn nil\n\t}\n\tmode, ourTD := cs.modeAndLocalHead()\n\tif mode == downloader.FastSync && atomic.LoadUint32(&cs.handler.snapSync) == 1 {\n\t\t// Fast sync via the snap protocol\n\t\tmode = downloader.SnapSync\n\t}\n\top := peerToSyncOp(mode, peer)\n\tif op.td.Cmp(ourTD) <= 0 {\n\t\treturn nil // We're in sync.\n\t}\n\treturn op\n}\n\nfunc peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp {\n\tpeerHead, peerTD := p.Head()\n\treturn &chainSyncOp{mode: mode, peer: p, td: peerTD, head: peerHead}\n}\n\nfunc (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) {\n\t// If we're in fast sync mode, return that directly\n\tif atomic.LoadUint32(&cs.handler.fastSync) == 1 {\n\t\tblock := cs.handler.chain.CurrentFastBlock()\n\t\ttd := cs.handler.chain.GetTdByHash(block.Hash())\n\t\treturn downloader.FastSync, td\n\t}\n\t// We are probably in full sync, but we might have rewound to before the\n\t// fast sync pivot, check if we should re-enable\n\tif pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil {\n\t\tif head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot {\n\t\t\tblock := cs.handler.chain.CurrentFastBlock()\n\t\t\ttd := cs.handler.chain.GetTdByHash(block.Hash())\n\t\t\treturn downloader.FastSync, td\n\t\t}\n\t}\n\t// Nope, we're really full syncing\n\thead := cs.handler.chain.CurrentBlock()\n\ttd := cs.handler.chain.GetTd(head.Hash(), head.NumberU64())\n\treturn downloader.FullSync, td\n}\n\n// startSync launches doSync in a new goroutine.\nfunc (cs *chainSyncer) startSync(op *chainSyncOp) {\n\tcs.doneCh = make(chan error, 1)\n\tgo func() { cs.doneCh <- cs.handler.doSync(op) }()\n}\n\n// doSync synchronizes the local blockchain with a remote peer.\nfunc (h *handler) doSync(op *chainSyncOp) error {\n\tif op.mode == downloader.FastSync || op.mode == downloader.SnapSync {\n\t\t// Before launching the fast sync, we have to ensure the user uses the same\n\t\t// txlookup limit.\n\t\t// The main concern here is: during the fast sync Geth won't index the\n\t\t// blocks (i.e. generate tx indices) before the HEAD-limit. But if the user\n\t\t// changes the limit for the next fast sync (e.g. kills Geth manually and\n\t\t// restarts it), it will be hard for Geth to figure out the oldest block\n\t\t// that has been indexed. So, user-experience wise, it's non-optimal that\n\t\t// the limit can't be changed during a fast sync; if it is changed, Geth\n\t\t// will just blindly keep using the original one.\n\t\t
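// For example, a node whose first fast sync ran with --txlookuplimit=1000\n\t\t// will keep using 1000 on a later fast sync even if a different value is\n\t\t// passed on the command line; only a warning is logged.\n\t\t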
limit := h.chain.TxLookupLimit()\n\t\tif stored := rawdb.ReadFastTxLookupLimit(h.database); stored == nil {\n\t\t\trawdb.WriteFastTxLookupLimit(h.database, limit)\n\t\t} else if *stored != limit {\n\t\t\th.chain.SetTxLookupLimit(*stored)\n\t\t\tlog.Warn(\"Update txLookup limit\", \"provided\", limit, \"updated\", *stored)\n\t\t}\n\t}\n\t// Run the sync cycle, and disable fast sync if we're past the pivot block\n\terr := h.downloader.Synchronise(op.peer.ID(), op.head, op.td, op.mode)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif atomic.LoadUint32(&h.fastSync) == 1 {\n\t\tlog.Info(\"Fast sync complete, auto disabling\")\n\t\tatomic.StoreUint32(&h.fastSync, 0)\n\t}\n\tif atomic.LoadUint32(&h.snapSync) == 1 {\n\t\tlog.Info(\"Snap sync complete, auto disabling\")\n\t\tatomic.StoreUint32(&h.snapSync, 0)\n\t}\n\t// If we've successfully finished a sync cycle and passed any required checkpoint,\n\t// enable accepting transactions from the network.\n\thead := h.chain.CurrentBlock()\n\tif head.NumberU64() >= h.checkpointNumber {\n\t\t// Checkpoint passed, sanity check the timestamp to have a fallback mechanism\n\t\t// for non-checkpointed (number = 0) private networks.\n\t\tif head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) {\n\t\t\tatomic.StoreUint32(&h.acceptTxs, 1)\n\t\t}\n\t}\n\tif head.NumberU64() > 0 {\n\t\t// We've completed a sync cycle, notify all peers of new state. This path is\n\t\t// essential in star-topology networks where a gateway node needs to notify\n\t\t// all its out-of-date peers of the availability of a new block. This failure\n\t\t// scenario will most often crop up in private and hackathon networks with\n\t\t// degenerate connectivity, but it should be healthy for the mainnet too to\n\t\t// more reliably update peers or the local TD state.\n\t\th.BroadcastBlock(head, false)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "eth/sync_test.go",
    "content": "// Copyright 2015 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage eth\n\nimport (\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/eth/protocols/eth\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\n// Tests that fast sync is disabled after a successful sync cycle.\nfunc TestFastSyncDisabling64(t *testing.T) { testFastSyncDisabling(t, 64) }\nfunc TestFastSyncDisabling65(t *testing.T) { testFastSyncDisabling(t, 65) }\n\n// Tests that fast sync gets disabled as soon as a real block is successfully\n// imported into the blockchain.\nfunc testFastSyncDisabling(t *testing.T, protocol uint) {\n\tt.Parallel()\n\n\t// Create an empty handler and ensure it's in fast sync mode\n\tempty := newTestHandler()\n\tif atomic.LoadUint32(&empty.handler.fastSync) == 0 {\n\t\tt.Fatalf(\"fast sync disabled on pristine blockchain\")\n\t}\n\tdefer empty.close()\n\n\t// Create a full handler and ensure fast sync ends up disabled\n\tfull := newTestHandlerWithBlocks(1024)\n\tif atomic.LoadUint32(&full.handler.fastSync) == 1 {\n\t\tt.Fatalf(\"fast sync not disabled on non-empty blockchain\")\n\t}\n\tdefer full.close()\n\n\t// Sync up the two handlers\n\temptyPipe, fullPipe := p2p.MsgPipe()\n\tdefer emptyPipe.Close()\n\tdefer fullPipe.Close()\n\n\temptyPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, \"\", nil), emptyPipe, empty.txpool)\n\tfullPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, \"\", nil), fullPipe, full.txpool)\n\tdefer emptyPeer.Close()\n\tdefer fullPeer.Close()\n\n\tgo empty.handler.runEthPeer(emptyPeer, func(peer *eth.Peer) error {\n\t\treturn eth.Handle((*ethHandler)(empty.handler), peer)\n\t})\n\tgo full.handler.runEthPeer(fullPeer, func(peer *eth.Peer) error {\n\t\treturn eth.Handle((*ethHandler)(full.handler), peer)\n\t})\n\t// Wait a bit for the above handlers to start\n\ttime.Sleep(250 * time.Millisecond)\n\n\t// Check that fast sync was disabled\n\top := peerToSyncOp(downloader.FastSync, empty.handler.peers.peerWithHighestTD())\n\tif err := empty.handler.doSync(op); err != nil {\n\t\tt.Fatal(\"sync failed:\", err)\n\t}\n\tif atomic.LoadUint32(&empty.handler.fastSync) == 1 {\n\t\tt.Fatalf(\"fast sync not disabled after successful synchronisation\")\n\t}\n}\n"
  },
  {
    "path": "eth/tracers/api.go",
    "content": "// Copyright 2021 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage tracers\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/consensus\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/internal/ethapi\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\nconst (\n\t// defaultTraceTimeout is the amount of time a single transaction can execute\n\t// by default before being forcefully aborted.\n\tdefaultTraceTimeout = 5 * time.Second\n\n\t// defaultTraceReexec is the number of blocks the tracer is willing to go back\n\t// and reexecute to produce missing historical state necessary to run a specific\n\t// trace.\n\tdefaultTraceReexec = uint64(128)\n)\n\n// Backend interface provides the common API services (that are provided by\n// both full and light clients) with access to necessary functions.\ntype Backend interface {\n\tHeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error)\n\tHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)\n\tBlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error)\n\tBlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error)\n\tGetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error)\n\tRPCGasCap() uint64\n\tChainConfig() *params.ChainConfig\n\tEngine() consensus.Engine\n\tChainDb() ethdb.Database\n\tStateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error)\n\tStateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error)\n\tStatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error)\n}\n\n// API is the collection of tracing APIs exposed over the private debugging endpoint.\ntype API struct {\n\tbackend Backend\n}\n\n// NewAPI creates a new API definition for the tracing methods of the Ethereum service.\nfunc NewAPI(backend Backend) *API {\n\treturn &API{backend: backend}\n}\n\ntype chainContext struct {\n\tapi *API\n\tctx context.Context\n}\n\nfunc 
(context *chainContext) Engine() consensus.Engine {\n\treturn context.api.backend.Engine()\n}\n\nfunc (context *chainContext) GetHeader(hash common.Hash, number uint64) *types.Header {\n\theader, err := context.api.backend.HeaderByNumber(context.ctx, rpc.BlockNumber(number))\n\tif err != nil {\n\t\treturn nil\n\t}\n\tif header.Hash() == hash {\n\t\treturn header\n\t}\n\theader, err = context.api.backend.HeaderByHash(context.ctx, hash)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn header\n}\n\n// chainContext constructs the context reader which is used by the evm for reading\n// the necessary chain context.\nfunc (api *API) chainContext(ctx context.Context) core.ChainContext {\n\treturn &chainContext{api: api, ctx: ctx}\n}\n\n// blockByNumber is the wrapper of the chain access function offered by the backend.\n// It will return an error if the block is not found.\nfunc (api *API) blockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {\n\tblock, err := api.backend.BlockByNumber(ctx, number)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"block #%d not found\", number)\n\t}\n\treturn block, nil\n}\n\n// blockByHash is the wrapper of the chain access function offered by the backend.\n// It will return an error if the block is not found.\nfunc (api *API) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {\n\tblock, err := api.backend.BlockByHash(ctx, hash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"block %s not found\", hash.Hex())\n\t}\n\treturn block, nil\n}\n\n// blockByNumberAndHash is the wrapper of the chain access function offered by\n// the backend. It will return an error if the block is not found.\n//\n// Note this function is friendly to the light client which can only retrieve the\n// historical (before the CHT) header/block by number.\nfunc (api *API) blockByNumberAndHash(ctx context.Context, number rpc.BlockNumber, hash common.Hash) (*types.Block, error) {\n\tblock, err := api.blockByNumber(ctx, number)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif block.Hash() == hash {\n\t\treturn block, nil\n\t}\n\treturn api.blockByHash(ctx, hash)\n}\n\n// TraceConfig holds extra parameters to trace functions.\ntype TraceConfig struct {\n\t*vm.LogConfig\n\tTracer  *string\n\tTimeout *string\n\tReexec  *uint64\n}\n\n// StdTraceConfig holds extra parameters to standard-json trace functions.\ntype StdTraceConfig struct {\n\tvm.LogConfig\n\tReexec *uint64\n\tTxHash common.Hash\n}\n\n// txTraceContext is the contextual info about a transaction before it gets run.\ntype txTraceContext struct {\n\tindex int         // Index of the transaction within the block\n\thash  common.Hash // Hash of the transaction\n\tblock common.Hash // Hash of the block containing the transaction\n}\n\n// txTraceResult is the result of a single transaction trace.\ntype txTraceResult struct {\n\tResult interface{} `json:\"result,omitempty\"` // Trace results produced by the tracer\n\tError  string      `json:\"error,omitempty\"`  // Trace failure produced by the tracer\n}\n\n// blockTraceTask represents a single block trace task when an entire chain is\n// being traced.\ntype blockTraceTask struct {\n\tstatedb *state.StateDB   // Intermediate state prepped for tracing\n\tblock   *types.Block     // Block to trace the transactions from\n\tresults []*txTraceResult // Trace results produced by the task\n}\n\n// blockTraceResult represents the results of tracing a single 
block when an entire\n// chain is being traced.\ntype blockTraceResult struct {\n\tBlock  hexutil.Uint64   `json:\"block\"`  // Block number corresponding to this trace\n\tHash   common.Hash      `json:\"hash\"`   // Block hash corresponding to this trace\n\tTraces []*txTraceResult `json:\"traces\"` // Trace results produced by the task\n}\n\n// txTraceTask represents a single transaction trace task when an entire block\n// is being traced.\ntype txTraceTask struct {\n\tstatedb *state.StateDB // Intermediate state prepped for tracing\n\tindex   int            // Transaction offset in the block\n}\n\n// TraceChain collects the structured logs created during the execution of EVM\n// between two blocks (excluding start) and returns them as a JSON object.\nfunc (api *API) TraceChain(ctx context.Context, start, end rpc.BlockNumber, config *TraceConfig) (*rpc.Subscription, error) {\n\t// Fetch the block interval that we want to trace\n\tfrom, err := api.blockByNumber(ctx, start)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tto, err := api.blockByNumber(ctx, end)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif from.Number().Cmp(to.Number()) >= 0 {\n\t\treturn nil, fmt.Errorf(\"end block (#%d) needs to come after start block (#%d)\", end, start)\n\t}\n\treturn api.traceChain(ctx, from, to, config)\n}\n\n// traceChain configures a new tracer according to the provided configuration, and\n// executes all the transactions contained within. The return value will be one item\n// per transaction, dependent on the requested tracer.\nfunc (api *API) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) {\n\t// Tracing a chain is a **long** operation, only do with subscriptions\n\tnotifier, supported := rpc.NotifierFromContext(ctx)\n\tif !supported {\n\t\treturn &rpc.Subscription{}, rpc.ErrNotificationsUnsupported\n\t}\n\tsub := notifier.CreateSubscription()\n\n\t// Shift the end border back by one block in order to get the states standing\n\t// before each of the traced blocks.\n\tendBlock, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(end.NumberU64()-1), end.ParentHash())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Prepare all the states for tracing. Note this procedure can take a very\n\t// long time, so a timeout mechanism is necessary.\n\treexec := defaultTraceReexec\n\tif config != nil && config.Reexec != nil {\n\t\treexec = *config.Reexec\n\t}\n\tstates, release, err := api.backend.StatesInRange(ctx, start, endBlock, reexec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer release() // Release all the resources in the last step.\n\n\tblocks := int(end.NumberU64() - start.NumberU64())\n\tthreads := runtime.NumCPU()\n\tif threads > blocks {\n\t\tthreads = blocks\n\t}\n\t
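// The tracing is organised as a pipeline: the worker pool below traces the\n\t// transactions of individual blocks, a feeder goroutine schedules the block\n\t// tasks in ascending order, and a collector goroutine reassembles the\n\t// results and streams them back to the user in block order.\n\t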
A timeout mechanism is necessary.\n\treexec := defaultTraceReexec\n\tif config != nil && config.Reexec != nil {\n\t\treexec = *config.Reexec\n\t}\n\tstates, release, err := api.backend.StatesInRange(ctx, start, endBlock, reexec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer release() // Release all the resources in the last step.\n\n\tblocks := int(end.NumberU64() - start.NumberU64())\n\tthreads := runtime.NumCPU()\n\tif threads > blocks {\n\t\tthreads = blocks\n\t}\n\tvar (\n\t\tpend    = new(sync.WaitGroup)\n\t\ttasks   = make(chan *blockTraceTask, threads)\n\t\tresults = make(chan *blockTraceTask, threads)\n\t)\n\tfor th := 0; th < threads; th++ {\n\t\tpend.Add(1)\n\t\tgo func() {\n\t\t\tdefer pend.Done()\n\n\t\t\t// Fetch and execute the next block trace tasks\n\t\t\tfor task := range tasks {\n\t\t\t\tsigner := types.MakeSigner(api.backend.ChainConfig(), task.block.Number())\n\t\t\t\tblockCtx := core.NewEVMBlockContext(task.block.Header(), api.chainContext(ctx), nil)\n\t\t\t\t// Trace all the transactions contained within\n\t\t\t\tfor i, tx := range task.block.Transactions() {\n\t\t\t\t\tmsg, _ := tx.AsMessage(signer)\n\t\t\t\t\ttxctx := &txTraceContext{\n\t\t\t\t\t\tindex: i,\n\t\t\t\t\t\thash:  tx.Hash(),\n\t\t\t\t\t\tblock: task.block.Hash(),\n\t\t\t\t\t}\n\t\t\t\t\tres, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ttask.results[i] = &txTraceResult{Error: err.Error()}\n\t\t\t\t\t\tlog.Warn(\"Tracing failed\", \"hash\", tx.Hash(), \"block\", task.block.NumberU64(), \"err\", err)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\t// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect\n\t\t\t\t\ttask.statedb.Finalise(api.backend.ChainConfig().IsEIP158(task.block.Number()))\n\t\t\t\t\ttask.results[i] = &txTraceResult{Result: res}\n\t\t\t\t}\n\t\t\t\t// Stream the result back to the user or abort on teardown\n\t\t\t\tselect {\n\t\t\t\tcase results <- task:\n\t\t\t\tcase <-notifier.Closed():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\t// Start a goroutine to feed all the blocks into the tracers\n\tbegin := time.Now()\n\n\tgo func() {\n\t\tvar (\n\t\t\tlogged time.Time\n\t\t\tnumber uint64\n\t\t\ttraced uint64\n\t\t\tfailed error\n\t\t)\n\t\t// Ensure everything is properly cleaned up on any exit path\n\t\tdefer func() {\n\t\t\tclose(tasks)\n\t\t\tpend.Wait()\n\n\t\t\tswitch {\n\t\t\tcase failed != nil:\n\t\t\t\tlog.Warn(\"Chain tracing failed\", \"start\", start.NumberU64(), \"end\", end.NumberU64(), \"transactions\", traced, \"elapsed\", time.Since(begin), \"err\", failed)\n\t\t\tcase number < end.NumberU64():\n\t\t\t\tlog.Warn(\"Chain tracing aborted\", \"start\", start.NumberU64(), \"end\", end.NumberU64(), \"abort\", number, \"transactions\", traced, \"elapsed\", time.Since(begin))\n\t\t\tdefault:\n\t\t\t\tlog.Info(\"Chain tracing finished\", \"start\", start.NumberU64(), \"end\", end.NumberU64(), \"transactions\", traced, \"elapsed\", time.Since(begin))\n\t\t\t}\n\t\t\tclose(results)\n\t\t}()\n\t\t// Feed all the blocks both into the tracer, as well as fast process concurrently\n\t\tfor number = start.NumberU64() + 1; number <= end.NumberU64(); number++ {\n\t\t\t// Stop tracing if interruption was requested\n\t\t\tselect {\n\t\t\tcase <-notifier.Closed():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\t// Print progress logs if enough time has elapsed\n\t\t\tif time.Since(logged) > 8*time.Second {\n\t\t\t\tlogged = time.Now()\n\t\t\t\tlog.Info(\"Tracing chain segment\", \"start\", 
start.NumberU64(), \"end\", end.NumberU64(), \"current\", number, \"transactions\", traced, \"elapsed\", time.Since(begin))\n\t\t\t}\n\t\t\t// Retrieve the next block to trace\n\t\t\tblock, err := api.blockByNumber(ctx, rpc.BlockNumber(number))\n\t\t\tif err != nil {\n\t\t\t\tfailed = err\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Send the block over to the concurrent tracers (if not in the fast-forward phase)\n\t\t\ttxs := block.Transactions()\n\t\t\tselect {\n\t\t\tcase tasks <- &blockTraceTask{statedb: states[int(number-start.NumberU64()-1)], block: block, results: make([]*txTraceResult, len(txs))}:\n\t\t\tcase <-notifier.Closed():\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttraced += uint64(len(txs))\n\t\t}\n\t}()\n\n\t// Keep reading the trace results and stream them to the user\n\tgo func() {\n\t\tvar (\n\t\t\tdone = make(map[uint64]*blockTraceResult)\n\t\t\tnext = start.NumberU64() + 1\n\t\t)\n\t\tfor res := range results {\n\t\t\t// Queue up next received result\n\t\t\tresult := &blockTraceResult{\n\t\t\t\tBlock:  hexutil.Uint64(res.block.NumberU64()),\n\t\t\t\tHash:   res.block.Hash(),\n\t\t\t\tTraces: res.results,\n\t\t\t}\n\t\t\tdone[uint64(result.Block)] = result\n\n\t\t\t// Stream completed traces to the user, aborting on the first error\n\t\t\tfor result, ok := done[next]; ok; result, ok = done[next] {\n\t\t\t\tif len(result.Traces) > 0 || next == end.NumberU64() {\n\t\t\t\t\tnotifier.Notify(sub.ID, result)\n\t\t\t\t}\n\t\t\t\tdelete(done, next)\n\t\t\t\tnext++\n\t\t\t}\n\t\t}\n\t}()\n\treturn sub, nil\n}\n\n// TraceBlockByNumber returns the structured logs created during the execution of\n// EVM and returns them as a JSON object.\nfunc (api *API) TraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *TraceConfig) ([]*txTraceResult, error) {\n\tblock, err := api.blockByNumber(ctx, number)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn api.traceBlock(ctx, block, config)\n}\n\n// TraceBlockByHash returns the structured logs created during the execution of\n// EVM and returns them as a JSON object.\nfunc (api *API) TraceBlockByHash(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {\n\tblock, err := api.blockByHash(ctx, hash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn api.traceBlock(ctx, block, config)\n}\n\n// TraceBlock returns the structured logs created during the execution of EVM\n// and returns them as a JSON object.\nfunc (api *API) TraceBlock(ctx context.Context, blob []byte, config *TraceConfig) ([]*txTraceResult, error) {\n\tblock := new(types.Block)\n\tif err := rlp.Decode(bytes.NewReader(blob), block); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not decode block: %v\", err)\n\t}\n\treturn api.traceBlock(ctx, block, config)\n}\n\n// TraceBlockFromFile returns the structured logs created during the execution of\n// EVM and returns them as a JSON object.\nfunc (api *API) TraceBlockFromFile(ctx context.Context, file string, config *TraceConfig) ([]*txTraceResult, error) {\n\tblob, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read file: %v\", err)\n\t}\n\treturn api.TraceBlock(ctx, blob, config)\n}\n\n// TraceBadBlock returns the structured logs created during the execution of\n// EVM against a block pulled from the pool of bad ones and returns them as a JSON\n// object.\nfunc (api *API) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) {\n\tfor _, block := range rawdb.ReadAllBadBlocks(api.backend.ChainDb()) {\n\t\tif 
block.Hash() == hash {\n\t\t\treturn api.traceBlock(ctx, block, config)\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"bad block %#x not found\", hash)\n}\n\n// StandardTraceBlockToFile dumps the structured logs created during the\n// execution of EVM to the local file system and returns a list of files\n// to the caller.\nfunc (api *API) StandardTraceBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {\n\tblock, err := api.blockByHash(ctx, hash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn api.standardTraceBlockToFile(ctx, block, config)\n}\n\n// StandardTraceBadBlockToFile dumps the structured logs created during the\n// execution of EVM against a block pulled from the pool of bad ones to the\n// local file system and returns a list of files to the caller.\nfunc (api *API) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) {\n\tfor _, block := range rawdb.ReadAllBadBlocks(api.backend.ChainDb()) {\n\t\tif block.Hash() == hash {\n\t\t\treturn api.standardTraceBlockToFile(ctx, block, config)\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"bad block %#x not found\", hash)\n}\n\n// traceBlock configures a new tracer according to the provided configuration, and\n// executes all the transactions contained within. The return value will be one item\n// per transaction, dependent on the requested tracer.\nfunc (api *API) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) {\n\tif block.NumberU64() == 0 {\n\t\treturn nil, errors.New(\"genesis is not traceable\")\n\t}\n\tparent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treexec := defaultTraceReexec\n\tif config != nil && config.Reexec != nil {\n\t\treexec = *config.Reexec\n\t}\n\tstatedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer release()\n\n\t// Execute all the transactions contained within the block concurrently\n\tvar (\n\t\tsigner  = types.MakeSigner(api.backend.ChainConfig(), block.Number())\n\t\ttxs     = block.Transactions()\n\t\tresults = make([]*txTraceResult, len(txs))\n\n\t\tpend = new(sync.WaitGroup)\n\t\tjobs = make(chan *txTraceTask, len(txs))\n\t)\n\tthreads := runtime.NumCPU()\n\tif threads > len(txs) {\n\t\tthreads = len(txs)\n\t}\n\tblockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)\n\tblockHash := block.Hash()\n\tfor th := 0; th < threads; th++ {\n\t\tpend.Add(1)\n\t\tgo func() {\n\t\t\tdefer pend.Done()\n\t\t\t// Fetch and execute the next transaction trace tasks\n\t\t\tfor task := range jobs {\n\t\t\t\tmsg, _ := txs[task.index].AsMessage(signer)\n\t\t\t\ttxctx := &txTraceContext{\n\t\t\t\t\tindex: task.index,\n\t\t\t\t\thash:  txs[task.index].Hash(),\n\t\t\t\t\tblock: blockHash,\n\t\t\t\t}\n\t\t\t\tres, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config)\n\t\t\t\tif err != nil {\n\t\t\t\t\tresults[task.index] = &txTraceResult{Error: err.Error()}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresults[task.index] = &txTraceResult{Result: res}\n\t\t\t}\n\t\t}()\n\t}\n\t// Feed the transactions into the tracers and return\n\tvar failed error\n\tfor i, tx := range txs {\n\t\t// Send the trace task over for execution\n\t\tjobs <- &txTraceTask{statedb: statedb.Copy(), index: i}\n\n\t\t// Generate the next state snapshot fast without tracing\n\t\tmsg, _ := 
tx.AsMessage(signer)\n\t\tstatedb.Prepare(tx.Hash(), block.Hash(), i)\n\t\tvmenv := vm.NewEVM(blockCtx, core.NewEVMTxContext(msg), statedb, api.backend.ChainConfig(), vm.Config{})\n\t\tif _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil {\n\t\t\tfailed = err\n\t\t\tbreak\n\t\t}\n\t\t// Finalize the state so any modifications are written to the trie\n\t\t// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect\n\t\tstatedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))\n\t}\n\tclose(jobs)\n\tpend.Wait()\n\n\t// If execution failed in between, abort\n\tif failed != nil {\n\t\treturn nil, failed\n\t}\n\treturn results, nil\n}\n\n// standardTraceBlockToFile configures a new tracer which uses standard JSON output,\n// and traces either a full block or an individual transaction. The return value will\n// be one filename per transaction traced.\nfunc (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) {\n\t// If we're tracing a single transaction, make sure it's present\n\tif config != nil && config.TxHash != (common.Hash{}) {\n\t\tif !containsTx(block, config.TxHash) {\n\t\t\treturn nil, fmt.Errorf(\"transaction %#x not found in block\", config.TxHash)\n\t\t}\n\t}\n\tif block.NumberU64() == 0 {\n\t\treturn nil, errors.New(\"genesis is not traceable\")\n\t}\n\tparent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treexec := defaultTraceReexec\n\tif config != nil && config.Reexec != nil {\n\t\treexec = *config.Reexec\n\t}\n\tstatedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer release()\n\n\t// Retrieve the tracing configurations, or use default values\n\tvar (\n\t\tlogConfig vm.LogConfig\n\t\ttxHash    common.Hash\n\t)\n\tif config != nil {\n\t\tlogConfig = config.LogConfig\n\t\ttxHash = config.TxHash\n\t}\n\tlogConfig.Debug = true\n\n\t// Execute transaction, either tracing all or just the requested one\n\tvar (\n\t\tdumps       []string\n\t\tsigner      = types.MakeSigner(api.backend.ChainConfig(), block.Number())\n\t\tchainConfig = api.backend.ChainConfig()\n\t\tvmctx       = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)\n\t\tcanon       = true\n\t)\n\t// Check if there are any overrides: the caller may wish to enable a future\n\t// fork when executing this block. 
Note, such overrides are only applicable to the\n\t// actual specified block, not any preceding blocks that we have to go through\n\t// in order to obtain the state.\n\t// Therefore, it's perfectly valid to specify `\"futureForkBlock\": 0`, to enable `futureFork`\n\n\tif config != nil && config.Overrides != nil {\n\t\t// Copy the config, to not screw up the main config\n\t\t// Note: the Clique-part is _not_ deep copied\n\t\tchainConfigCopy := new(params.ChainConfig)\n\t\t*chainConfigCopy = *chainConfig\n\t\tchainConfig = chainConfigCopy\n\t\tif berlin := config.LogConfig.Overrides.BerlinBlock; berlin != nil {\n\t\t\tchainConfig.BerlinBlock = berlin\n\t\t\tcanon = false\n\t\t}\n\t}\n\tfor i, tx := range block.Transactions() {\n\t\t// Prepare the transaction for un-traced execution\n\t\tvar (\n\t\t\tmsg, _    = tx.AsMessage(signer)\n\t\t\ttxContext = core.NewEVMTxContext(msg)\n\t\t\tvmConf    vm.Config\n\t\t\tdump      *os.File\n\t\t\twriter    *bufio.Writer\n\t\t\terr       error\n\t\t)\n\t\t// If the transaction needs tracing, swap out the configs\n\t\tif tx.Hash() == txHash || txHash == (common.Hash{}) {\n\t\t\t// Generate a unique temporary file to dump it into\n\t\t\tprefix := fmt.Sprintf(\"block_%#x-%d-%#x-\", block.Hash().Bytes()[:4], i, tx.Hash().Bytes()[:4])\n\t\t\tif !canon {\n\t\t\t\tprefix = fmt.Sprintf(\"%valt-\", prefix)\n\t\t\t}\n\t\t\tdump, err = ioutil.TempFile(os.TempDir(), prefix)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tdumps = append(dumps, dump.Name())\n\n\t\t\t// Swap out the noop logger to the standard tracer\n\t\t\twriter = bufio.NewWriter(dump)\n\t\t\tvmConf = vm.Config{\n\t\t\t\tDebug:                   true,\n\t\t\t\tTracer:                  vm.NewJSONLogger(&logConfig, writer),\n\t\t\t\tEnablePreimageRecording: true,\n\t\t\t}\n\t\t}\n\t\t// Execute the transaction and flush any traces to disk\n\t\tvmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf)\n\t\tstatedb.Prepare(tx.Hash(), block.Hash(), i)\n\t\t_, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas()))\n\t\tif writer != nil {\n\t\t\twriter.Flush()\n\t\t}\n\t\tif dump != nil {\n\t\t\tdump.Close()\n\t\t\tlog.Info(\"Wrote standard trace\", \"file\", dump.Name())\n\t\t}\n\t\tif err != nil {\n\t\t\treturn dumps, err\n\t\t}\n\t\t// Finalize the state so any modifications are written to the trie\n\t\t// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect\n\t\tstatedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))\n\n\t\t// If we've traced the transaction we were looking for, abort\n\t\tif tx.Hash() == txHash {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn dumps, nil\n}\n\n// containsTx reports whether the transaction with a certain hash\n// is contained within the specified block.\nfunc containsTx(block *types.Block, hash common.Hash) bool {\n\tfor _, tx := range block.Transactions() {\n\t\tif tx.Hash() == hash {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// TraceTransaction returns the structured logs created during the execution of EVM\n// and returns them as a JSON object.\nfunc (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) {\n\t_, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// It shouldn't happen in practice.\n\tif blockNumber == 0 {\n\t\treturn nil, errors.New(\"genesis is not traceable\")\n\t}\n\treexec := defaultTraceReexec\n\tif config != nil && config.Reexec != nil {\n\t\treexec 
= *config.Reexec\n\t}\n\tblock, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(blockNumber), blockHash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmsg, vmctx, statedb, release, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer release()\n\n\ttxctx := &txTraceContext{\n\t\tindex: int(index),\n\t\thash:  hash,\n\t\tblock: blockHash,\n\t}\n\treturn api.traceTx(ctx, msg, txctx, vmctx, statedb, config)\n}\n\n// TraceCall lets you trace a given eth_call. It collects the structured logs\n// created during the execution of EVM if the given transaction was added on\n// top of the provided block and returns them as a JSON object.\n// You can provide -2 as a block number to trace on top of the pending block.\nfunc (api *API) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *TraceConfig) (interface{}, error) {\n\t// Try to retrieve the specified block\n\tvar (\n\t\terr   error\n\t\tblock *types.Block\n\t)\n\tif hash, ok := blockNrOrHash.Hash(); ok {\n\t\tblock, err = api.blockByHash(ctx, hash)\n\t} else if number, ok := blockNrOrHash.Number(); ok {\n\t\tblock, err = api.blockByNumber(ctx, number)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// try to recompute the state\n\treexec := defaultTraceReexec\n\tif config != nil && config.Reexec != nil {\n\t\treexec = *config.Reexec\n\t}\n\tstatedb, release, err := api.backend.StateAtBlock(ctx, block, reexec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer release()\n\n\t// Execute the trace\n\tmsg := args.ToMessage(api.backend.RPCGasCap())\n\tvmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)\n\n\treturn api.traceTx(ctx, msg, new(txTraceContext), vmctx, statedb, config)\n}\n\n// traceTx configures a new tracer according to the provided configuration, and\n// executes the given message in the provided environment. 
The return value will\n// be tracer dependent.\nfunc (api *API) traceTx(ctx context.Context, message core.Message, txctx *txTraceContext, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) {\n\t// Assemble the structured logger or the JavaScript tracer\n\tvar (\n\t\ttracer    vm.Tracer\n\t\terr       error\n\t\ttxContext = core.NewEVMTxContext(message)\n\t)\n\tswitch {\n\tcase config != nil && config.Tracer != nil:\n\t\t// Define a meaningful timeout of a single transaction trace\n\t\ttimeout := defaultTraceTimeout\n\t\tif config.Timeout != nil {\n\t\t\tif timeout, err = time.ParseDuration(*config.Timeout); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\t// Construct the JavaScript tracer to execute with\n\t\tif tracer, err = New(*config.Tracer, txContext); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Handle timeouts and RPC cancellations\n\t\tdeadlineCtx, cancel := context.WithTimeout(ctx, timeout)\n\t\tgo func() {\n\t\t\t<-deadlineCtx.Done()\n\t\t\ttracer.(*Tracer).Stop(errors.New(\"execution timeout\"))\n\t\t}()\n\t\tdefer cancel()\n\n\tcase config == nil:\n\t\ttracer = vm.NewStructLogger(nil)\n\n\tdefault:\n\t\ttracer = vm.NewStructLogger(config.LogConfig)\n\t}\n\t// Run the transaction with tracing enabled.\n\tvmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Debug: true, Tracer: tracer})\n\n\t// Call Prepare to clear out the statedb access list\n\tstatedb.Prepare(txctx.hash, txctx.block, txctx.index)\n\n\tresult, err := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas()))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"tracing failed: %v\", err)\n\t}\n\n\t// Depending on the tracer type, format and return the output.\n\tswitch tracer := tracer.(type) {\n\tcase *vm.StructLogger:\n\t\t// If the result contains a revert reason, return it.\n\t\treturnVal := fmt.Sprintf(\"%x\", result.Return())\n\t\tif len(result.Revert()) > 0 {\n\t\t\treturnVal = fmt.Sprintf(\"%x\", result.Revert())\n\t\t}\n\t\treturn &ethapi.ExecutionResult{\n\t\t\tGas:         result.UsedGas,\n\t\t\tFailed:      result.Failed(),\n\t\t\tReturnValue: returnVal,\n\t\t\tStructLogs:  ethapi.FormatLogs(tracer.StructLogs()),\n\t\t}, nil\n\n\tcase *Tracer:\n\t\treturn tracer.GetResult()\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"bad tracer type %T\", tracer))\n\t}\n}\n\n// APIs return the collection of RPC services the tracer package offers.\nfunc APIs(backend Backend) []rpc.API {\n\t// Append all the local APIs and return\n\treturn []rpc.API{\n\t\t{\n\t\t\tNamespace: \"debug\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   NewAPI(backend),\n\t\t\tPublic:    false,\n\t\t},\n\t}\n}\n
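\n// Example (illustrative sketch only, not part of the upstream API): since the\n// service above is registered under the \"debug\" namespace, a Go client could\n// invoke it over JSON-RPC roughly as follows, assuming a node with the debug\n// API enabled and a hypothetical transaction hash txHash:\n//\n//\tclient, _ := rpc.Dial(\"http://localhost:8545\")\n//\tvar trace interface{}\n//\terr := client.CallContext(context.Background(), &trace,\n//\t\t\"debug_traceTransaction\", txHash, map[string]string{\"tracer\": \"callTracer\"})\n"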
  },
  {
    "path": "eth/tracers/api_test.go",
    "content": "// Copyright 2021 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage tracers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/ecdsa\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/consensus\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/internal/ethapi\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\nvar (\n\terrStateNotFound       = errors.New(\"state not found\")\n\terrBlockNotFound       = errors.New(\"block not found\")\n\terrTransactionNotFound = errors.New(\"transaction not found\")\n)\n\ntype testBackend struct {\n\tchainConfig *params.ChainConfig\n\tengine      consensus.Engine\n\tchaindb     ethdb.Database\n\tchain       *core.BlockChain\n}\n\nfunc newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend {\n\tbackend := &testBackend{\n\t\tchainConfig: params.TestChainConfig,\n\t\tengine:      ethash.NewFaker(),\n\t\tchaindb:     rawdb.NewMemoryDatabase(),\n\t}\n\t// Generate blocks for testing\n\tgspec.Config = backend.chainConfig\n\tvar (\n\t\tgendb   = rawdb.NewMemoryDatabase()\n\t\tgenesis = gspec.MustCommit(gendb)\n\t)\n\tblocks, _ := core.GenerateChain(backend.chainConfig, genesis, backend.engine, gendb, n, generator)\n\n\t// Import the canonical chain\n\tgspec.MustCommit(backend.chaindb)\n\tcacheConfig := &core.CacheConfig{\n\t\tTrieCleanLimit:    256,\n\t\tTrieDirtyLimit:    256,\n\t\tTrieTimeLimit:     5 * time.Minute,\n\t\tSnapshotLimit:     0,\n\t\tTrieDirtyDisabled: true, // Archive mode\n\t}\n\tchain, err := core.NewBlockChain(backend.chaindb, cacheConfig, backend.chainConfig, backend.engine, vm.Config{}, nil, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create tester chain: %v\", err)\n\t}\n\tif n, err := chain.InsertChain(blocks); err != nil {\n\t\tt.Fatalf(\"block %d: failed to insert into chain: %v\", n, err)\n\t}\n\tbackend.chain = chain\n\treturn backend\n}\n\nfunc (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {\n\treturn b.chain.GetHeaderByHash(hash), nil\n}\n\nfunc (b *testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {\n\tif number == 
rpc.PendingBlockNumber || number == rpc.LatestBlockNumber {\n\t\treturn b.chain.CurrentHeader(), nil\n\t}\n\treturn b.chain.GetHeaderByNumber(uint64(number)), nil\n}\n\nfunc (b *testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {\n\treturn b.chain.GetBlockByHash(hash), nil\n}\n\nfunc (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {\n\tif number == rpc.PendingBlockNumber || number == rpc.LatestBlockNumber {\n\t\treturn b.chain.CurrentBlock(), nil\n\t}\n\treturn b.chain.GetBlockByNumber(uint64(number)), nil\n}\n\nfunc (b *testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {\n\ttx, hash, blockNumber, index := rawdb.ReadTransaction(b.chaindb, txHash)\n\tif tx == nil {\n\t\treturn nil, common.Hash{}, 0, 0, errTransactionNotFound\n\t}\n\treturn tx, hash, blockNumber, index, nil\n}\n\nfunc (b *testBackend) RPCGasCap() uint64 {\n\treturn 25000000\n}\n\nfunc (b *testBackend) ChainConfig() *params.ChainConfig {\n\treturn b.chainConfig\n}\n\nfunc (b *testBackend) Engine() consensus.Engine {\n\treturn b.engine\n}\n\nfunc (b *testBackend) ChainDb() ethdb.Database {\n\treturn b.chaindb\n}\n\nfunc (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) {\n\tstatedb, err := b.chain.StateAt(block.Root())\n\tif err != nil {\n\t\treturn nil, nil, errStateNotFound\n\t}\n\treturn statedb, func() {}, nil\n}\n\nfunc (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) {\n\tparent := b.chain.GetBlock(block.ParentHash(), block.NumberU64()-1)\n\tif parent == nil {\n\t\treturn nil, vm.BlockContext{}, nil, nil, errBlockNotFound\n\t}\n\tstatedb, err := b.chain.StateAt(parent.Root())\n\tif err != nil {\n\t\treturn nil, vm.BlockContext{}, nil, nil, errStateNotFound\n\t}\n\tif txIndex == 0 && len(block.Transactions()) == 0 {\n\t\treturn nil, vm.BlockContext{}, statedb, func() {}, nil\n\t}\n\t// Recompute transactions up to the target index.\n\tsigner := types.MakeSigner(b.chainConfig, block.Number())\n\tfor idx, tx := range block.Transactions() {\n\t\tmsg, _ := tx.AsMessage(signer)\n\t\ttxContext := core.NewEVMTxContext(msg)\n\t\tcontext := core.NewEVMBlockContext(block.Header(), b.chain, nil)\n\t\tif idx == txIndex {\n\t\t\treturn msg, context, statedb, func() {}, nil\n\t\t}\n\t\tvmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{})\n\t\tif _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {\n\t\t\treturn nil, vm.BlockContext{}, nil, nil, fmt.Errorf(\"transaction %#x failed: %v\", tx.Hash(), err)\n\t\t}\n\t\tstatedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))\n\t}\n\treturn nil, vm.BlockContext{}, nil, nil, fmt.Errorf(\"transaction index %d out of range for block %#x\", txIndex, block.Hash())\n}\n\nfunc (b *testBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) {\n\tvar result []*state.StateDB\n\tfor number := fromBlock.NumberU64(); number <= toBlock.NumberU64(); number += 1 {\n\t\tblock := b.chain.GetBlockByNumber(number)\n\t\tif block == nil {\n\t\t\treturn nil, nil, errBlockNotFound\n\t\t}\n\t\tstatedb, err := b.chain.StateAt(block.Root())\n\t\tif err != nil {\n\t\t\treturn nil, nil, errStateNotFound\n\t\t}\n\t\tresult = 
append(result, statedb)\n\t}\n\treturn result, func() {}, nil\n}\n\nfunc TestTraceCall(t *testing.T) {\n\tt.Parallel()\n\n\t// Initialize test accounts\n\taccounts := newAccounts(3)\n\tgenesis := &core.Genesis{Alloc: core.GenesisAlloc{\n\t\taccounts[0].addr: {Balance: big.NewInt(params.Ether)},\n\t\taccounts[1].addr: {Balance: big.NewInt(params.Ether)},\n\t\taccounts[2].addr: {Balance: big.NewInt(params.Ether)},\n\t}}\n\tgenBlocks := 10\n\tsigner := types.HomesteadSigner{}\n\tapi := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {\n\t\t// Transfer from account[0] to account[1]\n\t\t//    value: 1000 wei\n\t\t//    fee:   0 wei\n\t\ttx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, big.NewInt(0), nil), signer, accounts[0].key)\n\t\tb.AddTx(tx)\n\t}))\n\n\tvar testSuite = []struct {\n\t\tblockNumber rpc.BlockNumber\n\t\tcall        ethapi.CallArgs\n\t\tconfig      *TraceConfig\n\t\texpectErr   error\n\t\texpect      interface{}\n\t}{\n\t\t// Standard JSON trace upon the genesis, plain transfer.\n\t\t{\n\t\t\tblockNumber: rpc.BlockNumber(0),\n\t\t\tcall: ethapi.CallArgs{\n\t\t\t\tFrom:  &accounts[0].addr,\n\t\t\t\tTo:    &accounts[1].addr,\n\t\t\t\tValue: (*hexutil.Big)(big.NewInt(1000)),\n\t\t\t},\n\t\t\tconfig:    nil,\n\t\t\texpectErr: nil,\n\t\t\texpect: &ethapi.ExecutionResult{\n\t\t\t\tGas:         params.TxGas,\n\t\t\t\tFailed:      false,\n\t\t\t\tReturnValue: \"\",\n\t\t\t\tStructLogs:  []ethapi.StructLogRes{},\n\t\t\t},\n\t\t},\n\t\t// Standard JSON trace upon the head, plain transfer.\n\t\t{\n\t\t\tblockNumber: rpc.BlockNumber(genBlocks),\n\t\t\tcall: ethapi.CallArgs{\n\t\t\t\tFrom:  &accounts[0].addr,\n\t\t\t\tTo:    &accounts[1].addr,\n\t\t\t\tValue: (*hexutil.Big)(big.NewInt(1000)),\n\t\t\t},\n\t\t\tconfig:    nil,\n\t\t\texpectErr: nil,\n\t\t\texpect: &ethapi.ExecutionResult{\n\t\t\t\tGas:         params.TxGas,\n\t\t\t\tFailed:      false,\n\t\t\t\tReturnValue: \"\",\n\t\t\t\tStructLogs:  []ethapi.StructLogRes{},\n\t\t\t},\n\t\t},\n\t\t// Standard JSON trace upon a non-existent block, error expected\n\t\t{\n\t\t\tblockNumber: rpc.BlockNumber(genBlocks + 1),\n\t\t\tcall: ethapi.CallArgs{\n\t\t\t\tFrom:  &accounts[0].addr,\n\t\t\t\tTo:    &accounts[1].addr,\n\t\t\t\tValue: (*hexutil.Big)(big.NewInt(1000)),\n\t\t\t},\n\t\t\tconfig:    nil,\n\t\t\texpectErr: fmt.Errorf(\"block #%d not found\", genBlocks+1),\n\t\t\texpect:    nil,\n\t\t},\n\t\t// Standard JSON trace upon the latest block\n\t\t{\n\t\t\tblockNumber: rpc.LatestBlockNumber,\n\t\t\tcall: ethapi.CallArgs{\n\t\t\t\tFrom:  &accounts[0].addr,\n\t\t\t\tTo:    &accounts[1].addr,\n\t\t\t\tValue: (*hexutil.Big)(big.NewInt(1000)),\n\t\t\t},\n\t\t\tconfig:    nil,\n\t\t\texpectErr: nil,\n\t\t\texpect: &ethapi.ExecutionResult{\n\t\t\t\tGas:         params.TxGas,\n\t\t\t\tFailed:      false,\n\t\t\t\tReturnValue: \"\",\n\t\t\t\tStructLogs:  []ethapi.StructLogRes{},\n\t\t\t},\n\t\t},\n\t\t// Standard JSON trace upon the pending block\n\t\t{\n\t\t\tblockNumber: rpc.PendingBlockNumber,\n\t\t\tcall: ethapi.CallArgs{\n\t\t\t\tFrom:  &accounts[0].addr,\n\t\t\t\tTo:    &accounts[1].addr,\n\t\t\t\tValue: (*hexutil.Big)(big.NewInt(1000)),\n\t\t\t},\n\t\t\tconfig:    nil,\n\t\t\texpectErr: nil,\n\t\t\texpect: &ethapi.ExecutionResult{\n\t\t\t\tGas:         params.TxGas,\n\t\t\t\tFailed:      false,\n\t\t\t\tReturnValue: \"\",\n\t\t\t\tStructLogs:  []ethapi.StructLogRes{},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, testspec := range testSuite {\n\t\tresult, err := 
api.TraceCall(context.Background(), testspec.call, rpc.BlockNumberOrHash{BlockNumber: &testspec.blockNumber}, testspec.config)\n\t\tif testspec.expectErr != nil {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Expect error %v, get nothing\", testspec.expectErr)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(err, testspec.expectErr) {\n\t\t\t\tt.Errorf(\"Error mismatch, want %v, get %v\", testspec.expectErr, err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Expect no error, get %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(result, testspec.expect) {\n\t\t\t\tt.Errorf(\"Result mismatch, want %v, get %v\", testspec.expect, result)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestTraceTransaction(t *testing.T) {\n\tt.Parallel()\n\n\t// Initialize test accounts\n\taccounts := newAccounts(2)\n\tgenesis := &core.Genesis{Alloc: core.GenesisAlloc{\n\t\taccounts[0].addr: {Balance: big.NewInt(params.Ether)},\n\t\taccounts[1].addr: {Balance: big.NewInt(params.Ether)},\n\t}}\n\ttarget := common.Hash{}\n\tsigner := types.HomesteadSigner{}\n\tapi := NewAPI(newTestBackend(t, 1, genesis, func(i int, b *core.BlockGen) {\n\t\t// Transfer from account[0] to account[1]\n\t\t//    value: 1000 wei\n\t\t//    fee:   0 wei\n\t\ttx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, big.NewInt(0), nil), signer, accounts[0].key)\n\t\tb.AddTx(tx)\n\t\ttarget = tx.Hash()\n\t}))\n\tresult, err := api.TraceTransaction(context.Background(), target, nil)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to trace transaction %v\", err)\n\t}\n\tif !reflect.DeepEqual(result, &ethapi.ExecutionResult{\n\t\tGas:         params.TxGas,\n\t\tFailed:      false,\n\t\tReturnValue: \"\",\n\t\tStructLogs:  []ethapi.StructLogRes{},\n\t}) {\n\t\tt.Error(\"Transaction tracing result is different\")\n\t}\n}\n\nfunc TestTraceBlock(t *testing.T) {\n\tt.Parallel()\n\n\t// Initialize test accounts\n\taccounts := newAccounts(3)\n\tgenesis := &core.Genesis{Alloc: core.GenesisAlloc{\n\t\taccounts[0].addr: {Balance: big.NewInt(params.Ether)},\n\t\taccounts[1].addr: {Balance: big.NewInt(params.Ether)},\n\t\taccounts[2].addr: {Balance: big.NewInt(params.Ether)},\n\t}}\n\tgenBlocks := 10\n\tsigner := types.HomesteadSigner{}\n\tapi := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) {\n\t\t// Transfer from account[0] to account[1]\n\t\t//    value: 1000 wei\n\t\t//    fee:   0 wei\n\t\ttx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, big.NewInt(0), nil), signer, accounts[0].key)\n\t\tb.AddTx(tx)\n\t}))\n\n\tvar testSuite = []struct {\n\t\tblockNumber rpc.BlockNumber\n\t\tconfig      *TraceConfig\n\t\texpect      interface{}\n\t\texpectErr   error\n\t}{\n\t\t// Trace genesis block, expect error\n\t\t{\n\t\t\tblockNumber: rpc.BlockNumber(0),\n\t\t\tconfig:      nil,\n\t\t\texpect:      nil,\n\t\t\texpectErr:   errors.New(\"genesis is not traceable\"),\n\t\t},\n\t\t// Trace head block\n\t\t{\n\t\t\tblockNumber: rpc.BlockNumber(genBlocks),\n\t\t\tconfig:      nil,\n\t\t\texpectErr:   nil,\n\t\t\texpect: []*txTraceResult{\n\t\t\t\t{\n\t\t\t\t\tResult: &ethapi.ExecutionResult{\n\t\t\t\t\t\tGas:         params.TxGas,\n\t\t\t\t\t\tFailed:      false,\n\t\t\t\t\t\tReturnValue: \"\",\n\t\t\t\t\t\tStructLogs:  []ethapi.StructLogRes{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Trace non-existent block\n\t\t{\n\t\t\tblockNumber: rpc.BlockNumber(genBlocks + 1),\n\t\t\tconfig:      
nil,\n\t\t\texpectErr:   fmt.Errorf(\"block #%d not found\", genBlocks+1),\n\t\t\texpect:      nil,\n\t\t},\n\t\t// Trace latest block\n\t\t{\n\t\t\tblockNumber: rpc.LatestBlockNumber,\n\t\t\tconfig:      nil,\n\t\t\texpectErr:   nil,\n\t\t\texpect: []*txTraceResult{\n\t\t\t\t{\n\t\t\t\t\tResult: &ethapi.ExecutionResult{\n\t\t\t\t\t\tGas:         params.TxGas,\n\t\t\t\t\t\tFailed:      false,\n\t\t\t\t\t\tReturnValue: \"\",\n\t\t\t\t\t\tStructLogs:  []ethapi.StructLogRes{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// Trace pending block\n\t\t{\n\t\t\tblockNumber: rpc.PendingBlockNumber,\n\t\t\tconfig:      nil,\n\t\t\texpectErr:   nil,\n\t\t\texpect: []*txTraceResult{\n\t\t\t\t{\n\t\t\t\t\tResult: &ethapi.ExecutionResult{\n\t\t\t\t\t\tGas:         params.TxGas,\n\t\t\t\t\t\tFailed:      false,\n\t\t\t\t\t\tReturnValue: \"\",\n\t\t\t\t\t\tStructLogs:  []ethapi.StructLogRes{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, testspec := range testSuite {\n\t\tresult, err := api.TraceBlockByNumber(context.Background(), testspec.blockNumber, testspec.config)\n\t\tif testspec.expectErr != nil {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Expect error %v, get nothing\", testspec.expectErr)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(err, testspec.expectErr) {\n\t\t\t\tt.Errorf(\"Error mismatch, want %v, get %v\", testspec.expectErr, err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Expect no error, get %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(result, testspec.expect) {\n\t\t\t\tt.Errorf(\"Result mismatch, want %v, get %v\", testspec.expect, result)\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype Account struct {\n\tkey  *ecdsa.PrivateKey\n\taddr common.Address\n}\n\ntype Accounts []Account\n\nfunc (a Accounts) Len() int           { return len(a) }\nfunc (a Accounts) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 }\n\nfunc newAccounts(n int) (accounts Accounts) {\n\tfor i := 0; i < n; i++ {\n\t\tkey, _ := crypto.GenerateKey()\n\t\taddr := crypto.PubkeyToAddress(key.PublicKey)\n\t\taccounts = append(accounts, Account{key: key, addr: addr})\n\t}\n\tsort.Sort(accounts)\n\treturn accounts\n}\n"
  },
  {
    "path": "eth/tracers/internal/tracers/4byte_tracer.js",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// 4byteTracer searches for 4byte-identifiers, and collects them for post-processing.\n// It collects the methods identifiers along with the size of the supplied data, so\n// a reversed signature can be matched against the size of the data.\n//\n// Example:\n//   > debug.traceTransaction( \"0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de\", {tracer: \"4byteTracer\"})\n//   {\n//     0x27dc297e-128: 1,\n//     0x38cc4831-0: 2,\n//     0x524f3889-96: 1,\n//     0xadf59f99-288: 1,\n//     0xc281d19e-0: 1\n//   }\n{\n\t// ids aggregates the 4byte ids found.\n\tids : {},\n\n\t// callType returns 'false' for non-calls, or the peek-index for the first param\n\t// after 'value', i.e. meminstart.\n\tcallType: function(opstr){\n\t\tswitch(opstr){\n\t\tcase \"CALL\": case \"CALLCODE\":\n\t\t\t// gas, addr, val, memin, meminsz, memout, memoutsz\n\t\t\treturn 3; // stack ptr to memin\n\n\t\tcase \"DELEGATECALL\": case \"STATICCALL\":\n\t\t\t// gas, addr, memin, meminsz, memout, memoutsz\n\t\t\treturn 2; // stack ptr to memin\n\t\t}\n\t\treturn false;\n\t},\n\n\t// store save the given indentifier and datasize.\n\tstore: function(id, size){\n\t\tvar key = \"\" + toHex(id) + \"-\" + size;\n\t\tthis.ids[key] = this.ids[key] + 1 || 1;\n\t},\n\n\t// step is invoked for every opcode that the VM executes.\n\tstep: function(log, db) {\n\t\t// Skip any opcodes that are not internal calls\n\t\tvar ct = this.callType(log.op.toString());\n\t\tif (!ct) {\n\t\t\treturn;\n\t\t}\n\t\t// Skip any pre-compile invocations, those are just fancy opcodes\n\t\tif (isPrecompiled(toAddress(log.stack.peek(1).toString(16)))) {\n\t\t\treturn;\n\t\t}\n\t\t// Gather internal call details\n\t\tvar inSz = log.stack.peek(ct + 1).valueOf();\n\t\tif (inSz >= 4) {\n\t\t\tvar inOff = log.stack.peek(ct).valueOf();\n\t\t\tthis.store(log.memory.slice(inOff, inOff + 4), inSz-4);\n\t\t}\n\t},\n\n\t// fault is invoked when the actual execution of an opcode fails.\n\tfault: function(log, db) { },\n\n\t// result is invoked when all the opcodes have been iterated over and returns\n\t// the final result of the tracing.\n\tresult: function(ctx) {\n\t\t// Save the outer calldata also\n\t\tif (ctx.input.length >= 4) {\n\t\t\tthis.store(slice(ctx.input, 0, 4), ctx.input.length-4)\n\t\t}\n\t\treturn this.ids;\n\t},\n}\n"
  },
  {
    "path": "eth/tracers/internal/tracers/assets.go",
    "content": "// Code generated by go-bindata. DO NOT EDIT.\n// sources:\n// 4byte_tracer.js (2.933kB)\n// bigram_tracer.js (1.712kB)\n// call_tracer.js (8.956kB)\n// evmdis_tracer.js (4.195kB)\n// noop_tracer.js (1.271kB)\n// opcount_tracer.js (1.372kB)\n// prestate_tracer.js (4.287kB)\n// trigram_tracer.js (1.788kB)\n// unigram_tracer.js (1.469kB)\n\npackage tracers\n\nimport (\n\t\"bytes\"\n\t\"compress/gzip\"\n\t\"crypto/sha256\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc bindataRead(data []byte, name string) ([]byte, error) {\n\tgz, err := gzip.NewReader(bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read %q: %w\", name, err)\n\t}\n\n\tvar buf bytes.Buffer\n\t_, err = io.Copy(&buf, gz)\n\tclErr := gz.Close()\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"read %q: %w\", name, err)\n\t}\n\tif clErr != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\ntype asset struct {\n\tbytes  []byte\n\tinfo   os.FileInfo\n\tdigest [sha256.Size]byte\n}\n\ntype bindataFileInfo struct {\n\tname    string\n\tsize    int64\n\tmode    os.FileMode\n\tmodTime time.Time\n}\n\nfunc (fi bindataFileInfo) Name() string {\n\treturn fi.name\n}\nfunc (fi bindataFileInfo) Size() int64 {\n\treturn fi.size\n}\nfunc (fi bindataFileInfo) Mode() os.FileMode {\n\treturn fi.mode\n}\nfunc (fi bindataFileInfo) ModTime() time.Time {\n\treturn fi.modTime\n}\nfunc (fi bindataFileInfo) IsDir() bool {\n\treturn false\n}\nfunc (fi bindataFileInfo) Sys() interface{} {\n\treturn nil\n}\n\nvar __4byte_tracerJs = []byte(\"\\x1f\\x8b\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\x94\\x56\\x5b\\x6f\\xdb\\x4a\\x0e\\x7e\\xb6\\x7f\\x05\\xd7\\x2f\\xb5\\x51\\x59\\x8e\\x2f\\x89\\x2f\\xd9\\x16\\xf0\\xe6\\xa4\\x6d\\x80\\x9c\\x24\\x88\\xdd\\x3d\\x28\\x16\\xfb\\x30\\x9e\\xa1\\xac\\xd9\\xc8\\x33\\xc2\\x0c\\xe5\\x4b\\x73\\xf2\\xdf\\x17\\x1c\\x49\\x89\\x93\\xd3\\x62\\xbb\\x4f\\x96\\x47\\xc3\\x8f\\x1f\\xc9\\x8f\\xa4\\x7a\\x3d\\xb8\\xb0\\xf9\\xc1\\xe9\\x75\\x4a\\x30\\x38\\xe9\\x8f\\x61\\x99\\x22\\xac\\x6d\\x17\\x29\\x45\\x87\\xc5\\x06\\xe6\\x05\\xa5\\xd6\\xf9\\x66\\xaf\\x07\\xcb\\x54\\x7b\\x48\\x74\\x86\\xa0\\x3d\\xe4\\xc2\\x11\\xd8\\x04\\xe8\\xcd\\xfd\\x4c\\xaf\\x9c\\x70\\x87\\xb8\\xd9\\xeb\\x95\\x36\\x3f\\x7c\\xcd\\x08\\x89\\x43\\x04\\x6f\\x13\\xda\\x09\\x87\\x33\\x38\\xd8\\x02\\xa4\\x30\\xe0\\x50\\x69\\x4f\\x4e\\xaf\\x0a\\x42\\xd0\\x04\\xc2\\xa8\\x9e\\x75\\xb0\\xb1\\x4a\\x27\\x07\\x86\\xd4\\x04\\x85\\x51\\xe8\\x82\\x6b\\x42\\xb7\\xf1\\x35\\x8f\\xcf\\x37\\x5f\\xe1\\x1a\\xbd\\x47\\x07\\x9f\\xd1\\xa0\\x13\\x19\\xdc\\x15\\xab\\x4c\\x4b\\xb8\\xd6\\x12\\x8d\\x47\\x10\\x1e\\x72\\x3e\\xf1\\x29\\x2a\\x58\\x05\\x38\\x36\\xfc\\xc4\\x54\\x16\\x15\\x15\\xf8\\x64\\x0b\\xa3\\x04\\x69\\x6b\\x22\\x40\\xcd\\xcc\\x61\\x8b\\xce\\x6b\\x6b\\x60\\x58\\xbb\\xaa\\x00\\x23\\xb0\\x8e\\x41\\xda\\x82\\x38\\x00\\x07\\x36\\x67\\xbb\\x0e\\x08\\x73\\x80\\x4c\\xd0\\x8b\\xe9\\x2f\\x24\\xe4\\x25\\x6e\\x05\\xda\\x04\\x37\\xa9\\xcd\\x11\\x28\\x15\\xc4\\x51\\xef\\x74\\x96\\xc1\\x0a\\xa1\\xf0\\x98\\x14\\x59\\xc4\\x68\\xab\\x82\\xe0\\x8f\\xab\\xe5\\x97\\xdb\\xaf\\x4b\\x98\\xdf\\x7c\\x83\\x3f\\xe6\\xf7\\xf7\\xf3\\x9b\\xe5\\xb7\\x73\\xd8\\x69\\x4a\\x6d\\x41\\x80\\x5b\\x2c\\xa1\\xf4\\x26\\xcf\\x34\\x2a\\xd8\\x09\\xe7\\x84\\xa1\\x03\\xd8\\x84\\x11\\x7e\\xbf\\xbc\\xbf\\xf8\\x32\\xbf\\x59\\xce\\xff\\x71\\x75\\x7d\\xb5\\xfc\\x06\\xd6\\xc1\\xa7\\xab\\xe5\\xcd\\xe5\\x62\\x01\\x9f\\x6e\\xef\\x61\\x0e\\x77\\xf3\\xfb\\xe5\\xd5\\xc5\\xd7\\xeb\\xf9\\x3d\\xdc\\x7d\\xbd\\xbf\\xbb\\x5d\\x5c\\xc6\\xb0\
\x40\\x66\\x85\\x6c\\xff\\xbf\\x73\\x9e\\x84\\xea\\x39\\x04\\x85\\x24\\x74\\xe6\\xeb\\x4c\\x7c\\xb3\\x05\\xf8\\xd4\\x16\\x99\\x82\\x54\\x6c\\x11\\x1c\\x4a\\xd4\\x5b\\x54\\x20\\x40\\xda\\xfc\\xf0\\xcb\\x45\\x65\\x2c\\x91\\x59\\xb3\\x0e\\x31\\xff\\x54\\x90\\x70\\x95\\x80\\xb1\\x14\\x81\\x47\\x84\\xbf\\xa7\\x44\\xf9\\xac\\xd7\\xdb\\xed\\x76\\xf1\\xda\\x14\\xb1\\x75\\xeb\\x5e\\x56\\xc2\\xf9\\xde\\xc7\\xb8\\xc9\\x98\\xa3\\xd5\\x81\\x70\\xe9\\x84\\x44\\x07\\x1e\\x85\\x93\\x29\\xfa\\x10\\x4c\\x78\\xd1\\xd5\\x0a\\x0d\\xe9\\x44\\xa3\\xf3\\x11\\x8b\\x14\\xa4\\xcd\\x32\\x94\\xe4\\x99\\xc1\\x26\\x5c\\xcc\\xad\\xa7\\x6e\\xee\\xac\\x44\\xef\\xb5\\x59\\x73\\xe0\\x70\\x45\\xaf\\x2e\\xc2\\x06\\x29\\xb5\\xca\\xc3\\x11\\xdc\\xdb\\x68\\xbc\\xfe\\x8e\\x75\\x36\\x7c\\x91\\x97\\x65\\x54\\x82\\x44\\x04\\xde\\x86\\xe8\\xc1\\x21\\xcb\\x0c\\x15\\x78\\xbd\\x36\\x82\\x0a\\x87\\xa1\\x97\\x56\\x08\\x1b\\x41\\x92\\xc5\\x2e\\xd6\\x42\\x1b\\x4f\\x7f\\x01\\x64\\x9c\\xba\\x22\\x97\\x7b\\xb1\\xc9\\x33\\x9c\\xf1\\x33\\xc0\\x47\\x50\\xb8\\x2a\\xd6\\x31\\x71\\x0a\\x96\\x4e\\x18\\x2f\\x24\\x8b\\xbb\\x0d\\xad\\x93\\xfd\\xa0\\x3f\\xc2\\xd3\\xe9\\x18\\x87\\xa7\\x4a\\x9c\\x4c\\x86\\x67\\xd3\\x41\\x72\\x3a\\x9c\\x9c\\xf5\\x47\\x7d\\x3c\\x9b\\x26\\xa3\\x31\\x4e\\xc7\\xc3\\xd5\\x40\\x9e\\x9e\\xe1\\x58\\x4c\\x4e\\xc6\\xc3\\x55\\x1f\\xc5\\xc9\\x24\\x51\\xe3\\xd3\\x71\\x1f\\xa7\\x0a\\x5b\\x11\\x3c\\x06\\x60\\x37\\x83\\xd6\\x51\\xa6\\x5b\\x4f\\x9d\\xd2\\xfb\\x63\\xf9\\x03\\x70\\xb2\\x1f\\x8c\\x95\\x1c\\x4c\\xc7\\xd8\\xed\\x0f\\x26\\x33\\xe8\\x47\\x2f\\x6f\\x86\\x13\\x29\\x47\\x93\\x61\\xbf\\x7b\\x32\\x83\\xc1\\xd1\\xf9\\xe9\\x60\\x94\\x0c\\x27\\x93\\x69\\x77\\x7a\\xf6\\xda\\x40\\xa8\\xe4\\x74\\x9a\\x4c\\xa7\\xdd\\xc1\\xe4\\x0d\\x94\\x1c\\x4c\\xfa\\xaa\\x3f\\x45\\x86\\xea\\x97\\xc7\\x4f\\xcd\\xc7\\x66\\x83\\x07\\x8e\\xf2\\x20\\xd6\\x6b\\x87\\x6b\\x41\\x58\\x56\\x2d\\x30\\x0e\\x2f\\x12\\x1e\\x16\\x71\\xb3\\xc1\\xcf\\x33\\x78\\x7c\\x8a\\x9a\\xc1\\x46\\x8a\\x2c\\x5b\\x1e\\x72\\x56\\x35\\x15\\xce\\x78\\x78\\x97\\x88\\xcc\\xe3\\xbb\\xa0\\x0b\\x63\\x4d\\x97\\x2f\\x78\\x1e\\x1f\\x01\\x2f\\x47\\x7c\\xe8\\x6a\\xa3\\x70\\x1f\\x2e\\xf0\\x51\\xa2\\x9d\\x27\\x1e\\xb3\\x62\\x13\\x10\\x45\\xc2\\xd3\\xe4\\xdd\\x56\\x64\\x05\\xbe\\x8b\\x40\\xc7\\x18\\xc3\\x06\\x37\\x5c\\x54\\xe1\\x28\\x6e\\x36\\x6a\\x97\\x33\\x48\\x0a\\x53\\x56\\xca\\xe6\\x9e\\x5c\\xe7\\xb1\\xd9\\x68\\xf8\\x9d\\x26\\x99\\x1e\\x1d\\x48\\xe1\\x11\\x5a\\x17\\xf3\\xeb\\xeb\\xd6\\x0c\\x5e\\xfe\\x5c\\xdc\\xfe\\x76\\xd9\\x9a\\x35\\x1b\\x0d\\x76\\xb9\\x16\\x2c\\x6d\\xa5\\x5c\\x04\\x5b\\x91\\x45\\xa5\\xbb\\xea\\xc7\\x7f\\x0f\\x0f\\xb6\\xa0\\xfa\\xd7\\x7f\\x67\\xb3\\x32\\x5e\\x18\\x9e\\x43\\xaf\\x07\\x9e\\x84\\x7c\\x80\\x9c\\x1c\\x90\\x2d\\xcd\\x9a\\xcf\\xae\\x7f\\xbb\\xbc\\xbe\\xfc\\x3c\\x5f\\x5e\\xbe\\xa2\\xb0\\x58\\xce\\x97\\x57\\x17\\xe5\\xd1\\x5f\\x49\\xfc\\x1f\\xfe\\x07\\x3f\\xf3\\xdf\\x68\\x3c\\x35\\x9f\\x6f\\x85\\x9a\\x9c\\x37\\x1b\\x75\\xd5\\x3c\\xf1\\x9c\\xf2\\x3c\\x8d\\xc2\\x18\\xd1\\x3c\\x3c\\xb9\\x2c\\x55\\x6b\\x86\\x3e\\xe7\\x8e\\xe1\\x0e\\x8a\\x9b\\x8d\\x70\\xff\\x28\\xdf\\x5a\\x45\\xa1\\xb9\\x42\\x86\\xb7\\xc2\\xc1\\x03\\x1e\\xe0\\x03\\xb4\\x5a\\xf0\\x1e\\xc8\\x7e\\xc1\\x7d\\x5b\\xab\\x0e\\xbc\\x87\\x56\\x97\\x4f\\xf8\\xe6\\x79\\xb3\\xd1\\xa0\\x54\\xfb\\x58\\x2b\\xff\\xaf\\x07\\x3c\\xfc\\x1b\\x3e\\xc0\\xeb\\xff\\xef\\xa1\\x0f\\x7f\\xfe\\x09\\xfd\\x57\\x34\\x31\\xe7\\x85\\xa1\\xcd\\xd6\\x3e\\xa0\\x0a\\x92\\xe1\\x01\\x70\\x00\\x9b\\x4b\\xab\\xaa\\x8d\\xc1\\x11\\xfc\\xf3\\x77\\xc0\\x3d\\xca\\x82\\xd0\\x07\\xba\\x98\\x1f\\xb1\\xcd\\xec\\x3a\\x02\\xb5\\xea\\x00\\xb3\\xed\\xf5\\x60\\xf1\\xa0\
\xf3\\xb0\\xb8\\x4a\\x14\\x5f\\xc2\\xf0\\x46\\x34\\x96\\x40\\x1b\\x42\\x67\\x44\\x16\\xa4\\xed\\xab\\xf8\\x24\\xd5\\x7c\\x6b\\xf5\\x31\\x6a\\x6c\\xf3\\x98\\xec\\x82\\x9c\\x36\\xeb\\x76\\xa7\\xc3\\x31\\xea\\x04\\xda\\x7f\\x93\\x54\\xfa\\xaa\\xd2\\x7f\\x5e\\x15\\xe3\\xd8\\x75\\xee\\xb0\\x2b\\xed\\x26\\x0f\\x5f\\x19\\x66\\x6b\\x65\\xd8\\xc3\\x3e\\x02\\x4a\\x2d\\xef\\x6f\\x87\\xf0\\x9f\\xc2\\x13\\x24\\xc2\\xc8\\x67\\xa2\\x15\\xbe\\xf6\\x77\\x0e\\x2b\\x63\\xd5\\x26\\x3b\\x57\\xca\\xa1\\xf7\\x81\\x51\\x50\\x42\\xcc\\x6d\\xd6\\xee\\x77\\x5e\\xc8\\xf5\\xcf\\x3a\\x9d\\xce\\xcf\\x48\\x7d\\x16\\x61\\xf7\\xbf\\x0a\\xbc\\x5e\\x62\\x55\\xfc\\xda\\x2c\\xbe\\xc3\\x07\\x78\\xe3\\x41\\x12\\x57\\xad\\x13\\x87\\x5e\\xbd\\x4d\\xda\\xcf\\x19\\x08\\xd7\\x3f\\x7e\\x80\\x51\\xe5\\xb2\\x84\\xb8\\x4d\\x92\\x1f\\x61\\xbc\\xb1\\x2f\\x65\\x12\\x14\\x17\\x22\\x62\\xd1\\xbb\\x43\\xec\\x79\\x6d\\xb5\\x03\\x48\\x54\\x61\\xbd\\x87\\x51\\x27\\x0a\\xd4\\xba\\xa3\\x4e\\x15\\x4f\\x2d\\x9d\\x44\\x14\\x19\\x1d\\x6b\\x67\\x97\\x56\\xdf\\x07\\x42\\x52\\x21\\xb2\\x4a\\x2e\\xfc\\xad\\x63\\x13\\x10\\xa6\\x56\\x54\\x52\\x6e\\xee\\x46\\xb0\\xff\\xa1\\x86\\xa0\\x76\\xe1\\xd0\\xff\\xc8\\x07\\x27\\x8f\\xfd\\xd4\\xe2\\x0a\\x3b\\x7f\\x85\\xdc\\x60\\x84\\x4e\\xf0\\x47\\x8f\\xdd\\x56\\x2d\\x56\\x0d\\xcd\\x00\\x57\\xce\\x42\\xce\\x7f\\x05\\x5c\\x2d\\x2e\\xde\\x1e\\x61\\xa9\\x36\\xca\\xf3\\x23\\x52\\x92\\xf6\\x2f\\xa2\\xae\\x9b\\xd9\\x16\\x3c\\x3f\\xb9\\x86\\xdc\\xc0\\x20\\x32\\x6f\\xab\\xaa\\x48\\xda\\xc7\\xda\\xe4\\x05\\xc5\\x19\\x9a\\x35\\xa5\\xc7\\x15\\x3a\\x4a\\x7a\\x99\\xe9\\xe7\\xcb\\x11\\x9c\\x44\\x21\\xd1\\x6f\\xcd\\xbb\\xa3\\xce\\xeb\\x29\\x53\\xf7\\x73\\xd9\\xc1\\x4f\\xcd\\xff\\x06\\x00\\x00\\xff\\xff\\x8e\\xc8\\x27\\x72\\x75\\x0b\\x00\\x00\")\n\nfunc _4byte_tracerJsBytes() ([]byte, error) {\n\treturn bindataRead(\n\t\t__4byte_tracerJs,\n\t\t\"4byte_tracer.js\",\n\t)\n}\n\nfunc _4byte_tracerJs() (*asset, error) {\n\tbytes, err := _4byte_tracerJsBytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo := bindataFileInfo{name: \"4byte_tracer.js\", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}\n\ta := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb4, 0xc5, 0x48, 0x2d, 0xd9, 0x43, 0x95, 0x93, 0x3b, 0x93, 0x2c, 0x47, 0x8c, 0x84, 0x32, 0x3c, 0x8b, 0x2e, 0xf3, 0x72, 0xc4, 0x57, 0xe6, 0x3a, 0xb3, 0xdf, 0x1d, 0xbf, 0x45, 0x3, 0xfc, 0xa}}\n\treturn a, nil\n}\n\nvar _bigram_tracerJs = 
[]byte(\"\\x1f\\x8b\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\x8c\\x54\\x5b\\x6f\\xdb\\x36\\x14\\x7e\\xf7\\xaf\\xf8\\xde\\x92\\x20\\xae\\xd4\\x6e\\x2f\\x83\\x33\\x0f\\xd0\\xb2\\xa4\\x35\\x90\\xda\\x81\\xad\\xac\\x30\\x86\\x3d\\x50\\xd2\\x91\\x44\\x84\\x26\\x05\\xf2\\xd0\\xae\\x50\\xe4\\xbf\\x17\\x94\\x2c\\x5f\\x8a\\x14\\x8d\\x9e\\x64\\xf3\\xbb\\x9d\\x0b\\x15\\xc7\\xb8\\x35\\x4d\\x6b\\x65\\x55\\x33\\x7e\\x7b\\xff\\xe1\\x0f\\xa4\\x35\\xa1\\x32\\xef\\x88\\x6b\\xb2\\xe4\\x37\\x48\\x3c\\xd7\\xc6\\xba\\x51\\x1c\\x23\\xad\\xa5\\x43\\x29\\x15\\x41\\x3a\\x34\\xc2\\x32\\x4c\\x09\\xfe\\x01\\xaf\\x64\\x66\\x85\\x6d\\xa3\\x51\\x1c\\xf7\\x9c\\x57\\x8f\\x83\\x42\\x69\\x89\\xe0\\x4c\\xc9\\x3b\\x61\\x69\\x82\\xd6\\x78\\xe4\\x42\\xc3\\x52\\x21\\x1d\\x5b\\x99\\x79\\x26\\x48\\x86\\xd0\\x45\\x6c\\x2c\\x36\\xa6\\x90\\x65\\x1b\\x24\\x25\\xc3\\xeb\\x82\\x6c\\x67\\xcd\\x64\\x37\\x6e\\xc8\\xf1\\x71\\xfe\\x84\\x07\\x72\\x8e\\x2c\\x3e\\x92\\x26\\x2b\\x14\\x1e\\x7d\\xa6\\x64\\x8e\\x07\\x99\\x93\\x76\\x04\\xe1\\xd0\\x84\\x7f\\x5c\\x4d\\x05\\xb2\\x4e\\x2e\\x10\\xef\\x43\\x94\\xd5\\x3e\\x0a\\xee\\x8d\\xd7\\x85\\x60\\x69\\xf4\\x18\\x24\\x43\\x72\\x6c\\xc9\\x3a\\x69\\x34\\x7e\\x1f\\xac\\xf6\\x82\\x63\\x18\\x1b\\x44\\x2e\\x05\\x87\\x02\\x2c\\x4c\\x13\\x78\\x57\\x10\\xba\\x85\\x12\\x7c\\xa4\\xbe\\xa1\\x21\\xc7\\xba\\x0b\\x48\\xdd\\xd9\\xd4\\xa6\\x21\\x70\\x2d\\x38\\x54\\xbd\\x93\\x4a\\x21\\x23\\x78\\x47\\xa5\\x57\\xe3\\xa0\\x96\\x79\\xc6\\x97\\x59\\xfa\\x69\\xf1\\x94\\x22\\x99\\xaf\\xf1\\x25\\x59\\x2e\\x93\\x79\\xba\\xbe\\xc1\\x4e\\x72\\x6d\\x3c\\x83\\xb6\\xd4\\x4b\\xc9\\x4d\\xa3\\x24\\x15\\xd8\\x09\\x6b\\x85\\xe6\\x16\\xa6\\x0c\\x0a\\x9f\\xef\\x96\\xb7\\x9f\\x92\\x79\\x9a\\xfc\\x3d\\x7b\\x98\\xa5\\x6b\\x18\\x8b\\xfb\\x59\\x3a\\xbf\\x5b\\xad\\x70\\xbf\\x58\\x22\\xc1\\x63\\xb2\\x4c\\x67\\xb7\\x4f\\x0f\\xc9\\x12\\x8f\\x4f\\xcb\\xc7\\xc5\\xea\\x2e\\xc2\\x8a\\x42\\x2a\\x0a\\xfc\\x5f\\xf7\\xbc\\xec\\xa6\\x67\\x09\\x05\\xb1\\x90\\xca\\x0d\\x9d\\x58\\x1b\\x0f\\x57\\x1b\\xaf\\x0a\\xd4\\x62\\x4b\\xb0\\x94\\x93\\xdc\\x52\\x01\\x81\\xdc\\x34\\xed\\x9b\\x87\\x1a\\xb4\\x84\\x32\\xba\\xea\\x6a\\xfe\\xe9\\x42\\x62\\x56\\x42\\x1b\\x1e\\xc3\\x11\\xe1\\xcf\\x9a\\xb9\\x99\\xc4\\xf1\\x6e\\xb7\\x8b\\x2a\\xed\\x23\\x63\\xab\\x58\\xf5\\x72\\x2e\\xfe\\x2b\\x1a\\x8d\\xbe\\x8d\\x00\\x20\\x8e\\x51\\x4b\\xc7\\x61\\x38\\x41\\x36\\x37\\x5e\\x33\\xd9\\x6e\\xdf\\x4c\\x93\\x9b\\x82\\x90\\xc9\\xca\\x8a\\x8d\\xeb\\xd0\\x01\\x3a\\xc1\\xb7\\x97\\xf1\\xc0\\x55\\xc2\\xf1\\xa2\\x09\\xec\\xf0\\x06\\xd3\\x90\\xed\\xd6\\xaa\\x3b\\xef\\x0f\\x27\\xb8\\xb8\\x38\\xe0\\xe9\\x2b\\xe5\\x3e\\x00\\x50\\x50\\xc3\\x75\\xb0\\xd9\\x13\\x0f\\x8c\\x7f\\xc2\\xc1\\x04\\xef\\x0f\\x1c\\xc7\\xd4\\x39\\x48\\xbd\\x35\\xcf\\x54\\x74\\xdd\\xa6\\x2d\\xd9\\x76\\x48\\xd8\\x6d\\x4f\\x48\\xff\\xef\\xe7\\xbd\\x01\\xb9\\xa8\\x63\\x07\\xea\\x04\\xa5\\xd7\\x79\\xf0\\xbc\\x54\\xa6\\x1a\\xa3\\xc8\\xae\\xd0\\xd7\\x1e\\x9e\\xad\\x08\\x1b\\x8d\\x29\\x94\\xa9\\x22\\xd3\\x44\\x6c\\x56\\x6c\\xa5\\xae\\x2e\\xaf\\x6e\\xce\\x30\\x7d\\xdc\\x1e\\x56\\x51\\x1f\\xf2\\x14\\x23\\x4b\\x5c\\xee\\x31\\x53\\x70\\x2d\\x5d\\x74\\xa8\\xe5\\xea\\xe8\\x36\\xa8\\x3d\\x53\\x8b\\x13\\xd8\\xa2\\xb9\\xbe\\x78\\x77\\x71\\x6d\\x9a\\x9b\\x33\\x64\\xd0\\xec\\x30\\xa1\\xed\\xff\\x3d\\x53\\xfb\\xff\\x0f\\x52\\xe1\\x39\\x07\\x5c\\x5f\\x9f\\x4b\\xbc\\x9c\\xfd\\x22\\xe5\\x08\\xbf\\x92\\xc0\\x14\\x1f\\x7e\\x26\\x72\\x7c\\x3b\\xc9\\x8e\\x29\\x4e\\x93\\x9f\\x17\\x8f\\x69\\xdf\\xba\\xfe\\xfc\\xb8\\x38\\xa5\\xf0\\x8a\\x4f\\xa7\\xba\\xab\\xf7\\xb7\\x58\\xe4\\xec\\x85\\x3a\\xd9\\x14\\x53\\x42\\xe8\\x61\\xd6\\x65\\x7f\\xbf\\x82\\x4a\\x27\\xf1\\xea\
\x74\\x8f\\x36\\x96\\xdc\\x6b\\x3e\\x42\\xa9\\xce\\xab\\x17\\x75\\xfd\\xed\\xcc\\x88\\x34\\x24\\x87\\x0d\\xa6\\x02\\x66\\x4b\\x36\\x7c\\x99\\x61\\x89\\xbd\\xd5\\x6e\\x50\\x0c\\xb4\\x52\\x6a\\xa1\\x06\\xed\\xfd\\x25\\x66\\x2b\\x72\\xa9\\xab\\x3e\\x5a\\x7f\\x74\\x92\\x2d\\xe7\\xaf\\xa7\\x5b\\xd7\\x6b\\x1e\\x1b\\x7f\\xe8\\xce\\xcb\\xe8\\x7b\\x00\\x00\\x00\\xff\\xff\\x83\\xb5\\xcb\\x27\\xb0\\x06\\x00\\x00\")\n\nfunc bigram_tracerJsBytes() ([]byte, error) {\n\treturn bindataRead(\n\t\t_bigram_tracerJs,\n\t\t\"bigram_tracer.js\",\n\t)\n}\n\nfunc bigram_tracerJs() (*asset, error) {\n\tbytes, err := bigram_tracerJsBytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo := bindataFileInfo{name: \"bigram_tracer.js\", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}\n\ta := &asset{bytes: bytes, info: info, digest: [32]uint8{0x77, 0x6c, 0xd, 0x24, 0xf2, 0x49, 0xbd, 0x58, 0x8b, 0xb5, 0xd1, 0xc9, 0xcd, 0xcf, 0x5b, 0x3e, 0x5c, 0xfb, 0x14, 0x50, 0xe7, 0xe3, 0xb9, 0xd1, 0x54, 0x69, 0xe6, 0x5e, 0x45, 0xa6, 0x2c, 0x6c}}\n\treturn a, nil\n}\n\nvar _call_tracerJs = []byte(\"\\x1f\\x8b\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xd4\\x5a\\xdf\\x6f\\x1b\\x37\\xf2\\x7f\\x96\\xfe\\x8a\\x89\\x1f\\x6a\\x09\\x51\\x24\\x39\\xe9\\xb7\\x5f\\xc0\\xae\\x7a\\x50\\x1d\\x25\\x35\\xe0\\xc6\\x81\\xad\\x34\\x08\\x82\\x3c\\x50\\xbb\\xb3\\x12\\x6b\\x8a\\xdc\\x92\\x5c\\xc9\\xba\\xd6\\xff\\xfb\\x61\\x86\\xdc\\xd5\\xae\\x24\\x3b\\xbe\\x5e\\x71\\xe8\\xbd\\x69\\x97\\x33\\xc3\\xe1\\xcc\\x67\\x7e\\x71\\x35\\x18\\xc0\\xb9\\xc9\\x37\\x56\\xce\\x17\\x1e\\x5e\\x0e\\x4f\\xfe\\x1f\\xa6\\x0b\\x84\\xb9\\x79\\x81\\x7e\\x81\\x16\\x8b\\x25\\x8c\\x0b\\xbf\\x30\\xd6\\xb5\\x07\\x03\\x98\\x2e\\xa4\\x83\\x4c\\x2a\\x04\\xe9\\x20\\x17\\xd6\\x83\\xc9\\xc0\\xef\\xd0\\x2b\\x39\\xb3\\xc2\\x6e\\xfa\\xed\\xc1\\x20\\xf0\\x1c\\x5c\\x26\\x09\\x99\\x45\\x04\\x67\\x32\\xbf\\x16\\x16\\x4f\\x61\\x63\\x0a\\x48\\x84\\x06\\x8b\\xa9\\x74\\xde\\xca\\x59\\xe1\\x11\\xa4\\x07\\xa1\\xd3\\x81\\xb1\\xb0\\x34\\xa9\\xcc\\x36\\x24\\x52\\x7a\\x28\\x74\\x8a\\x96\\xb7\\xf6\\x68\\x97\\xae\\xd4\\xe3\\xed\\xbb\\x0f\\x70\\x89\\xce\\xa1\\x85\\xb7\\xa8\\xd1\\x0a\\x05\\xef\\x8b\\x99\\x92\\x09\\x5c\\xca\\x04\\xb5\\x43\\x10\\x0e\\x72\\x7a\\xe3\\x16\\x98\\xc2\\x8c\\xc5\\x11\\xe3\\x1b\\x52\\xe5\\x26\\xaa\\x02\\x6f\\x4c\\xa1\\x53\\xe1\\xa5\\xd1\\x3d\\x40\\x49\\x9a\\xc3\\x0a\\xad\\x93\\x46\\xc3\\xab\\x72\\xab\\x28\\xb0\\x07\\xc6\\x92\\x90\\x8e\\xf0\\x74\\x00\\x0b\\x26\\x27\\xbe\\x2e\\x08\\xbd\\x01\\x25\\xfc\\x96\\xf5\\x09\\x06\\xd9\\x9e\\x3b\\x05\\xa9\\x79\\x9b\\x85\\xc9\\x11\\xfc\\x42\\x78\\x3a\\xf5\\x5a\\x2a\\x05\\x33\\x84\\xc2\\x61\\x56\\xa8\\x1e\\x49\\x9b\\x15\\x1e\\x3e\\x5e\\x4c\\x7f\\xba\\xfa\\x30\\x85\\xf1\\xbb\\x4f\\xf0\\x71\\x7c\\x7d\\x3d\\x7e\\x37\\xfd\\x74\\x06\\x6b\\xe9\\x17\\xa6\\xf0\\x80\\x2b\\x0c\\xa2\\xe4\\x32\\x57\\x12\\x53\\x58\\x0b\\x6b\\x85\\xf6\\x1b\\x30\\x19\\x49\\xf8\\x79\\x72\\x7d\\xfe\\xd3\\xf8\\xdd\\x74\\xfc\\xe3\\xc5\\xe5\\xc5\\xf4\\x13\\x18\\x0b\\x6f\\x2e\\xa6\\xef\\x26\\x37\\x37\\xf0\\xe6\\xea\\x1a\\xc6\\xf0\\x7e\\x7c\\x3d\\xbd\\x38\\xff\\x70\\x39\\xbe\\x86\\xf7\\x1f\\xae\\xdf\\x5f\\xdd\\x4c\\xfa\\x70\\x83\\xa4\\x15\\x12\\xff\\xd7\\x6d\\x9e\\xb1\\xf7\\x2c\\x42\\x8a\\x5e\\x48\\xe5\\x4a\\x4b\\x7c\\x32\\x05\\xb8\\x85\\x29\\x54\\x0a\\x0b\\xb1\\x42\\xb0\\x98\\xa0\\x5c\\x61\\x0a\\x02\\x12\\x93\\x6f\\x9e\\xec\\x54\\x92\\x25\\x94\\xd1\\x73\\x3e\\xf3\\x83\\x80\\x84\\x8b\\x0c\\xb4\\xf1\\x3d\\x70\\x88\\xf0\\xfd\\xc2\\xfb\\xfc\\x74\\x30\\x58\\xaf\\xd7\\xfd\\xb9\\x2e\\xfa\\xc6\\xce\\x07\\x2a\\x88\\x73\\x83\\x1f\\xfa\\x6d\\x92\\x99\\x08\\xa5\\xa6\\x56\\x24\\x68\\xc9\\x39
\\x02\\xb2\\x82\\xcc\\xaf\\xcc\\x5a\\x83\\xb7\\x42\\x3b\\x91\\x90\\xab\\xe9\\x77\\xc2\\x60\\x14\\x1e\\xf0\\x8e\\x9e\\xbc\\x23\\xd0\\x82\\xc5\\xdc\\x58\\xfa\\xad\\x54\\x89\\x33\\xa9\\x3d\\x5a\\x2d\\x14\\xcb\\x76\\xb0\\x14\\x29\\xc2\\x6c\\x03\\xa2\\x2e\\xb0\\x57\\x3f\\x0c\\xc1\\x28\\xb8\\x1b\\xa4\\xce\\x8c\\x5d\\x32\\x2c\\xfb\\xed\\xdf\\xdb\\xad\\xa8\\xa1\\xf3\\x22\\xb9\\x25\\x05\\x49\\x7e\\x52\\x58\\x8b\\xda\\x93\\x29\\x0b\\xeb\\xe4\\x0a\\x99\\x04\\x02\\x4d\\xb4\\xe7\\xe4\\x97\\x9f\\x01\\xef\\x30\\x29\\x82\\xa4\\x56\\x25\\xe4\\x14\\x3e\\xff\\x7e\\xff\\xa5\\xd7\\x66\\xd1\\x29\\xba\\x04\\x75\\x8a\\x29\\x9f\\xef\\xd6\\xc1\\x7a\\xc1\\x16\\x85\\x35\\x1e\\xaf\\x10\\x7e\\x2d\\x9c\\xaf\\xd1\\x64\\xd6\\x2c\\x41\\x68\\x30\\x05\\x21\\xbe\\x6e\\x1d\\xa9\\xbd\\x61\\x81\\x82\\x7e\\x6b\\xb4\\xac\\x51\\xbf\\xdd\\xaa\\x98\\x4f\\x21\\x13\\xca\\x61\\xdc\\xd7\\x79\\xcc\\xe9\\x34\\x52\\xaf\\xcc\\x2d\\x49\\x36\\x96\\x20\\x6c\\x37\\x60\\xf2\\xc4\\xa4\\x31\\x18\\xe8\\x1c\\xd5\\x31\\xd0\\xf5\\xdb\\x2d\\xe2\\x3b\\x85\\xac\\xd0\\xbc\\x6d\\x47\\x99\\x79\\x0f\\xd2\\x59\\x17\\x7e\\x6f\\xb7\\x48\\xec\\xb9\\xc8\\x7d\\x61\\x91\\xed\\x89\\xd6\\x1a\\xeb\\x40\\x2e\\x97\\x98\\x4a\\xe1\\x51\\x6d\\xda\\xad\\xd6\\x4a\\xd8\\xb0\\x00\\x23\\x50\\x66\\xde\\x9f\\xa3\\x9f\\xd0\\x63\\xa7\\x7b\\xd6\\x6e\\xb5\\x64\\x06\\x9d\\xb0\\xfa\\x6c\\x34\\xe2\\xec\\x93\\x49\\x8d\\x69\\x10\\xdf\\xf2\\x0b\\xe9\\xfa\\x99\\x28\\x94\\xaf\\xf6\\x25\\xa6\\x96\\x45\\x5f\\x58\\x4d\\x3f\\xef\\x83\\x16\\x1f\\x11\\x8c\\x56\\x1b\\x48\\x28\\xcb\\x88\\x19\\x85\\xa7\\xdb\\x38\\x8f\\xcb\\x78\\x38\\xd7\\x83\\x4c\\x38\\x32\\xa1\\xcc\\x60\\x8d\\x90\\x5b\\x7c\\x91\\x2c\\x90\\x7c\\xa7\\x13\\x8c\\x5a\\xba\\x8d\\x63\\xa7\\x8e\\x80\\x76\\xeb\\x9b\\xbc\\xef\\xcd\\xbb\\x62\\x39\\x43\\xdb\\xe9\\xc2\\x37\\x30\\xbc\\xcb\\x86\\x5d\\x18\\x8d\\xf8\\x47\\xa9\\x7b\\xe4\\x89\\xfa\\x92\\x14\\x93\\xc7\\x83\\x32\\xff\\x8d\\xb7\\x52\\xcf\\xc3\\x59\\xa3\\xae\\x17\\x19\\x08\\xd0\\xb8\\x86\\xc4\\x68\\x06\\x35\\x79\\x65\\x86\\x52\\xcf\\x21\\xb1\\x28\\x3c\\xa6\\x3d\\x10\\x69\\x0a\\xde\\x04\\xe4\\x55\\x38\\x6b\\x6e\\x09\\xdf\\x7c\\x03\\x1d\\xda\\x6c\\x04\\xc7\\xe7\\xd7\\x93\\xf1\\x74\\x72\\x0c\\x7f\\xfc\\x01\\xe1\\xcd\\x51\\x78\\xf3\\xf2\\xa8\\x5b\\xd3\\x4c\\xea\\xab\\x2c\\x8b\\xca\\xb1\\xc0\\x7e\\x8e\\x78\\xdb\\x39\\xe9\\xf6\\x57\\x42\\x15\\x78\\x95\\x05\\x35\\x23\\xed\\x44\\xa7\\x30\\x8a\\x3c\\xcf\\x77\\x79\\x5e\\x36\\x78\\x88\\x69\\x30\\x80\\xb1\\x73\\xb8\\x9c\\x29\\xdc\\x0f\\xc8\\x18\\xb1\\x1c\\xbc\\xce\\x53\\xc6\\x22\\xf4\\x25\\x66\\x99\\x2b\\x24\\x54\\x95\\xbb\\x46\\xf3\\xb3\\xc6\\x2d\\xbf\\xc9\\xf1\\x14\\x00\\xc0\\xe4\\x3d\\x7e\\x41\\xb1\\xc0\\x2f\\xbc\\xf9\\x09\\xef\\xd8\\x47\\xa5\\x09\\x09\\x55\\xe3\\x34\\xb5\\xe8\\x5c\\xa7\\xdb\\x0d\\xe4\\x52\\xe7\\x85\\x3f\\x6d\\x90\\x2f\\x71\\x69\\xec\\xa6\\xef\\x28\\x21\\x75\\xf8\\x68\\xbd\\x70\\xd2\\x92\\x67\\x2e\\xdc\\x85\\x26\\x9e\\x88\\xd4\\xb7\\xc2\\x75\\xb6\\x4b\\xe7\\xc6\\xf9\\xd3\\x72\\x89\\x1e\\xca\\x35\\xb6\\x05\\xb1\\x1d\\x0f\\xef\\x8e\\xf7\\xad\\x35\\xec\\x6e\\x91\\x70\\xf2\\x5d\\x97\\x58\\xee\\xcf\\x2a\\x7c\\x57\\x69\\xa2\\x9f\\x17\\x6e\\xd1\\x61\\x38\\x6d\\x57\\xb7\\xa9\\x60\\x04\\xde\\x16\\x78\\x10\\xfe\\x0c\\xa9\\x7d\\x38\\x39\\x54\\x19\\xe5\\x12\\x6f\\x8b\\x84\\x61\\x35\\x17\\x9c\\x69\\x38\\xd2\\x05\\x65\\x5e\\x57\\xcc\\xd8\\xe6\\xde\\x98\\x7d\\x74\\x45\\x70\\xdd\\x4c\\x2e\\xdf\\xbc\\x9e\\xdc\\x4c\\xaf\\x3f\\x9c\\x4f\\x8f\\x6b\\x70\\x52\\x98\\x79\\x52\\xaa\\x79\\x06\\x85\\x7a\\xee\\x17\\xac\\x3f\\x89\\x6b\\xae\\x7e\\x26\\x9e\\x17\\x27\\x5f\\xc2\\x1b\\x18\\x1d\\x08\\xf9\\xd6\\xe3\\x1c\\xf0\\xf9\\x0b\\xcb\\xbe\\xdf\\x37\\x5f\\x93\\x34
\\x18\\xf3\\xaf\\x41\\x92\\x37\\x4c\\x5c\\x92\\x7b\\x53\\x12\\x3c\\xee\\xe7\\xbf\\x18\\x54\\xe9\\x8c\\x28\\x7e\\x14\\x4a\\xe8\\x04\\x1f\\xd1\\x79\\x1f\\x6b\\xf5\\xa4\\x79\\x20\\x0f\\x2d\\xd1\\x2f\\x4c\\xca\\x85\\x21\\x11\\xa1\\xb6\\x94\\x08\\x4a\\x8d\\xc6\\x7f\\x3f\\x1b\\x8d\\x2f\\x2f\\x6b\\xb9\\x88\\x9f\\xcf\\xaf\\x5e\\xd7\\xf3\\xd3\\xf1\\xeb\\xc9\\xe5\\xe4\\xed\\x78\\x3a\\xd9\\xa5\\xbd\\x99\\x8e\\xa7\\x17\\xe7\\xfc\\xb6\\x4c\\x5d\\x83\\x01\\xdc\\xdc\\xca\\x9c\\x2b\\x0c\\xe7\\x6d\\xb3\\xcc\\xb9\\x55\\xae\\xf4\\x75\\x3d\\xf0\\x0b\\x43\\x4d\\xa8\\x8d\\x05\\x34\\x13\\x3a\\x29\\x0b\\x9b\\x2b\\x01\\xeb\\x0d\\xc1\\xf5\\x21\\xe7\\x9d\\xec\\x38\\xaf\\x82\\xb0\\x74\\xef\\x2d\\xc6\\x4d\\xd3\\x8e\\x37\\xa5\\x5e\\x5b\\x83\\x06\\x34\\x72\\xf2\\xe7\\x04\\xdb\\x79\\xfa\\x21\\xe1\\x1f\\x30\\x84\\x53\\x38\\x89\\x59\\xf4\\x91\\x34\\xfd\\x12\\x9e\\x93\\xf8\\x3f\\x91\\xac\\x5f\\x1d\\xe0\\xfc\\x7b\\xa6\\xec\\xbd\\x40\\xfb\\xef\\xa7\\x72\\x53\\xf8\\xab\\x2c\\x3b\\x85\\x5d\\x23\\x7e\\xbb\\x67\\xc4\\x8a\\xfe\\x12\\xf5\\x3e\\xfd\\xff\\xed\\xd1\\x6f\\xd3\\x3e\\xa1\\xca\\xe4\\xf0\\x6c\\x0f\\x22\\x21\\xe9\\x3e\\xdb\\x89\\x83\\x68\\x5c\\x6e\\xef\\x58\\x1a\\x8c\\x1e\\x28\\x34\\x2f\\x9b\\x18\\x7e\\x28\\x53\\xfe\\x47\\x85\\xe6\\x60\\x9b\\x4a\\xcd\\x68\\xb3\\x11\\xed\\x81\\x45\\x6f\\x25\\xae\\x68\\xd4\\x3c\\x76\\x2c\\x92\\x1a\\x76\\xb3\\xa6\\xf4\\xd5\\x87\\x8f\\x18\\x24\\x6a\\x44\\x4e\\x2e\\xb1\\xc1\\xa7\\xfe\\x8c\\x7b\\x5e\\x6a\\xd2\\xe3\\xa8\\xc6\\x10\\x13\\xdc\\x87\\x5b\\x84\\xa5\\xd8\\xd0\\xa8\\x96\\x15\\xfa\\x76\\x03\\x73\\xe1\\x20\\xdd\\x68\\xb1\\x94\\x89\\x0b\\xf2\\xb8\\xb9\\xb7\\x38\\x17\\x96\\xc5\\x5a\\xfc\\xad\\x40\\x47\\x73\\x1f\\x01\\x59\\x24\\xbe\\x10\\x4a\\x6d\\x60\\x2e\\x69\\x78\\x23\\xee\\xce\\xcb\\x57\\xc3\\x21\\x38\\x2f\\x73\\xd4\\x69\\x0f\\xbe\\x7b\\x35\\xf8\\xee\\x5b\\xb0\\x85\\xc2\\x6e\\xbf\\x5d\\x2b\\x61\\xd5\\x51\\xa3\\x37\\x68\\x21\\xa2\\xe7\\x35\\xe6\\x7e\\xd1\\xe9\\xc2\\x0f\\x0f\\xd4\\xc2\\x07\\x0a\\xdb\\x41\\x5a\\x78\\x01\\x27\\x5f\\xfa\\xa4\\xd7\\xa8\\x81\\xdb\\xe0\\x49\\x40\\xe5\\x30\\x4a\\xa3\\x81\\xf7\\xea\\xf5\\x55\\xe7\\x56\\x58\\xa1\\xc4\\x0c\\xbb\\xa7\\x3c\\x00\\xb3\\xad\\xd6\\x22\\x4e\\x40\\xe4\\x14\\xc8\\x95\\x90\\x1a\\x44\\x92\\x98\\x42\\x7b\\x32\\x7c\\x39\\xcc\\xa8\\x0d\\xe5\\xf7\\x63\\x5f\\xca\\xe3\\x59\\x51\\x24\\x09\\x3a\\x57\\xa6\\x7b\\xf6\\x1a\\xa9\\x23\\x96\\xc4\\x0d\\x52\\x3b\\x99\\x62\\xcd\\x2b\\x94\\x1d\\x0c\\xa7\\xe6\\x48\\x41\\xa3\\x74\\x29\\x70\\x69\\x1c\\x6d\\x32\\x43\\x58\\x5b\\x1a\\xbc\\x9c\\xd4\\x09\\xdf\\x3c\\xa4\\x48\\xd6\\x76\\x60\\x34\\x08\\x50\\x86\\xaf\\x3b\\x38\\xc6\\x41\\xd8\\xb9\\xeb\\x87\\x7c\\x4f\\xdb\\x52\\xce\\xd1\\x66\\xdd\\x6f\\x02\\xb9\\x0e\\x55\\x1e\\x71\\x76\\x5a\\x21\\x0d\\x78\\x27\\x9d\\xe7\\x8e\\x9a\\xb4\\x94\\x0e\\x02\\x92\\xa5\\x9e\\xf7\\x20\\x37\\x39\\xe7\\xe9\\xaf\\x95\\xb3\\x98\\xac\\xaf\\x27\\xbf\\x4c\\xae\\xab\\xc6\\xe7\\xe9\\x4e\\x2c\\x67\\x9e\\xa3\\x6a\\x24\\x04\\x4b\\xf3\\x96\\xc7\\xf4\\xe8\\xc0\\x10\\x73\\x00\\x50\\xa3\\x07\\x00\\x45\\xf2\\xb7\\xb5\\xf1\\x7d\\xed\\x38\\x4a\\x38\\xbf\\x75\\xcc\\x1c\\xc3\\x3c\\x57\\x57\\xc0\\x15\\xca\\xbb\\x9d\\xdc\\xbd\\x9b\\x1c\\x4c\\x5e\\x56\\x08\\x52\\x8a\\xd3\\x0e\\x25\\xf6\\xdd\\x49\\xa3\\xb1\\xb0\\x1d\\x38\\xb6\\xf8\\xbc\\xa8\\xd9\\x78\\xcd\\xed\\x66\\x20\\xaa\\xa5\\x06\\x5e\\x2f\\xfb\\x56\\x11\\xaa\\x01\\xeb\\x6e\\x0a\\x4f\\x70\\xa0\\xfa\\xbd\\x4d\\x7e\\x73\\xe1\\x3e\\x38\\xf6\\x7a\\x4c\\x7f\\x33\\x39\\xbf\\xd0\\xbe\\x53\\x2e\\x5e\\x68\\x78\\x01\\xe5\\x03\\x25\\x75\\x78\\xd1\\x88\\xa2\\x03\\xd9\\xb1\\x95\\xa2\\x42\\x8f\\xb0\\x15\\x71\\x06\\x3b\\xaf\\x48\\x50\\x30\\x07\\x1b\\xcd\\xa2\\xdf\\x2f\\xce
\\xc3\\x28\\x8d\\x0c\\xf6\\xcc\\xa2\\xef\\xe3\\x6f\\x85\\x50\\xae\\x33\\xac\\x9a\\x85\\x70\\x02\\x6f\\xb8\\xbc\\x8d\\xf6\\x3a\\x49\\xe2\\x69\\xf6\\x8e\\x67\\x35\\xb6\\x68\\x8d\\x92\\x2d\\x74\\x82\\xe7\\x26\\xc5\\x47\\x25\\x44\\x11\\x31\\x6d\\x54\\xbe\\x8c\\xc0\\x3c\\xd4\\x7b\\xb7\\xea\\x04\\x70\\x54\\x35\\x04\\x99\\x90\\xaa\\xb0\\x78\\x74\\x06\\x07\\xd2\\x8e\\x2b\\x6c\\x26\\x12\\xf6\\xa5\\x43\\xe0\\x69\\xdd\\x81\\x33\\x4b\\x5c\\x98\\x75\\x50\\xe0\\x50\\xf2\\xda\\x07\\x47\\x85\\x83\\x9d\\xf2\\xc1\\xd7\\x4e\\xc2\\x41\\xe1\\xc4\\x1c\\x6b\\xe0\\xa8\\x0c\\x5e\\x3a\\xea\\xe0\\x15\\xc2\\x9f\\x86\\xce\\xf3\\xea\\xf1\\x09\\x28\\xba\\xff\\x6b\\xe0\\xb1\\xe3\\xe7\\xbd\\x3e\\xa7\\x24\\xe2\\x6e\\xa7\\xf6\\x50\\x2a\\x1b\\x9a\\x91\\xbf\\x97\\xe3\\x9f\\x1c\\x61\\xbb\\xb4\\xe1\\x68\\x4d\\xe2\\x70\\xc0\\x6d\\x5f\\xf3\\x75\\xf7\\x57\\xab\\x0f\\x79\\xfe\\xa1\\x96\\x89\\x30\\xaa\\x7f\\xc5\\xc4\\x6f\\x71\\xca\\x5d\\x0e\\x3d\\xe5\\x16\\x57\\xd2\\x14\\x54\\xc0\\xf0\\x7f\\x69\\x1c\\xae\\x5a\\xbe\\xfb\\x76\\xeb\\x3e\\xde\\x0b\\xb2\\xdf\\xea\\x17\\x83\\xeb\\x45\\xbc\\xd7\\x0e\\xdd\\x52\\xad\\x7c\\x18\\xae\\xad\\xf1\\xba\\x30\\x0b\\x37\\xce\\x2d\\xe6\\x7f\\xe4\\x82\\x30\\x06\\xba\\x37\\x39\\xb5\\x03\\xb1\\x3a\\x29\\x8b\\x22\\xdd\\x54\\x05\\xb1\\x17\\x1a\\x11\\x58\\x08\\x9d\\xc6\\x61\\x44\\xa4\\xa9\\x24\\x79\\x0c\\x42\\xd2\\x50\\xcc\\x85\\xd4\\xed\\x83\\x66\\xfc\\x6a\\x15\\x3e\\x84\\x8c\\xbd\\xde\\xb6\\x5e\\x48\\xe3\\x10\\x49\\x13\\x1f\\x6b\\xdc\\x7e\\x42\\xc1\\xdc\\x09\\xa2\\xdd\\xbb\\xce\\x78\\x5d\\x6a\\xb4\\x2b\\x96\\xdc\\x09\\x83\\x58\\x09\\xa9\\x04\\x4d\\x5f\\xdc\\x61\\xe9\\x14\\x12\\x85\\x42\\x87\\x2f\\x1c\\x98\\x79\\xb3\\x42\\xeb\\xda\\x4f\\x00\\xf9\\x9f\\xc1\\xf8\\x4e\\x56\\x2c\\x1f\\xa3\\x39\\x9e\\x1e\\xb3\\x4f\\x8d\\xd8\\x70\\xfc\\x37\\x4a\\x78\\x1f\\xe1\\x55\\x33\\x6f\\x88\\x2c\\xe9\\xf9\\xe3\\x17\\x6a\\xdf\\x7e\\x5a\\x48\\x71\\xcf\\x44\\x34\\x3f\\xc0\\xb0\\xd6\\x97\\xff\\x5d\\x82\\x6c\\x1f\\x62\\x97\\x55\\x7f\\x16\\x0f\\xef\\x8d\\xe9\\x81\\x42\\xc1\\x53\\x52\\xf9\\x69\\xaa\\xec\\x47\\x1f\\x1b\\xda\\xca\\xe8\\x0d\\x1d\\xdd\\x5e\\xf8\\xf2\\x9d\\xde\\x02\\xcb\\x1b\\x90\\xd0\\xda\\xcf\\x10\\x35\\x48\\x8f\\x56\\xd0\\x3c\\x44\\xe8\\x8a\\x5f\\x53\\x48\\x4b\\xc7\\xe2\\xd8\\x2f\\x92\\x82\\x2e\\x0a\\x8e\\x9f\\x36\\xa8\\x30\\x4b\\x3d\\xef\\xb7\\x5b\\xe1\\x7d\\x2d\\xde\\x13\\x7f\\xb7\\x8d\\xf7\\x50\\x01\\x99\\x33\\xde\\x09\\x54\\x57\\x02\\x89\\xbf\\xe3\\x6e\\x91\\xc7\\xe6\\x9d\\x7b\\x01\\x5a\\xa3\\x57\\x61\\xa6\\xde\\xb9\\x05\\x60\\xc6\\x78\\x13\\xb0\\x7b\\x27\\x46\\x6b\\xfc\\xae\\x01\\x70\\x26\\x9d\\x0b\\x17\\xc4\\xec\\x84\\x84\\xbf\\xdb\\x8f\\x88\\x92\\x81\\x82\\xe1\\xf4\\x30\\x03\\x2d\\x1d\\x60\\xda\\xb9\\x99\\x20\\x62\\x7e\\x15\\x56\\x43\\x3d\\x3f\\xad\\xaf\\x86\\x57\\xf1\\xa0\\x72\\x59\\xb3\\x8d\\x5c\\xb2\\x6d\\xee\\xcf\\x0e\\x27\\xb9\\x61\\x89\\xc7\\xc3\\xc9\\x8c\\x6c\\x5e\\x01\\xf6\\x01\\xd6\\xfa\\xac\\xb1\\x4f\\xf2\\x58\\xaa\\x64\\xe9\\x65\\x66\\x7b\\x80\\x95\\xa5\\xd7\\x5a\\x0e\\x7f\\xf7\\x74\\x91\\x15\\x71\\x5d\\xc5\\x06\\x4d\\x43\\x08\\xdf\\x36\\xee\\x2d\\x1f\\x9a\\xb4\\x68\\x50\\x89\\x84\\x65\\x73\\x35\\x1a\\x1d\\x0d\\xef\\xaa\\x0f\\x23\\x31\\x57\\x35\\x68\\x4a\\x25\\x42\\x64\\x84\\xf3\\x72\\x54\\xc8\\x7f\\x62\\xdc\\xb6\\x1e\\x83\\xe5\\x12\\x58\\x0c\\x1f\\x70\\xb8\\x9b\\xa5\\x10\\x34\\x33\\x6e\\x20\\x0a\\x47\\xa3\\xe8\\x36\\xb6\\x52\\x74\\xd2\\x62\\x0a\\x99\\x44\\x95\\x82\\x49\\xd1\\xf2\\xa0\\xfb\\xab\\x33\\x3a\\x7c\\xaa\\x43\\x2b\\x49\\x62\\xf8\\x24\\x19\\xfe\\x1d\\xc0\\x1f\\x4a\\xb5\\x4c\\xd0\\x6f\\x20\\x43\\xc1\\xdf\\xdc\\xbc\\x81\\x5c\\x38\\x07\\x4b\\x14\\x34\\xda\\x66\\x85\\x52\\x1b\\x30\\x36\\x45\\x12
\\x5e\\xcd\\x7a\\x14\\xd6\\x06\\x0a\\x87\\xd6\\xc1\\x7a\\x61\\x62\\xa9\\xe5\\x16\\x2f\\xa7\\x6e\\x55\\xfa\\x5e\\xbc\\xce\\x91\\x2e\\x57\\x62\\x03\\xd2\\x53\\x59\\x8f\\x87\\xaa\\x47\\x7a\\xf5\\xa1\\x8b\\xbf\\x96\\x19\\x32\\xf0\\x7e\\x98\\x97\\x53\\x61\\x33\\xce\\xf9\\x35\\x3d\\x35\\x23\\x3c\\x0e\\x45\\xcd\\xd8\\xde\\x5e\\x74\\x35\\x03\\xb9\\x2c\\x3d\\xcd\\x68\\xad\\x17\\xb2\\x66\\x48\\xf2\\x0a\\x3f\\x35\\x83\\xb1\\xd6\\x6a\\xf3\\x02\\x23\\xa8\\x62\\xe0\\xa7\\x9d\\xf0\\x64\\x2d\\x63\\x7c\\x86\\xcf\\xba\\x15\\x39\\x3f\\xf5\\x22\\x60\\xc8\\x8b\\x1d\\x32\\xce\\x2d\\x6e\\x28\\x9b\\x07\\x1b\\xd5\\x4a\\x53\\x78\\xf1\\xf9\\x16\\x37\\x5f\\x0e\\x57\\xa2\\x08\\xc7\\x1a\\x5d\\x55\\x7a\\xca\\xb0\\x08\\x6b\\x8f\\x24\\x83\\x4a\\x0b\\x39\\x1a\\x9e\\x81\\xfc\\xbe\\xce\\x50\\x56\\x4f\\x90\\xcf\\x9f\\x97\\x7b\\xd6\\xd7\\x3f\\xcb\\x2f\\x65\\x84\\x57\\x88\\xdf\\x59\\xef\\x36\\x34\\x8a\\x31\\x12\\x68\\x28\\x28\\xda\\xf7\\xed\\x7f\\x05\\x00\\x00\\xff\\xff\\xfb\\x65\\x93\\x4f\\xfc\\x22\\x00\\x00\")\n\nfunc call_tracerJsBytes() ([]byte, error) {\n\treturn bindataRead(\n\t\t_call_tracerJs,\n\t\t\"call_tracer.js\",\n\t)\n}\n\nfunc call_tracerJs() (*asset, error) {\n\tbytes, err := call_tracerJsBytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo := bindataFileInfo{name: \"call_tracer.js\", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}\n\ta := &asset{bytes: bytes, info: info, digest: [32]uint8{0x46, 0x79, 0xb6, 0xbc, 0xd2, 0xc, 0x25, 0xb1, 0x22, 0x56, 0xef, 0x77, 0xb9, 0x5e, 0x2e, 0xf4, 0xda, 0xb2, 0x2f, 0x53, 0xa4, 0xff, 0xc8, 0xac, 0xbb, 0x75, 0x22, 0x46, 0x59, 0xe3, 0x1d, 0x7d}}\n\treturn a, nil\n}\n\nvar _evmdis_tracerJs = []byte(\"\\x1f\\x8b\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\xac\\x57\\xdf\\x6f\\xda\\xca\\x12\\x7e\\x86\\xbf\\x62\\x94\\x27\\x50\\x29\\x60\\x63\\x08\\x38\\x27\\x47\\xe2\\xa6\\xf4\\x1c\\xae\\xd2\\x24\\x02\\x72\\x8f\\x2a\\x94\\x87\\x05\\xc6\\xb0\\xaa\\xf1\\x5a\\xbb\\x6b\\x72\\xb8\\x55\\xfe\\xf7\\xab\\xd9\\x59\\x03\\xf9\\x75\\xdb\\x4a\\xa7\\x0f\\x3b\\xb5\\x77\\xbe\\x6f\\xbe\\x9d\\x19\\xcf\\x92\\x56\\x0b\\xae\\x54\\xbe\\xd7\\x72\\xbd\\xb1\\x10\\xb6\\x83\\x73\\x98\\x6d\\x10\\xd6\\xea\\x23\\xda\\x0d\\x6a\\x2c\\xb6\\x30\\x2c\\xec\\x46\\x69\\x53\\x6d\\xb5\\x60\\xb6\\x91\\x06\\x12\\x99\\x22\\x48\\x03\\xb9\\xd0\\x16\\x54\\x02\\xf6\\x85\\x7f\\x2a\\x17\\x5a\\xe8\\x7d\\xb3\\xda\\x6a\\x31\\xe6\\xcd\\x6d\\x62\\x48\\x34\\x22\\x18\\x95\\xd8\\x47\\xa1\\x31\\x86\\xbd\\x2a\\x60\\x29\\x32\\xd0\\xb8\\x92\\xc6\\x6a\\xb9\\x28\\x2c\\x82\\xb4\\x20\\xb2\\x55\\x4b\\x69\\xd8\\xaa\\x95\\x4c\\xf6\\x44\\x29\\x2d\\x14\\xd9\\x0a\\xb5\\x0b\\x6d\\x51\\x6f\\x4d\\xa9\\xe3\\x8f\\x9b\\x7b\\xb8\\x46\\x63\\x50\\xc3\\x1f\\x98\\xa1\\x16\\x29\\xdc\\x15\\x8b\\x54\\x2e\\xe1\\x5a\\x2e\\x31\\x33\\x08\\xc2\\x40\\x4e\\x6f\\xcc\\x06\\x57\\xb0\\x70\\x74\\x04\\xfc\\x4c\\x52\\xa6\\x5e\\x0a\\x7c\\x56\\x45\\xb6\\x12\\x56\\xaa\\xac\\x01\\x28\\x49\\x39\\xec\\x50\\x1b\\xa9\\x32\\xe8\\x94\\xa1\\x3c\\x61\\x03\\x94\\x26\\x92\\x9a\\xb0\\x74\\x00\\x0d\\x2a\\x27\\x5c\\x1d\\x44\\xb6\\x87\\x54\\xd8\\x23\\xf4\\x27\\x12\\x72\\x3c\\xf7\\x0a\\x64\\xe6\\xc2\\x6c\\x54\\x8e\\x60\\x37\\xc2\\xd2\\xa9\\x1f\\x65\\x9a\\xc2\\x02\\xa1\\x30\\x98\\x14\\x69\\x83\\xd8\\x16\\x85\\x85\\xbf\\xc6\\xb3\\x3f\\x6f\\xef\\x67\\x30\\xbc\\xf9\\x0a\\x7f\\x0d\\x27\\x93\\xe1\\xcd\\xec\\xeb\\x05\\x3c\\x4a\\xbb\\x51\\x85\\x05\\xdc\\x21\\x53\\xc9\\x6d\\x9e\\x4a\\x5c\\xc1\\xa3\\xd0\\x5a\\x64\\x76\\x0f\\x2a\\x21\\x86\\x2f\\xa3\\xc9\\xd5\\x9f\\xc3\\x9b\\xd9\\xf0\\x5f\\xe3\\xeb\\xf1\\xec\\x2b\\x28\\x0d\\x9f\\xc7\\xb3\\x9b\\xd1\\x74\\x0a\\x9f\\x6f\\x27\\x30\\x84\\xbb\\xe1\\x64\\x3
6\\xbe\\xba\\xbf\\x1e\\x4e\\xe0\\xee\\x7e\\x72\\x77\\x3b\\x1d\\x35\\x61\\x8a\\xa4\\x0a\\x09\\xff\\xe3\\x9c\\x27\\xae\\x7a\\x1a\\x61\\x85\\x56\\xc8\\xd4\\x94\\x99\\xf8\\xaa\\x0a\\x30\\x1b\\x55\\xa4\\x2b\\xd8\\x88\\x1d\\x82\\xc6\\x25\\xca\\x1d\\xae\\x40\\xc0\\x52\\xe5\\xfb\\x9f\\x2e\\x2a\\x71\\x89\\x54\\x65\\x6b\\x77\\xe6\\x77\\x1b\\x12\\xc6\\x09\\x64\\xca\\x36\\xc0\\x20\\xc2\\x6f\\x1b\\x6b\\xf3\\xb8\\xd5\\x7a\\x7c\\x7c\\x6c\\xae\\xb3\\xa2\\xa9\\xf4\\xba\\x95\\x32\\x9d\\x69\\xfd\\xde\\xac\\x12\\x27\\xee\\xb6\\x2b\\x69\\x66\\x5a\\x2c\\x51\\x83\\x46\\x5b\\xe8\\xcc\\x80\\x29\\x92\\x44\\x2e\\x25\\x66\\x16\\x64\\x96\\x28\\xbd\\x75\\x7d\\x02\\x89\\x56\\x5b\\x10\\x60\\xc9\\x19\\xac\\x82\\x1c\\x35\\x6d\\x7a\\x8e\\x8f\\xc6\\xee\\x53\\xa7\\x73\\x25\\x8d\\x30\\x06\\xb7\\x8b\\x74\\xdf\\xac\\x7e\\xaf\\x56\\x8c\\x15\\xcb\\x6f\\x31\\xcc\\xbf\\xab\\xdc\\xc4\\x30\\x7f\\x78\\x7a\\x68\\x54\\xab\\x95\\x2c\\x2f\\xcc\\x06\\x4d\\x0c\\xdf\\xdb\\x31\\xb4\\x1b\\x10\\xc4\\x10\\x34\\x20\\x74\\x6b\\xc7\\xad\\x91\\x5b\\xbb\\x6e\\xed\\xb9\\xf5\\xdc\\xad\\x7d\\xb7\\x0e\\xdc\\x1a\\xb4\\xd9\\x30\\x3a\\x60\\xb7\\x80\\xfd\\x02\\x76\\x0c\\xd8\\x33\\x64\\xcf\\xd0\\xc7\\xe1\\x40\\x21\\x47\\x0a\\x39\\x54\\xc8\\xb1\\x42\\x66\\xe9\\xb0\\x4b\\xc4\\x2c\\x11\\xb3\\x74\\x99\\xa5\\xcb\\x2c\\x5d\\x76\\xe9\\x32\\x4b\\xd7\\x0b\\xee\\xba\\xf3\\x74\\x99\\xa5\\x7b\\xce\\x4f\\xcc\\xd2\\x65\\x96\\x1e\\x1f\\xb9\\xc7\\x80\\x9e\\x3f\\x22\\x03\\x7a\\x2c\\xbe\\xc7\\x80\\x1e\\x03\\xfa\\x0c\\xe8\\x73\\xd8\\x7e\\xc8\\x4f\\x1d\\x36\\xcc\\xd2\\xe7\\xb0\\xfd\\x1e\\x1b\\x0e\\xdb\\x67\\x96\\x3e\\xb3\\x0c\\x58\\xfc\\x20\\x70\\x7b\\x03\\x8e\\x37\\xe0\\x78\\x03\\x9f\\xd5\\x32\\xad\\x3e\\xaf\\x6d\\x9f\\xd8\\x76\\xe8\\x6d\\xc7\\xdb\\xc8\\xdb\\xae\\xb7\\x3e\\xf3\\x6d\\x9f\\xfa\\xb6\\xcf\\x7d\\xdb\\xf3\\x1d\\xea\\xe4\\xf9\\x02\\xcf\\x17\\x78\\xbe\\xc0\\xf3\\x05\\x9e\\xaf\\xac\\x64\\x59\\xca\\xb2\\x96\\xbe\\x98\\x81\\xaf\\x66\\xe0\\xcb\\x19\\xf8\\x7a\\x06\\xbe\\xa0\\x81\\xaf\\x68\\xe0\\x4b\\x1a\\xf8\\x9a\\x06\\xa1\\xe7\\x0b\\xfb\\x31\\x84\\x64\\x07\\x31\\x74\\x1a\\x10\\x74\\xda\\x31\\x44\\x64\\x83\\x18\\xba\\x64\\xc3\\x18\\x7a\\x64\\x3b\\x31\\x9c\\x93\\x8d\\x62\\xe8\\x93\\xed\\xc6\\x30\\x20\\x4b\\x7c\\xd4\\xb5\\x1d\\x22\\x24\\xc6\\x0e\\x29\\x24\\xca\\x0e\\x49\\x24\\xce\\x88\\x34\\x12\\x69\\x44\\x22\\x89\\x35\\x22\\x95\\x44\\x1b\\x91\\x4c\\xe2\\x8d\\x22\\xd6\\x11\\x75\\x59\\x47\\xd4\\x63\\x1d\\xd1\\x39\\xeb\\xa0\\xee\\x73\\x80\\x01\\xeb\\xa0\\xfe\\x23\\x1d\\xd4\\x80\\xa4\\xc3\\x75\\x20\\xe9\\x70\\x3d\\x48\\x3a\\x5c\\x17\\x12\\x25\\xf5\\xa1\\xd3\\xe1\\x3a\\x91\\x48\\xa9\\x17\\x9d\\x0e\\xd7\\x8d\\x44\\xeb\\xfa\\x91\\x78\\x7d\\x47\\x06\\xbd\\xc0\\xdb\\xd0\\xdb\\x8e\\xb7\\x91\\xb3\\x61\\xe4\\xbf\\xa2\\xc8\\x7f\\x46\\x91\\xff\\x8e\\xa2\\x8e\\xdf\\xf7\\x7e\\xee\\x23\\x78\\xa2\\xef\\xbc\\xd5\\x02\\x8d\\xa6\\x48\\x2d\\x4d\\x7f\\x99\\xed\\xd4\\x37\\x9a\\xcf\\x1b\\xcc\\x40\\xa4\\xa9\\x1b\\x64\\x2a\\x5f\\xaa\\x15\\x1a\\x1e\\x90\\x0b\\xc4\\x0c\\xa4\\x45\\x2d\\xe8\\x86\\x50\\x3b\\xd4\\x74\\x39\\x96\\xa3\\xc9\\xd1\\x11\\x26\\x91\\x99\\x48\\x4b\\x62\\x3f\\x44\\x69\\x30\\xc9\\x6c\\xdd\\xac\\x56\\xf8\\x7d\\x0c\\x49\\x91\\x2d\\x69\\x74\\xd5\\xea\\xf0\\xdd\\x53\\x80\\xdd\\x48\\xd3\\x74\\x23\\x69\\xde\\x7e\\x68\\xaa\\xdc\\x5c\\x40\\xa9\\x33\\x11\\x6f\\xc9\\x24\\x6a\\xb1\\xb4\\x85\\x48\\x01\\xff\\xc6\\x65\\xe1\\x66\\xa1\\x4a\\x40\\x64\\x5e\\x39\\x24\\x3c\\xf1\\x2b\\x0e\\x7f\\x12\\x35\\x55\\xeb\\x06\\xac\\x16\\x14\\xbc\\x0c\\x61\\x2c\\xe6\\xa7\\x11\\xe8\\xde\\xc0\\x1d\\xea\\x7d\\xc9\\xe5\\xee\\x41\\x0a\\xf9\\x9f\\x2f\\x3e\\x1c\\x12\\x35\\xe1\\xde\\x64\\xae\\x56\\x2a\\x3b\\xa1\\x21\\xd1\\x6
2\\x8b\\x70\\x79\\x7a\\xba\\xe3\\x7f\\x9b\\x29\\x66\\x6b\\xbb\\x81\\x8f\\x10\\x3c\\x5c\\x54\\x3d\\x02\\xb5\\x56\\x1a\\x2e\\x21\\x55\\xeb\\xe6\\x1a\\xed\\x88\\x1e\\x6b\\xf5\\x8b\\x6a\\xa5\\x22\\x13\\xa8\\xb9\\x5d\\xa6\\xaf\\x38\\xee\\xf9\\x99\\x7b\\x75\\xf6\\x00\\x97\\x0c\\x25\\xcf\\x27\\xc0\\xd4\\x20\\x10\\xc0\\xd3\\x7c\\xc2\\xdc\\x6e\\x6a\\x75\\xb8\\x3c\\x95\\xe2\\xe3\\x7b\\x3a\\x95\\xd3\\xa5\\x02\\x97\\xfc\\x54\\x51\\x79\\x0c\\xf4\\x8f\\x08\\x54\\xde\\xb4\\xea\\xa6\\xd8\\x2e\\x50\\xd7\\xea\\x0d\\xb7\\xbd\\x22\\x42\\x88\\xe1\\x39\\x3f\\xef\\x95\\x65\\x9e\\x3f\\xb8\\xe7\\x27\\x92\\xe4\\xd4\\x3b\\xc5\\x54\\xdb\\xf2\\xe4\\xbf\\x43\\xdb\\x47\\x77\\x67\\xcf\\x35\\xee\\x54\\x0e\\x97\\x70\\x70\\x9c\\xbf\\x82\\x70\\xb2\\x08\\x91\\x28\\x5d\\x23\\x94\\x84\\x4b\\x68\\x5f\\x80\\x84\\xdf\\xf8\\x6c\\xfe\\x06\\x9b\\x33\\x5b\\x53\\xe5\\x0f\\x17\\x20\\x3f\\x7c\\xa8\\x3b\\x50\\xc5\\xbf\\x65\\x8d\\x4d\\x72\\x75\\x39\\xe2\\x84\\xe4\\x88\\xdf\\x6a\\xb2\\xde\\xb4\\x6a\\x6a\\xb5\\xcc\\xd6\\xb5\\xa0\\x57\\x77\\xb9\\xaf\\x3c\\xd1\\x62\\x1e\\xa5\\x5d\\xb2\\xbf\\x4b\\x89\\x77\\xaa\\xfb\\x33\\x2c\\x85\\x41\\x38\\xbb\\x1a\\x5e\\x5f\\x9f\\xc5\\x70\\x7c\\xb8\\xba\\xfd\\x34\\x3a\\x8b\\x0f\\x87\\x94\\x99\\xb1\\xf4\\xfb\\x95\\x4b\\x7c\\x12\\xb7\\x53\\x6f\\xee\\x44\\x5a\\xe0\\x6d\\xc2\\xf5\\x3e\\xb8\\xcb\\xff\\xe2\\x6b\\xef\\xe8\\x95\\x37\\x17\\x70\\x7e\\xb6\\x16\\xc6\\xb5\\xc3\\x0b\\x40\\xfb\\x5d\\x80\\x55\\x6f\\xf9\\x07\\xcf\\xd3\\xf0\\x1c\\xe2\\x98\\xde\\x42\\x85\\x27\\xa8\\x17\\x18\\x99\\xe5\\x85\\x3d\\x60\\xb6\\xb8\\x55\\x7a\\xdf\\x34\\xf4\\xcb\\xa7\\xe6\\x73\\xd2\\x38\\x24\\xe7\\x83\\x3f\\xf7\\x0b\\x8a\\x63\\xaf\\x67\\x45\\x9a\\x3e\\xdf\\xe3\\x39\\xf2\\xce\\xa6\\xca\\x39\\x27\\x73\\xdf\\x3b\\x27\\x1f\\x81\\x6b\\x01\\xf6\\xf3\\xd1\\x16\\x1a\\xc5\\xb7\\x8b\\x63\\x45\\x3f\\x8d\\xae\\x47\\x7f\\x0c\\x67\\xa3\\x67\\x95\\x9d\\xce\\x86\\xb3\\xf1\\x15\\xbf\\xfa\\x71\\x6d\\xc3\\x5f\\xaa\\xed\\xeb\\x4e\\x38\\x9e\\xc3\\x1d\\x03\\x5e\\xb5\\xe0\\xdb\\x2d\\xf0\\xcb\\x3d\\xf0\\x4b\\x4d\\x70\\x2c\\xe8\\x3f\\x51\\xd1\\xff\\x5f\\xd2\\x7f\\xba\\xa6\\x93\\xd1\\xec\\x7e\\x72\\x73\\x52\\x3a\\xfa\\x7b\\xe5\\x27\\xbe\\x19\\xef\\xfa\\x76\\xdd\\x82\\x57\\xee\\x3c\\xbe\\xfc\\x15\\xf7\\x46\\xe3\\xab\\xc2\\x36\\x5c\\xe8\\x0f\\x25\\xeb\\x3b\\x7a\\xa7\\xb3\\xdb\\xbb\\x63\\xef\\xdd\\x8f\\xaf\\xc6\\x87\\xa1\\xf2\\xa3\\x18\\xed\\x06\\xb4\\xdf\\x61\\xfd\\xf7\\xfd\\x97\\xbb\\x4f\\xa3\\xe9\\xcc\\x33\\x95\\x99\\xcd\\x97\\x87\\xcf\\x74\\x8d\\xf6\\xee\\xaa\\x76\\x32\\x03\\x65\\x52\\xce\\x3f\\x69\\xee\\x28\\xcd\\xe5\\xf4\\x3b\\xa0\\x53\\xcc\\x0e\\xf0\\x67\\x37\\x07\\x7c\\x84\\xf6\\xdf\\x5d\\x3c\\x72\\x1d\\x87\\xfb\\xcb\\x82\\xf9\\x1b\\xcc\\x11\\x1f\\xeb\\xfa\\xec\\x22\\x3d\\x9e\\xee\\xf9\\x1d\\xc4\\xf8\\x6a\\xe5\\xa9\\xfa\\x54\\xfd\\x5f\\x00\\x00\\x00\\xff\\xff\\xdf\\x2f\\xd9\\xfa\\x63\\x10\\x00\\x00\")\n\nfunc evmdis_tracerJsBytes() ([]byte, error) {\n\treturn bindataRead(\n\t\t_evmdis_tracerJs,\n\t\t\"evmdis_tracer.js\",\n\t)\n}\n\nfunc evmdis_tracerJs() (*asset, error) {\n\tbytes, err := evmdis_tracerJsBytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo := bindataFileInfo{name: \"evmdis_tracer.js\", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}\n\ta := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb5, 0xc8, 0x73, 0x8e, 0xfb, 0x1f, 0x84, 0x7d, 0x37, 0xd9, 0x26, 0x24, 0x37, 0xb8, 0x65, 0xb1, 0xed, 0xa0, 0x76, 0x9a, 0xf0, 0x8e, 0x3a, 0x9b, 0x20, 0x93, 0x27, 0x26, 0x2e, 0xc9, 0x9b, 0xde}}\n\treturn a, nil\n}\n\nvar _noop_tracerJs = 
[]byte(\"\\x1f\\x8b\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\x8c\\x93\\x4f\\x6f\\xdb\\x46\\x10\\xc5\\xcf\\xe6\\xa7\\x78\\xc7\\x04\\x50\\xc5\\xfe\\x39\\x14\\x70\\x8a\\x02\\xac\\x61\\x27\\x2a\\x1c\\xdb\\x90\\xe8\\x06\\x3e\\x0e\\xc9\\xa1\\xb8\\xe9\\x6a\\x87\\x9d\\x9d\\x95\\x22\\x18\\xfe\\xee\\xc5\\x92\\x12\\x12\\x14\\x69\\x9b\\x9b\\xb0\\xd2\\xfb\\xbd\\x37\\xf3\\x46\\x65\\x89\\x2b\\x19\\x8f\\xea\\xb6\\x83\\xe1\\xc7\\xef\\x7f\\xf8\\x19\\xf5\\xc0\\xd8\\xca\\x77\\x6c\\x03\\x2b\\xa7\\x1d\\xaa\\x64\\x83\\x68\\x2c\\xca\\x12\\xf5\\xe0\\x22\\x7a\\xe7\\x19\\x2e\\x62\\x24\\x35\\x48\\x0f\\xfb\\xc7\\xef\\xbd\\x6b\\x94\\xf4\\xb8\\x2c\\xca\\x72\\xd6\\x7c\\xf5\\xeb\\x4c\\xe8\\x95\\x19\\x51\\x7a\\x3b\\x90\\xf2\\x25\\x8e\\x92\\xd0\\x52\\x80\\x72\\xe7\\xa2\\xa9\\x6b\\x92\\x31\\x9c\\x81\\x42\\x57\\x8a\\x62\\x27\\x9d\\xeb\\x8f\\x19\\xe9\\x0c\\x29\\x74\\xac\\x93\\xb5\\xb1\\xee\\xe2\\x39\\xc7\\xdb\\xbb\\x47\\xdc\\x72\\x8c\\xac\\x78\\xcb\\x81\\x95\\x3c\\x1e\\x52\\xe3\\x5d\\x8b\\x5b\\xd7\\x72\\x88\\x0c\\x8a\\x18\\xf3\\x4b\\x1c\\xb8\\x43\\x33\\xe1\\xb2\\xf0\\x26\\x47\\xd9\\x9c\\xa2\\xe0\\x46\\x52\\xe8\\xc8\\x9c\\x84\\x05\\xd8\\xe5\\xe4\\xd8\\xb3\\x46\\x27\\x01\\x3f\\x9d\\xad\\x4e\\xc0\\x05\\x44\\x33\\xe4\\x15\\x59\\x1e\\x40\\x21\\x63\\xd6\\xbd\\x06\\x85\\x23\\x3c\\xd9\\x67\\xe9\\x37\\x2c\\xe4\\xf3\\xdc\\x1d\\x5c\\x98\\x6c\\x06\\x19\\x19\\x36\\x90\\xe5\\xa9\\x0f\\xce\\x7b\\x34\\x8c\\x14\\xb9\\x4f\\x7e\\x91\\x69\\x4d\\x32\\x7c\\x58\\xd5\\xef\\xee\\x1f\\x6b\\x54\\x77\\x4f\\xf8\\x50\\xad\\xd7\\xd5\\x5d\\xfd\\xf4\\x06\\x07\\x67\\x83\\x24\\x03\\xef\\x79\\x46\\xb9\\xdd\\xe8\\x1d\\x77\\x38\\x90\\x2a\\x05\\x3b\\x42\\xfa\\x4c\\x78\\x7f\\xbd\\xbe\\x7a\\x57\\xdd\\xd5\\xd5\\x6f\\xab\\xdb\\x55\\xfd\\x04\\x51\\xdc\\xac\\xea\\xbb\\xeb\\xcd\\x06\\x37\\xf7\\x6b\\x54\\x78\\xa8\\xd6\\xf5\\xea\\xea\\xf1\\xb6\\x5a\\xe3\\xe1\\x71\\xfd\\x70\\xbf\\xb9\\x5e\\x62\\xc3\\x39\\x15\\x67\\xfd\\xff\\xef\\xbc\\x9f\\xda\\x53\\x46\\xc7\\x46\\xce\\xc7\\xf3\\x26\\x9e\\x24\\x21\\x0e\\x92\\x7c\\x87\\x81\\xf6\\x0c\\xe5\\x96\\xdd\\x9e\\x3b\\x10\\x5a\\x19\\x8f\\xdf\\x5c\\x6a\\x66\\x91\\x97\\xb0\\x9d\\x66\\xfe\\xd7\\x83\\xc4\\xaa\\x47\\x10\\x5b\\x20\\x32\\xe3\\x97\\xc1\\x6c\\xbc\\x2c\\xcb\\xc3\\xe1\\xb0\\xdc\\x86\\xb4\\x14\\xdd\\x96\\x7e\\xc6\\xc5\\xf2\\xd7\\x65\\x91\\x99\\x41\\x64\\xac\\x95\\x5a\\xd6\\x5c\\xce\\xc7\\x14\\x6d\\x62\\x37\\xa4\\xdc\\x48\\x60\\x34\\xe2\\x3c\\xeb\\x98\\x5b\\x46\\x2b\\x5d\\x1e\\xe0\\xaf\\xe4\\x94\\x3b\\xf4\\x2a\\x3b\\x10\\x7e\\xa7\\x3d\\x6d\\x5a\\x75\\xa3\\x65\\x9c\\x34\\x1f\\xb9\\x35\\x98\\xcc\\x15\\x52\\xe3\\xa7\\x73\\x24\\x98\\x52\\x88\\xd4\\xe6\\xbb\\xc9\\x9f\\x5b\\xd6\\x65\\xf1\\x5c\\x5c\\x94\\x25\\xa2\\xf1\\x98\\xbd\\x5d\\xd8\\xcb\\x9f\\x99\\x2b\\x9a\\xfb\\xd4\\x23\\x64\\x9c\\x1c\\xa7\\xcb\\xc8\\xa1\\xfe\\x78\\x0f\\xfe\\xc4\\x6d\\x32\\x8e\\xcb\\xe2\\x22\\xeb\\x2e\\xd1\\xa7\\x30\\x41\\x5f\\x79\\xd9\\x2e\\xd0\\x35\\xaf\\xf1\\x8c\\x97\\x45\\x31\\x91\\x7b\\x4a\\xde\\xbe\\x44\\x1f\\x86\\xd3\\x99\\x50\\x6b\\x89\\xfc\\x89\\x96\\x23\\x49\\x0f\\x0a\\x67\\xc3\\x7e\\x2e\\xf0\\x62\\xd2\\xff\\xb7\\x85\\x72\\xfc\\x9a\\x07\\x79\\x3f\\xf9\\xcc\\xc0\\x38\\x57\\xdf\\x30\\x07\\x38\\x63\\xa5\\x7c\\xfb\\xb2\\x67\\xcd\\x7f\\x7b\\x28\\x5b\\xd2\\x10\\x27\\x5c\\xd6\\xf4\\x2e\\x90\\x3f\\x83\\x4f\\xe7\\x91\\x37\\xe6\\xc2\\x76\\x59\\x5c\\xcc\\xef\\x5f\\x84\\x6a\\xed\\xd3\\x39\\xd4\\x4c\\xc2\\xf3\\xcb\\x1b\\xbc\\x14\\x2f\\xc5\\xdf\\x01\\x00\\x00\\xff\\xff\\x77\\x56\\xe7\\x1a\\xf7\\x04\\x00\\x00\")\n\nfunc noop_tracerJsBytes() ([]byte, error) {\n\treturn bindataRead(\n\t\t_noop_tracerJs,\n\t\t\"noop_tracer.js\",\n\t)\n}\n\nfunc noop_tracerJs() (*asset, 
error) {\n\tbytes, err := noop_tracerJsBytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo := bindataFileInfo{name: \"noop_tracer.js\", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}\n\ta := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe3, 0xf, 0x1c, 0x6f, 0x65, 0xaf, 0x90, 0x31, 0xab, 0xf, 0xe0, 0xca, 0x54, 0x7, 0xfd, 0xd3, 0xa1, 0x4a, 0x14, 0x1, 0x2a, 0x9d, 0xdc, 0xb9, 0x64, 0x69, 0x83, 0x30, 0xb1, 0x2a, 0xbd, 0xfb}}\n\treturn a, nil\n}\n\nvar _opcount_tracerJs = []byte(\"\\x1f\\x8b\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\x8c\\x94\\xcf\\x6e\\xdb\\x46\\x10\\x87\\xcf\\xe2\\x53\\xfc\\x8e\\x09\\xa2\\x92\\x69\\x7b\\x28\\xe0\\x16\\x05\\x58\\xc3\\x4e\\x04\\xd8\\xb2\\x21\\xd1\\x09\\x7c\\x5c\\x92\\x43\\x71\\x9b\\xd5\\x2e\\x31\\x3b\\x2b\\x86\\x08\\xfc\\xee\\xc5\\x2e\\xc5\\xc6\\x08\\x5c\\xd4\\xd7\\xd5\\xcc\\xf7\\xcd\\x3f\\xb1\\x28\\x70\\xe9\\x86\\x89\\xf5\\xa1\\x17\\xfc\\xf2\\xfe\\xe7\\xdf\\x50\\xf5\\x84\\x83\\xfb\\x89\\xa4\\x27\\xa6\\x70\\x44\\x19\\xa4\\x77\\xec\\xb3\\xa2\\x40\\xd5\\x6b\\x8f\\x4e\\x1b\\x82\\xf6\\x18\\x14\\x0b\\x5c\\x07\\xf9\\x21\\xde\\xe8\\x9a\\x15\\x4f\\x79\\x56\\x14\\x73\\xce\\x8b\\x3f\\x47\\x42\\xc7\\x44\\xf0\\xae\\x93\\x51\\x31\\x5d\\x60\\x72\\x01\\x8d\\xb2\\x60\\x6a\\xb5\\x17\\xd6\\x75\\x10\\x82\\x16\\x28\\xdb\\x16\\x8e\\x71\\x74\\xad\\xee\\xa6\\x88\\xd4\\x82\\x60\\x5b\\xe2\\xa4\\x16\\xe2\\xa3\\x5f\\xea\\xf8\\xb0\\x7d\\xc0\\x0d\\x79\\x4f\\x8c\\x0f\\x64\\x89\\x95\\xc1\\x7d\\xa8\\x8d\\x6e\\x70\\xa3\\x1b\\xb2\\x9e\\xa0\\x3c\\x86\\xf8\\xe2\\x7b\\x6a\\x51\\x27\\x5c\\x4c\\xbc\\x8e\\xa5\\xec\\xcf\\xa5\\xe0\\xda\\x05\\xdb\\x2a\\xd1\\xce\\xae\\x41\\x3a\\x56\\x8e\\x13\\xb1\\xd7\\xce\\xe2\\xd7\\x45\\x75\\x06\\xae\\xe1\\x38\\x42\\xde\\x28\\x89\\x0d\\x30\\xdc\\x10\\xf3\\xde\\x42\\xd9\\x09\\x46\\xc9\\xf7\\xd4\\x57\\x0c\\xe4\\x7b\\xdf\\x2d\\xb4\\x4d\\x9a\\xde\\x0d\\x04\\xe9\\x95\\xc4\\xae\\x47\\x6d\\x0c\\x6a\\x42\\xf0\\xd4\\x05\\xb3\\x8e\\xb4\\x3a\\x08\\x3e\\x6f\\xaa\\x8f\\x77\\x0f\\x15\\xca\\xed\\x23\\x3e\\x97\\xbb\\x5d\\xb9\\xad\\x1e\\x7f\\xc7\\xa8\\xa5\\x77\\x41\\x40\\x27\\x9a\\x51\\xfa\\x38\\x18\\x4d\\x2d\\x46\\xc5\\xac\\xac\\x4c\\x70\\x5d\\x24\\xdc\\x5e\\xed\\x2e\\x3f\\x96\\xdb\\xaa\\xfc\\x6b\\x73\\xb3\\xa9\\x1e\\xe1\\x18\\xd7\\x9b\\x6a\\x7b\\xb5\\xdf\\xe3\\xfa\\x6e\\x87\\x12\\xf7\\xe5\\xae\\xda\\x5c\\x3e\\xdc\\x94\\x3b\\xdc\\x3f\\xec\\xee\\xef\\xf6\\x57\\x39\\xf6\\x14\\xab\\xa2\\x98\\xff\\xff\\x33\\xef\\xd2\\xf6\\x98\\xd0\\x92\\x28\\x6d\\xfc\\x32\\x89\\x47\\x17\\xe0\\x7b\\x17\\x4c\\x8b\\x5e\\x9d\\x08\\x4c\\x0d\\xe9\\x13\\xb5\\x50\\x68\\xdc\\x30\\xbd\\x7a\\xa9\\x91\\xa5\\x8c\\xb3\\x87\\xd4\\xf3\\x7f\\x1e\\x24\\x36\\x1d\\xac\\x93\\x35\\x3c\\x11\\xfe\\xe8\\x45\\x86\\x8b\\xa2\\x18\\xc7\\x31\\x3f\\xd8\\x90\\x3b\\x3e\\x14\\x66\\xc6\\xf9\\xe2\\xcf\\x3c\\x8b\\x4c\\x37\\x34\\x2e\\x58\\xa9\\x58\\x35\\xc4\\x71\\x3f\\x0a\\x5e\\x1d\\x07\\x43\\x90\\xf9\\x29\\xed\\xe5\\xef\\xe0\\x05\\x29\\xd0\\x27\\xb5\\x0d\\xc7\\x9a\\x38\\x16\\xaf\\xad\\x17\\x0e\\x4d\\xbc\\x87\\xf4\\xf7\\xa1\\xaf\\xd4\\xa4\\xdd\\xd6\\x53\\x8a\\xbc\\xfa\\x74\\x8b\\x9a\\xba\\x38\\x99\\x74\\xc9\\xac\\xac\\x57\\x29\\x3c\\x5d\\xb5\\xb6\\x4a\\xa8\\xcd\\xb3\\x6f\\xd9\\xaa\\x28\\x66\\x43\\x12\\x7f\\xf9\\xd1\\x13\\x39\\xcf\\x5d\\xff\\x8a\\xf2\\x6c\\x95\\xd2\\x2e\\xf0\\x7e\\x9d\\x25\\x8a\\x17\\x1a\\x62\\x27\\xda\\x9e\\xdc\\x17\\x6a\\xd3\\x6a\\xe8\\x44\\x3c\\xa5\\x66\\xdb\\xf3\\xa9\\x45\\xfc\\xa7\\xdb\\x05\\xe3\\xf3\\x6c\\x15\\xf3\\x2e\\xd0\\x05\\x9b\\x0c\\x6f\\x8c\\x3b\\xac\\xd1\\xd6\\x6f\\xf1\\x0d\\xd2\\x6b\\x9f\\x27\\xcb\\xbb\\x77\\x78\\x3a\\x6b\\x3a\\x15\\x8c\\x3c\\xf7\\x8c\\xfd\\xf9\\x08\\x55\\x23\\x41\\x99\\
x33\\x3a\\x76\\xea\\x3a\\x28\\xbb\\xd8\\xbb\\xf9\\x3c\\x56\\x29\\xff\\x65\\xdf\\xa2\\x60\\xf2\\x2f\\x39\\x94\\x31\\xc9\\x33\\x03\\xfd\\x7c\\x58\\x35\\x91\\x85\\x16\\xe2\\x38\\x50\\xb8\\x13\\x71\\xfc\\xa8\\x80\\x49\\x02\\x5b\\x9f\\x70\\x31\\xa7\\xd3\\x56\\x99\\x05\\x7c\\x3e\\xbe\\x38\\x70\\x6d\\x0f\\x79\\xb6\\x9a\\xdf\\x9f\\x15\\xd5\\xc8\\xd7\\xa5\\xa8\\x99\\xf4\\x6c\\x16\\x78\\xca\\x9e\\xb2\\x7f\\x02\\x00\\x00\\xff\\xff\\xdd\\xd8\\xa1\\x0a\\x5c\\x05\\x00\\x00\")\n\nfunc opcount_tracerJsBytes() ([]byte, error) {\n\treturn bindataRead(\n\t\t_opcount_tracerJs,\n\t\t\"opcount_tracer.js\",\n\t)\n}\n\nfunc opcount_tracerJs() (*asset, error) {\n\tbytes, err := opcount_tracerJsBytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo := bindataFileInfo{name: \"opcount_tracer.js\", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}\n\ta := &asset{bytes: bytes, info: info, digest: [32]uint8{0x27, 0xe, 0x97, 0x88, 0x9b, 0x53, 0xbb, 0x20, 0x44, 0xd8, 0xf5, 0xeb, 0x41, 0xd2, 0x7e, 0xd6, 0xda, 0x6b, 0xf5, 0xaf, 0x0, 0x75, 0x9f, 0xd9, 0x22, 0xc, 0x6e, 0x74, 0xac, 0x2a, 0xa9, 0xa7}}\n\treturn a, nil\n}\n\nvar _prestate_tracerJs = []byte(\"\\x1f\\x8b\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\x9c\\x57\\xdd\\x6f\\xdb\\x38\\x12\\x7f\\xb6\\xfe\\x8a\\x41\\x5f\\x6c\\x5d\\x5d\\xb9\\xcd\\x02\\x7b\\x80\\x73\\x39\\x40\\x75\\xdd\\x36\\x40\\x36\\x09\\x6c\\xe7\\x72\\xb9\\xc5\\x3e\\x50\\xe4\\x48\\xe6\\x9a\\x26\\x05\\x92\\xb2\\xe3\\x2b\\xf2\\xbf\\x1f\\x86\\xfa\\xf0\\x47\\x93\\xa6\\x7b\\x6f\\x16\\x39\\xfc\\xcd\\xf7\\x6f\\xc6\\xa3\\x11\\x4c\\x4c\\xb9\\xb3\\xb2\\x58\\x7a\\x38\\x7b\\xff\\xe1\\xef\\xb0\\x58\\x22\\x14\\xe6\\x1d\\xfa\\x25\\x5a\\xac\\xd6\\x90\\x56\\x7e\\x69\\xac\\x8b\\x46\\x23\\x58\\x2c\\xa5\\x83\\x5c\\x2a\\x04\\xe9\\xa0\\x64\\xd6\\x83\\xc9\\xc1\\x9f\\xc8\\x2b\\x99\\x59\\x66\\x77\\x49\\x34\\x1a\\xd5\\x6f\\x9e\\xbd\\x26\\x84\\xdc\\x22\\x82\\x33\\xb9\\xdf\\x32\\x8b\\x63\\xd8\\x99\\x0a\\x38\\xd3\\x60\\x51\\x48\\xe7\\xad\\xcc\\x2a\\x8f\\x20\\x3d\\x30\\x2d\\x46\\xc6\\xc2\\xda\\x08\\x99\\xef\\x08\\x52\\x7a\\xa8\\xb4\\x40\\x1b\\x54\\x7b\\xb4\\x6b\\xd7\\xda\\xf1\\xe5\\xfa\\x0e\\xae\\xd0\\x39\\xb4\\xf0\\x05\\x35\\x5a\\xa6\\xe0\\xb6\\xca\\x94\\xe4\\x70\\x25\\x39\\x6a\\x87\\xc0\\x1c\\x94\\x74\\xe2\\x96\\x28\\x20\\x0b\\x70\\xf4\\xf0\\x33\\x99\\x32\\x6f\\x4c\\x81\\xcf\\xa6\\xd2\\x82\\x79\\x69\\xf4\\x10\\x50\\x92\\xe5\\xb0\\x41\\xeb\\xa4\\xd1\\xf0\\x4b\\xab\\xaa\\x01\\x1c\\x82\\xb1\\x04\\x32\\x60\\x9e\\x1c\\xb0\\x60\\x4a\\x7a\\x17\\x03\\xd3\\x3b\\x50\\xcc\\xef\\x9f\\xfe\\x44\\x40\\xf6\\x7e\\x0b\\x90\\x3a\\xa8\\x59\\x9a\\x12\\xc1\\x2f\\x99\\x27\\xaf\\xb7\\x52\\x29\\xc8\\x10\\x2a\\x87\\x79\\xa5\\x86\\x84\\x96\\x55\\x1e\\xee\\x2f\\x17\\x5f\\x6f\\xee\\x16\\x90\\x5e\\x3f\\xc0\\x7d\\x3a\\x9b\\xa5\\xd7\\x8b\\x87\\x73\\xd8\\x4a\\xbf\\x34\\x95\\x07\\xdc\\x60\\x0d\\x25\\xd7\\xa5\\x92\\x28\\x60\\xcb\\xac\\x65\\xda\\xef\\xc0\\xe4\\x84\\xf0\\xdb\\x74\\x36\\xf9\\x9a\\x5e\\x2f\\xd2\\x8f\\x97\\x57\\x97\\x8b\\x07\\x30\\x16\\x3e\\x5f\\x2e\\xae\\xa7\\xf3\\x39\\x7c\\xbe\\x99\\x41\\x0a\\xb7\\xe9\\x6c\\x71\\x39\\xb9\\xbb\\x4a\\x67\\x70\\x7b\\x37\\xbb\\xbd\\x99\\x4f\\x13\\x98\\x23\\x59\\x85\\xf4\\xfe\\xf5\\x98\\xe7\\x21\\x7b\\x16\\x41\\xa0\\x67\\x52\\xb9\\x36\\x12\\x0f\\xa6\\x02\\xb7\\x34\\x95\\x12\\xb0\\x64\\x1b\\x04\\x8b\\x1c\\xe5\\x06\\x05\\x30\\xe0\\xa6\\xdc\\xfd\\x74\\x52\\x09\\x8b\\x29\\xa3\\x8b\\xe0\\xf3\\x8b\\x05\\x09\\x97\\x39\\x68\\xe3\\x87\\xe0\\x10\\xe1\\x1f\\x4b\\xef\\xcb\\xf1\\x68\\xb4\\xdd\\x6e\\x93\\x42\\x57\\x89\\xb1\\xc5\\x48\\xd5\\x70\\x6e\\xf4\\xcf\\x24\\x22\\xcc\\xd2\\xa2\\xf3\\xcc\\xe3\\xc2\\x32\\x8e\\x16\\x4c\\xe5\\x
cb\\xca\\x3b\\x70\\x55\\x9e\\x4b\\x2e\\x51\\x7b\\x90\\x3a\\x37\\x76\\x1d\\x2a\\x05\\xbc\\x01\\x6e\\x91\\x79\\x04\\x06\\xca\\x70\\xa6\\x00\\x1f\\x91\\x57\\xe1\\xae\\x8e\\x74\\x28\\x57\\xcb\\xb4\\x63\\x3c\\x9c\\xe6\\xd6\\xac\\xc9\\xd7\\xca\\x79\\xfa\\xe1\\x1c\\xae\\x33\\x85\\x02\\x0a\\xd4\\xe8\\xa4\\x83\\x4c\\x19\\xbe\\x4a\\xa2\\x6f\\x51\\xef\\xc0\\x18\\xaa\\x93\\xe0\\x61\\x23\\x14\\x6a\\x63\\x8b\\x7d\\x8b\\x90\\x55\\x52\\x09\\xa9\\x8b\\x24\\xea\\xb5\\xd2\\x63\\xd0\\x95\\x52\\xc3\\x28\\x40\\x28\\x63\\x56\\x55\\x99\\x72\\x6e\\xaa\\x60\\xfb\\x9f\\xc8\\x7d\\x0d\\xe6\\x4a\\xe4\\x32\\xa7\\xe2\\x60\\xdd\\xad\\x37\\xe1\\xaa\\xd3\\x6b\\x32\\x92\\x4f\\xa2\\xde\\x11\\xcc\\x18\\xf2\\x4a\\x07\\x77\\x06\\x4c\\x08\\x3b\\x04\\x91\\xc5\\xdf\\xa2\\x5e\\x6f\\xc3\\x2c\\x61\\xc1\\x05\\x78\\xf3\\x15\\x1f\\xc3\\x65\\x7c\\x1e\\xf5\\x7a\\x32\\x87\\x81\\x5f\\x4a\\x97\\xb4\\xc0\\xbf\\x33\\xce\\xff\\x80\\x8b\\x8b\\x8b\\xd0\\xd4\\xb9\\xd4\\x28\\x62\\x20\\x88\\xde\\x73\\x62\\xf5\\x4d\\x2f\\x63\\x8a\\x69\\x8e\\x63\\xe8\\xbf\\x7f\\xec\\xc3\\x5b\\x10\\x59\\x52\\xa0\\xff\\x58\\x9f\\xd6\\xca\\x12\\x6f\\xe6\\xde\\x4a\\x5d\\x0c\\x3e\\xfc\\x1a\\x0f\\xc3\\x2b\\x6d\\xc2\\x1b\\x68\\xc4\\xaf\\x4d\\x27\\x5c\\xdf\\x73\\x23\\xc2\\x75\\x63\\x73\\x2d\\x35\\x31\\xa2\\x11\\x6a\\xa4\\x9c\\x37\\x96\\x15\\x38\\x86\\x6f\\x4f\\xf4\\xfd\\x44\\x5e\\x3d\\x45\\xbd\\xa7\\xa3\\x28\\xcf\\x6b\\xa1\\x17\\xa2\\xdc\\x40\\x00\\x6a\\x6f\\xbb\\x3a\\x2f\\x24\\x75\\xea\\x61\\x02\\x02\\xde\\x8f\\x92\\x30\\x6f\\x4d\\x39\\x49\\xc2\\x0a\\x77\\xaf\\x67\\x82\\x2e\\xa4\\x78\\xec\\x2e\\x56\\xb8\\x8b\\xcf\\xa3\\x17\\x53\\x94\\x34\\x46\\xff\\x2e\\xc5\\xe3\\xcf\\xe6\\xeb\\xe4\\xcd\\x51\\x5c\\xe7\\x24\\xb5\\xb7\\x37\\x8e\\x4f\\xe2\\x68\\xd1\\x55\\xca\\x53\\xb9\\x4b\\xbd\\x31\\x2b\\x22\\xae\\x25\\xc5\\x47\\xa9\\x10\\x12\\x53\\x52\\xb6\\x5c\\xcd\\x1c\\x19\\xa2\\x06\\xe9\\xd1\\x32\\xa2\\x4e\\xb3\\x41\\x4b\\x53\\x03\\x2c\\xfa\\xca\\x6a\\xd7\\x85\\x31\\x97\\x9a\\xa9\\x16\\xb8\\x89\\xba\\xb7\\x8c\\xd7\\x3d\\x53\\x9f\\x1f\\xc4\\x92\\xfb\\xc7\\x10\\xc5\\xe0\\xdd\\x68\\x04\\xa9\\x07\\x72\\x11\\x4a\\x23\\xb5\\x1f\\xc2\\x16\\x41\\x23\\x0a\\x6a\\x7c\\x81\\xa2\\xe2\\x3e\\xe0\\xf5\\x37\\x4c\\x55\\xd8\\xaf\\x9b\\x9b\\x28\\x32\\x3c\\x35\\x15\\x4d\\x82\\x83\\xe6\\x1f\\x06\\x03\\xd7\\x66\\x13\\x46\\x5c\\xc6\\xf8\\x0a\\x9a\\x86\\x33\\x56\\x16\\x52\\x47\\x4d\\x38\\x8f\\x9a\\x8d\\x2c\\x4a\\x08\\x38\\x98\\x15\\x72\\x45\\x49\\xa4\\x93\\x8f\\x4c\\xc1\\x05\\x64\\xb2\\xb8\\xd4\\xfe\\x24\\x79\\x75\\xd0\\xdb\\xa7\\xf1\\x1f\\x49\\xd3\\x3c\\x89\\x23\\xc2\\x1b\\x9c\\xc5\\x43\\xf8\\xf0\\x6b\\x57\\x11\\xde\\x10\\x14\\xbc\\x0e\\xe6\\xcd\\xcb\\x50\\xd1\\x69\\x31\\x3c\\xff\\x2c\\xa8\\xa1\\x0e\\x7e\\x1b\\xb4\\x26\\xae\\xca\\x28\\x1d\\xb5\\x9f\\x21\\x8e\\xc7\\x5d\\x7c\\xfe\\x03\\xdc\\x63\\xdf\\x5a\\xdc\\x26\\x34\\x09\\x13\\xe2\\x10\\x94\\x3e\\xc3\\x77\\xc1\\xdc\\x9d\\x43\\x01\\x6f\\x81\\xbe\\xa4\\x26\\x55\\x4e\\xf2\\x2f\\xcc\\xc5\\xf0\\x37\\x68\\x24\\x6e\\xad\\xe4\\xdf\\x59\\x52\\xe7\\xf5\\x13\\x72\\x8b\\x6b\\x1a\\x05\\x94\\x3a\\xce\\x94\\x42\\xdb\\x77\\x10\\x88\\x66\\xd8\\xd4\\x60\\x48\\x32\\xae\\x4b\\xbf\\x6b\\x07\\x84\\x67\\xb6\\x40\\xef\\x5e\\xf7\\x26\\xe0\\xbc\\x7b\\xd7\\xf2\\x66\\x88\\xdf\\xae\\x44\\xb8\\xb8\\x80\\xfe\\x64\\x36\\x4d\\x17\\xd3\\x7e\\xd3\\x7b\\xa3\\x11\\xdc\\x63\\x58\\x9f\\x32\\x25\\x33\\xa1\\x76\\x20\\x50\\xa1\\xc7\\xda\\x2e\\xa3\\x43\\x5c\\x3b\\x1e\\x19\\xd2\\x1e\\x44\\x1b\\x0a\\x3e\\x4a\\xe7\\xa5\\x2e\\xa0\\xa6\\x97\\x2d\\x0d\\xe3\\x06\\x2e\\x34\\x16\\x67\\x15\\x85\\xe7\\x74\\x72\\x79\\x43\\x6b\\x88\\x45\\x22\\x23\\x1a\\x1a\\xa1\\x47\\x99\\x92\\xdd\\xda\\x92\\x4b\\xeb\\x3c\\x94\\x8a\\x
71\\x4c\\x08\\xaf\\x33\\xe6\\xe5\\xa2\\x68\\xda\\x9f\\x54\\xcf\\x42\\xdf\\x06\\xa0\\xfd\\x54\\x64\\x8a\\xa6\\x2a\\xa9\\x77\\x30\\x68\\x31\\xe2\\xa8\\xd7\\xb3\\xad\\xf4\\x01\\xf6\\xf9\\x9e\\x47\\x9c\\xc7\\xf2\\x90\\x45\\x68\\x1b\\xc1\\x0d\\x12\\xef\\x06\\x0a\\xa9\\x27\\x28\\xe9\\xfa\\xd7\\x6f\\xcd\\xc8\\x46\\x97\\x44\\x3d\\x7a\\x77\\x40\\x06\\xca\\x14\\xc7\\x64\\x20\\xea\\xb0\\xf0\\xca\\x5a\\xca\\x7f\\xc7\\xdb\\x39\\x11\\xc3\\x9f\\x95\\xf3\\x14\\x53\\x4b\\xe1\\x69\\x28\\xe6\\x39\\x66\\x0d\\x3c\\x4a\\x23\\x3a\\xfe\\x9e\\x41\\x69\\xd8\\x85\\xe1\\x42\\xea\\x9a\\xd1\\x56\\xaf\\x80\\xa5\\xf1\\xa8\\xbd\\x64\\x4a\\xed\\x28\\x0f\\x5b\\x4b\\xbb\\x0f\\x6d\\x3b\\x43\\x70\\x92\\xa4\\x02\\x4d\\x05\\x51\\xa9\\xb9\\xaa\\x44\\x5d\\x06\\xa1\\xf8\\x1b\\x3c\\x17\\x6c\\x3e\\x5e\\x9a\\xd6\\xe8\\x1c\\x2b\\x30\\xa1\\x4a\\xca\\xe5\\x63\\xb3\\x76\\x6a\\xe8\\xd7\\xcc\\x38\\x88\\xfb\\x49\\x67\\xe4\\x31\\x2f\\x29\\x53\\x24\\x6d\\x91\\x11\\xb7\\xa7\\x42\\x58\\x74\\x6e\\x10\\x37\\x44\\xd5\\x65\\xf6\\x7e\\x89\\x9a\\x82\\x0f\\x1a\\xb7\\xd0\\xed\\x33\\x8c\\x73\\xda\\xef\\xc4\\x10\\x98\\x10\\xc4\\x87\\x27\\xbb\\x47\\xd4\\xeb\\xb9\\xad\\xf4\\x7c\\x09\\x41\\x93\\x29\\xf7\\xbd\\x18\\x37\\xf5\\xcf\\x99\\x43\\x78\\x33\\xfd\\xf7\\x62\\x72\\xf3\\x69\\x3a\\xb9\\xb9\\x7d\\x78\\x33\\x86\\xa3\\xb3\\xf9\\xe5\\x7f\\xa6\\xdd\\xd9\\xc7\\xf4\\x2a\\xbd\\x9e\\x4c\\xdf\\x8c\\xc3\\x40\\x7f\\xc6\\x21\\x6f\\x5a\\x17\\x48\\xa1\\xf3\\x8c\\xaf\\x92\\x12\\x71\\x35\\x78\\x7f\\xcc\\x03\\x7b\\x07\\x7b\\xbd\\xcc\\x22\\x5b\\x9d\\xef\\x8d\\xa9\\x1b\\xb4\\xd1\\xd1\\xf2\\x34\\x5c\\xc0\\x8b\\xc1\\x3a\\x7f\\xd9\\x9a\\x49\\x23\\x3f\\x68\\xd9\\x7f\\xbf\\xbf\\x04\\xaa\\x78\\xdd\\x8e\\xb3\\xbf\\x6c\\x48\\xe8\\x1d\\xc6\\x57\\x63\\x70\\x4c\\xd1\\xda\\x2c\\xff\\x4b\\x7f\\x77\\xf2\\xdc\\xa1\\x1f\\x02\\x6a\\x61\\xb6\\xc4\\x7c\\x1d\\x6a\\x7d\\xd3\\xe0\\x1e\\x84\\xec\\x43\\x5c\\xd3\\xee\\x4d\\x3e\\x88\\x3b\\x61\\x02\\xfb\\x5e\\xf4\\xec\\x39\\x51\\xd4\\x02\\x2e\\x5a\\xf4\\xb7\\xe1\\xe5\\xeb\\x81\\x3a\\x6b\\x22\\x75\\xa2\\xe0\\x97\\x93\\xb5\\x30\\xdc\\xaf\\x71\\x6d\\xec\\xae\\x99\\x61\\x07\\xfe\\xfd\\x38\\xaa\\xe9\\xd5\\x55\\x57\\x4f\\xf4\\x41\\x45\\xd6\\x1d\\x7c\\x9a\\x5e\\x4d\\xbf\\xa4\\x8b\\xe9\\x91\\xd4\\x7c\\x91\\x2e\\x2e\\x27\\xf5\\xd1\\x5f\\x2e\\xbc\\x0f\\x3f\\x5d\\x78\\xfd\\xf9\\x7c\\x71\\x33\\x9b\\xf6\\xc7\\xcd\\xd7\\xd5\\x4d\\xfa\\xa9\\xff\\x9d\\xc2\\x66\\x75\\xfc\\x51\\xeb\\x7a\\x73\\x6f\\xac\\xf8\\x7f\\x3a\\xe0\\x60\\x8d\\xcb\\xd9\\x73\\x5b\\x5c\\xa0\\x76\\xee\\xab\\x93\\x7f\\x49\\xc0\\x74\\xcb\\xca\\x79\\xfd\\x4f\\xb1\\x17\\xde\\x3f\\xcb\\xc3\\x4f\\xd1\\x53\\xf4\\xbf\\x00\\x00\\x00\\xff\\xff\\x3a\\xb7\\x37\\x41\\xbf\\x10\\x00\\x00\")\n\nfunc prestate_tracerJsBytes() ([]byte, error) {\n\treturn bindataRead(\n\t\t_prestate_tracerJs,\n\t\t\"prestate_tracer.js\",\n\t)\n}\n\nfunc prestate_tracerJs() (*asset, error) {\n\tbytes, err := prestate_tracerJsBytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo := bindataFileInfo{name: \"prestate_tracer.js\", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}\n\ta := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0x9, 0xf9, 0x44, 0x13, 0x31, 0x89, 0xf7, 0x35, 0x9a, 0xc6, 0xf0, 0x86, 0x9d, 0xb2, 0xe3, 0x57, 0xe2, 0xc0, 0xde, 0xc9, 0x3a, 0x4c, 0x4a, 0x94, 0x90, 0xa5, 0x92, 0x2f, 0xbf, 0xc0, 0xb8}}\n\treturn a, nil\n}\n\nvar _trigram_tracerJs = 
[]byte(\"\\x1f\\x8b\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\x8c\\x94\\x4f\\x6f\\xe3\\x36\\x10\\xc5\\xef\\xfe\\x14\\xaf\\x27\\x27\\x88\\xd7\\x4a\\xda\\x4b\\xe1\\xd4\\x05\\xdc\\x6c\\xb2\\x6b\\x20\\x6b\\x07\\xb6\\xd2\\x45\\x10\\xe4\\x40\\x4b\\x23\\x89\\x08\\x4d\\x0a\\xe4\\xd0\\x5e\\x21\\xc8\\x77\\x2f\\xa8\\x3f\\xfe\\x13\\xb8\\xed\\xfa\\x64\\x70\\xe6\\xfd\\xe6\\xcd\\x70\\xc4\\x28\\xc2\\x8d\\x29\\x2b\\x2b\\xf3\\x82\\xf1\\xeb\\xe5\\xd5\\xef\\x88\\x0b\\x42\\x6e\\x3e\\x11\\x17\\x64\\xc9\\xaf\\x31\\xf1\\x5c\\x18\\xeb\\x7a\\x51\\x84\\xb8\\x90\\x0e\\x99\\x54\\x04\\xe9\\x50\\x0a\\xcb\\x30\\x19\\xf8\\x43\\xbe\\x92\\x2b\\x2b\\x6c\\x35\\xec\\x45\\x51\\xa3\\x39\\x19\\x0e\\x84\\xcc\\x12\\xc1\\x99\\x8c\\xb7\\xc2\\xd2\\x08\\x95\\xf1\\x48\\x84\\x86\\xa5\\x54\\x3a\\xb6\\x72\\xe5\\x99\\x20\\x19\\x42\\xa7\\x91\\xb1\\x58\\x9b\\x54\\x66\\x55\\x40\\x4a\\x86\\xd7\\x29\\xd9\\xba\\x34\\x93\\x5d\\xbb\\xce\\xc7\\x97\\xd9\\x23\\xee\\xc9\\x39\\xb2\\xf8\\x42\\x9a\\xac\\x50\\x78\\xf0\\x2b\\x25\\x13\\xdc\\xcb\\x84\\xb4\\x23\\x08\\x87\\x32\\x9c\\xb8\\x82\\x52\\xac\\x6a\\x5c\\x10\\xde\\x05\\x2b\\xcb\\xd6\\x0a\\xee\\x8c\\xd7\\xa9\\x60\\x69\\xf4\\x00\\x24\\x83\\x73\\x6c\\xc8\\x3a\\x69\\x34\\x7e\\xeb\\x4a\\xb5\\xc0\\x01\\x8c\\x0d\\x90\\x33\\xc1\\xa1\\x01\\x0b\\x53\\x06\\xdd\\x39\\x84\\xae\\xa0\\x04\\xef\\xa5\\x3f\\x31\\x90\\x7d\\xdf\\x29\\xa4\\xae\\xcb\\x14\\xa6\\x24\\x70\\x21\\x38\\x74\\xbd\\x95\\x4a\\x61\\x45\\xf0\\x8e\\x32\\xaf\\x06\\x81\\xb6\\xf2\\x8c\\xef\\xd3\\xf8\\xeb\\xfc\\x31\\xc6\\x64\\xf6\\x84\\xef\\x93\\xc5\\x62\\x32\\x8b\\x9f\\xae\\xb1\\x95\\x5c\\x18\\xcf\\xa0\\x0d\\x35\\x28\\xb9\\x2e\\x95\\xa4\\x14\\x5b\\x61\\xad\\xd0\\x5c\\xc1\\x64\\x81\\xf0\\xed\\x76\\x71\\xf3\\x75\\x32\\x8b\\x27\\x7f\\x4d\\xef\\xa7\\xf1\\x13\\x8c\\xc5\\xdd\\x34\\x9e\\xdd\\x2e\\x97\\xb8\\x9b\\x2f\\x30\\xc1\\xc3\\x64\\x11\\x4f\\x6f\\x1e\\xef\\x27\\x0b\\x3c\\x3c\\x2e\\x1e\\xe6\\xcb\\xdb\\x21\\x96\\x14\\x5c\\x51\\xd0\\xff\\xff\\xcc\\xb3\\xfa\\xf6\\x2c\\x21\\x25\\x16\\x52\\xb9\\x6e\\x12\\x4f\\xc6\\xc3\\x15\\xc6\\xab\\x14\\x85\\xd8\\x10\\x2c\\x25\\x24\\x37\\x94\\x42\\x20\\x31\\x65\\xf5\\xd3\\x97\\x1a\\x58\\x42\\x19\\x9d\\xd7\\x3d\\xff\\xeb\\x42\\x62\\x9a\\x41\\x1b\\x1e\\xc0\\x11\\xe1\\x8f\\x82\\xb9\\x1c\\x45\\xd1\\x76\\xbb\\x1d\\xe6\\xda\\x0f\\x8d\\xcd\\x23\\xd5\\xe0\\x5c\\xf4\\xe7\\xb0\\xd7\\x7b\\xeb\\x01\\x40\\x14\\xa1\\x90\\x8e\\xc3\\xe5\\x04\\xec\\x5a\\x94\\xb5\\x2b\\x2b\\x73\\x2b\\xd6\\x48\\x8c\\xd7\\x4c\\xd6\\xd5\\xa9\\x21\\x6f\\x84\\xb7\\xf7\\x41\\x27\\x54\\xc2\\xf1\\xbc\\x0c\\xd2\\xf0\\x0f\\xa6\\x24\\x5b\\xef\\x54\\x1d\\x6f\\x82\\x6e\\x84\\xe7\\x7e\\x7f\\xd0\\xef\\xbf\\x0c\\x76\\xa7\\x9f\\xa9\\xe4\\x62\\x84\\xcb\\xe6\\xa4\\x65\\x39\\xa6\\x9a\\x24\\xf5\\xc6\\xbc\\x52\\x5a\\x8f\\x94\\x36\\x64\\x2b\\x98\\x32\\x31\\x69\\xbb\\x22\\xc1\\xe2\\xdf\\xdf\\x40\\x3f\\x28\\xf1\\x4c\\x6e\\x58\\x13\\x82\\x74\\x84\\xcc\\xeb\\x24\\x14\\x3f\\x53\\x26\\x1f\\x20\\x5d\\x9d\\xe3\\x6d\\xc7\\xdf\\x08\\x8b\\x34\\x54\\xc5\\x18\\xca\\xe4\\xc3\\x9c\\x1a\\x13\\x67\\xe7\\xd7\\xbb\\x1c\\x99\\xe1\\xac\\xc9\\xf9\\x65\\x0c\\x2e\\xa4\\x1b\\xee\\xbc\\x9e\\xef\\x49\\xe1\\xb7\\x0b\\xce\\x4b\\x87\\x71\\xd7\\xdf\\xf5\\xe9\\x9c\\xcf\\x6d\\xd9\\x1a\\x7d\\x9c\\x63\\x89\\xbd\\xd5\\xfb\\xb3\\xf7\\x23\\xbf\\xa6\\x6c\\xcd\\x9a\\x72\\xc8\\x66\\xc9\\x56\\xea\\xfc\\xd0\\x6f\\xc8\\x79\\xa5\\x0a\\xe3\\x23\\x3f\\xcf\\x97\\x2f\\x17\\xfd\\x4f\\xfd\\x8b\\xa3\\xb3\\xab\\xe6\\xcc\\x94\\xc7\\xdd\\xd6\\x39\\xe1\\x52\\x9f\\x5f\\xa9\\x7a\\x39\\xd5\\xe4\\x2e\\x78\\x71\\x71\\xca\\x26\\x29\\x47\\xf8\\x2f\\x19\\xc6\\xb8\\x3a\\x25\\xfc\\xe0\\xf8\\x63\\x0f\\x57\\x07\\xc3\\xfc\\x10\\xc0\\x18\\x5d\\x1b\\xfb\\x3d\
\xcc\\x84\\x57\\x7c\\xb8\\x3c\\xdb\\xa2\\x7d\\x11\\x44\\xc2\\x5e\\xa8\\x76\\x5f\\xc2\\xeb\\x66\\x32\\x08\\xdd\\xad\\x54\\xd6\\x7c\\xab\\x81\\x52\\x23\\x4e\\x2e\\xd1\\xbe\\x8c\\x25\\x77\\xaa\\x8e\\x50\\xaa\\xae\\xd5\\x40\\x5d\\xf3\\xa5\\xaf\\x88\\x34\\x24\\x87\\x0f\\x82\\x52\\x98\\x0d\\xd9\\xf0\\xca\\xb7\\x57\\xee\\x3a\\x62\\x90\\x65\\x52\\x0b\\xd5\\xb1\\xdb\\x07\\x81\\xad\\x48\\xa4\\xce\\x1b\\x6b\\x4d\\xe8\\xc0\\x5b\\xc2\\x3f\\x0e\\x97\\xbb\\x61\\xee\\x27\\xbf\\x9b\\xce\\x7b\\xef\\x9f\\x00\\x00\\x00\\xff\\xff\\xb3\\x93\\x16\\xd5\\xfc\\x06\\x00\\x00\")\n\nfunc trigram_tracerJsBytes() ([]byte, error) {\n\treturn bindataRead(\n\t\t_trigram_tracerJs,\n\t\t\"trigram_tracer.js\",\n\t)\n}\n\nfunc trigram_tracerJs() (*asset, error) {\n\tbytes, err := trigram_tracerJsBytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo := bindataFileInfo{name: \"trigram_tracer.js\", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}\n\ta := &asset{bytes: bytes, info: info, digest: [32]uint8{0x40, 0x63, 0xe1, 0x42, 0x60, 0x7, 0x1b, 0x79, 0x47, 0x1, 0xa1, 0xbf, 0xc4, 0x66, 0x19, 0x9b, 0x2b, 0x5a, 0x1f, 0x82, 0x3d, 0xcf, 0xee, 0xe7, 0x60, 0x25, 0x2c, 0x4f, 0x13, 0x97, 0xc7, 0x18}}\n\treturn a, nil\n}\n\nvar _unigram_tracerJs = []byte(\"\\x1f\\x8b\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\xff\\x8c\\x94\\x41\\x6f\\xdb\\xc6\\x13\\xc5\\xef\\xfa\\x14\\xef\\x68\\x23\\xfa\\x8b\\xc9\\xbf\\x97\\x42\\x69\\x0a\\xb0\\x86\\x9d\\x08\\x70\\x64\\x43\\xa2\\x1b\\x18\\x45\\x0f\\x4b\\x72\\x48\\x2e\\xba\\xda\\x21\\x76\\x67\\xa5\\x08\\x81\\xbf\\x7b\\x31\\xa4\\x68\\xb9\\x85\\xdb\\x86\\x27\\x41\\x3b\\xef\\x37\\x6f\\xde\\x0e\\x99\\x65\\xb8\\xe2\\xfe\\x18\\x6c\\xdb\\x09\\xfe\\xff\\xf6\\xdd\\x8f\\x28\\x3a\\x42\\xcb\\xff\\x23\\xe9\\x28\\x50\\xda\\x21\\x4f\\xd2\\x71\\x88\\xb3\\x2c\\x43\\xd1\\xd9\\x88\\xc6\\x3a\\x82\\x8d\\xe8\\x4d\\x10\\x70\\x03\\xf9\\x5b\\xbd\\xb3\\x65\\x30\\xe1\\xb8\\x98\\x65\\xd9\\xa8\\x79\\xf5\\x58\\x09\\x4d\\x20\\x42\\xe4\\x46\\x0e\\x26\\xd0\\x12\\x47\\x4e\\xa8\\x8c\\x47\\xa0\\xda\\x46\\x09\\xb6\\x4c\\x42\\xb0\\x02\\xe3\\xeb\\x8c\\x03\\x76\\x5c\\xdb\\xe6\\xa8\\x48\\x2b\\x48\\xbe\\xa6\\x30\\xb4\\x16\\x0a\\xbb\\x38\\xf9\\xf8\\xb8\\x7e\\xc0\\x2d\\xc5\\x48\\x01\\x1f\\xc9\\x53\\x30\\x0e\\xf7\\xa9\\x74\\xb6\\xc2\\xad\\xad\\xc8\\x47\\x82\\x89\\xe8\\xf5\\x9f\\xd8\\x51\\x8d\\x72\\xc0\\xa9\\xf0\\x46\\xad\\x6c\\x4f\\x56\\x70\\xc3\\xc9\\xd7\\x46\\x2c\\xfb\\x39\\xc8\\xaa\\x73\\xec\\x29\\x44\\xcb\\x1e\\x3f\\x4c\\xad\\x4e\\xc0\\x39\\x38\\x28\\xe4\\xc2\\x88\\x0e\\x10\\xc0\\xbd\\xea\\x2e\\x61\\xfc\\x11\\xce\\xc8\\x59\\xfa\\x1d\\x81\\x9c\\xe7\\xae\\x61\\xfd\\xd0\\xa6\\xe3\\x9e\\x20\\x9d\\x11\\x9d\\xfa\\x60\\x9d\\x43\\x49\\x48\\x91\\x9a\\xe4\\xe6\\x4a\\x2b\\x93\\xe0\\xcb\\xaa\\xf8\\x74\\xf7\\x50\\x20\\x5f\\x3f\\xe2\\x4b\\xbe\\xd9\\xe4\\xeb\\xe2\\xf1\\x3d\\x0e\\x56\\x3a\\x4e\\x02\\xda\\xd3\\x88\\xb2\\xbb\\xde\\x59\\xaa\\x71\\x30\\x21\\x18\\x2f\\x47\\x70\\xa3\\x84\\xcf\\xd7\\x9b\\xab\\x4f\\xf9\\xba\\xc8\\x7f\\x59\\xdd\\xae\\x8a\\x47\\x70\\xc0\\xcd\\xaa\\x58\\x5f\\x6f\\xb7\\xb8\\xb9\\xdb\\x20\\xc7\\x7d\\xbe\\x29\\x56\\x57\\x0f\\xb7\\xf9\\x06\\xf7\\x0f\\x9b\\xfb\\xbb\\xed\\xf5\\x02\\x5b\\x52\\x57\\xa4\\xfa\\xff\\xce\\xbc\\x19\\x6e\\x2f\\x10\\x6a\\x12\\x63\\x5d\\x9c\\x92\\x78\\xe4\\x84\\xd8\\x71\\x72\\x35\\x3a\\xb3\\x27\\x04\\xaa\\xc8\\xee\\xa9\\x86\\x41\\xc5\\xfd\\xf1\\xbb\\x2f\\x55\\x59\\xc6\\xb1\\x6f\\x87\\x99\\xff\\x71\\x21\\xb1\\x6a\\xe0\\x59\\xe6\\x88\\x44\\xf8\\xa9\\x13\\xe9\\x97\\x59\\x76\\x38\\x1c\\x16\\xad\\x4f\\x0b\\x0e\\x6d\\xe6\\x46\\x5c\\xcc\\x7e\\x5e\\xcc\\x66\\xdf\\x66\\x00\\x90\\x65\\xe8\\x6c\\x14\\xbd\\x1c\\xc5\\xee\\x4c\\
xaf\\xae\\xb8\\xaf\\xb8\\xa6\\x08\\x61\\x54\\x9c\\xbc\\x50\\x88\\x43\\xb5\\x96\\x2e\\xf1\\xed\\x69\\x3e\\x69\\x3d\\xf7\\x71\\x2c\\x89\\xf0\\x69\\x57\\x52\\x18\\xe5\\x63\\xb9\\x9e\\x2e\\xf1\\xf6\\xb9\\x3a\\x0a\\xf5\\xda\\xc9\\xfa\\x3d\\xff\\x41\\xf5\\x90\\x1b\\xed\\x29\\x1c\\x4f\\x0d\\xc7\\x3d\\x50\\x1f\\xbf\\x7e\\x06\\x7d\\xa5\\x2a\\x09\\xc5\\xc5\\xa0\\x56\\xe9\\x12\\x4d\\xf2\\x95\\x6e\\xdf\\x85\\xe3\\x76\\x8e\\xba\\xbc\\xc4\\x38\\x85\\x3e\\x7b\\xa3\\xbb\\x89\\x0f\\x70\\xdc\\x2e\\xb8\\x5f\\x08\\x6f\\x25\\x58\\xdf\\x5e\\x5c\\xbe\\x7f\\xae\\xb1\\x0d\\x2e\\xa4\\xb3\\x71\\xa1\\x83\\xfc\\xc6\\xfd\\xef\\x97\\x67\\xbd\\x3e\\x7f\\x39\\x7b\\xf3\\xe6\\x2c\\x7c\\x7a\\xfe\\x45\\x2e\\x12\\xfe\\x45\\x85\\x0f\\x78\\xf7\\x9a\\x6e\\x28\\xd2\\x40\\x26\\xec\\x39\\xc4\\xc6\\x24\\x27\\x2f\\x73\\x39\\x74\\xa7\\x8d\\x36\\x95\\x24\\xe3\\x4e\\x51\\xe8\\xdb\\xc9\\x0d\\x8c\\x9f\\xd2\\x6a\\xc6\\x5d\\x53\\xca\\x80\\x78\\x35\\x9f\\xa7\\xf9\\x6c\\xea\\x13\\x28\\xbe\\xd6\\xc8\\x38\\x37\\x34\\x9b\\x2e\\x7d\\x58\\xd5\\x92\\xc8\\xc3\\x0a\\x05\\xa3\\xef\\x2a\\xef\\x29\\xe8\\x67\\x0a\\x81\\x24\\x05\\x1f\\x27\\xa2\\xca\\x1a\\xeb\\x8d\\x9b\\xd8\\xa7\\x8d\\x96\\x60\\x2a\\xeb\\xdb\\xd1\\xdb\\x78\\xf4\\xc2\\x5c\\x25\\x5f\\x5f\\x5e\\xdc\\xc8\\x3c\\xa7\\xf8\\x1c\\xcf\\xd3\\xec\\xcf\\x00\\x00\\x00\\xff\\xff\\xf1\\x91\\x30\\xae\\xbd\\x05\\x00\\x00\")\n\nfunc unigram_tracerJsBytes() ([]byte, error) {\n\treturn bindataRead(\n\t\t_unigram_tracerJs,\n\t\t\"unigram_tracer.js\",\n\t)\n}\n\nfunc unigram_tracerJs() (*asset, error) {\n\tbytes, err := unigram_tracerJsBytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo := bindataFileInfo{name: \"unigram_tracer.js\", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}\n\ta := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc, 0xe6, 0x5c, 0x88, 0x18, 0xa7, 0x85, 0x61, 0x18, 0xc6, 0xec, 0x17, 0xfc, 0xdf, 0x9d, 0xc0, 0x1b, 0x49, 0xf8, 0x8d, 0xf1, 0xeb, 0x35, 0xf3, 0xd, 0x3e, 0xf6, 0xa3, 0xac, 0x8c, 0xba, 0x74}}\n\treturn a, nil\n}\n\n// Asset loads and returns the asset for the given name.\n// It returns an error if the asset could not be found or\n// could not be loaded.\nfunc Asset(name string) ([]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Asset %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.bytes, nil\n\t}\n\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n}\n\n// AssetString returns the asset contents as a string (instead of a []byte).\nfunc AssetString(name string) (string, error) {\n\tdata, err := Asset(name)\n\treturn string(data), err\n}\n\n// MustAsset is like Asset but panics when Asset would return an error.\n// It simplifies safe initialization of global variables.\nfunc MustAsset(name string) []byte {\n\ta, err := Asset(name)\n\tif err != nil {\n\t\tpanic(\"asset: Asset(\" + name + \"): \" + err.Error())\n\t}\n\n\treturn a\n}\n\n// MustAssetString is like AssetString but panics when Asset would return an\n// error. 
It simplifies safe initialization of global variables.\nfunc MustAssetString(name string) string {\n\treturn string(MustAsset(name))\n}\n\n// AssetInfo loads and returns the asset info for the given name.\n// It returns an error if the asset could not be found or\n// could not be loaded.\nfunc AssetInfo(name string) (os.FileInfo, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"AssetInfo %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.info, nil\n\t}\n\treturn nil, fmt.Errorf(\"AssetInfo %s not found\", name)\n}\n\n// AssetDigest returns the digest of the file with the given name. It returns an\n// error if the asset could not be found or the digest could not be loaded.\nfunc AssetDigest(name string) ([sha256.Size]byte, error) {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\tif f, ok := _bindata[canonicalName]; ok {\n\t\ta, err := f()\n\t\tif err != nil {\n\t\t\treturn [sha256.Size]byte{}, fmt.Errorf(\"AssetDigest %s can't read by error: %v\", name, err)\n\t\t}\n\t\treturn a.digest, nil\n\t}\n\treturn [sha256.Size]byte{}, fmt.Errorf(\"AssetDigest %s not found\", name)\n}\n\n// Digests returns a map of all known files and their checksums.\nfunc Digests() (map[string][sha256.Size]byte, error) {\n\tmp := make(map[string][sha256.Size]byte, len(_bindata))\n\tfor name := range _bindata {\n\t\ta, err := _bindata[name]()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmp[name] = a.digest\n\t}\n\treturn mp, nil\n}\n\n// AssetNames returns the names of the assets.\nfunc AssetNames() []string {\n\tnames := make([]string, 0, len(_bindata))\n\tfor name := range _bindata {\n\t\tnames = append(names, name)\n\t}\n\treturn names\n}\n\n// _bindata is a table, holding each asset generator, mapped to its name.\nvar _bindata = map[string]func() (*asset, error){\n\t\"4byte_tracer.js\":    _4byte_tracerJs,\n\t\"bigram_tracer.js\":   bigram_tracerJs,\n\t\"call_tracer.js\":     call_tracerJs,\n\t\"evmdis_tracer.js\":   evmdis_tracerJs,\n\t\"noop_tracer.js\":     noop_tracerJs,\n\t\"opcount_tracer.js\":  opcount_tracerJs,\n\t\"prestate_tracer.js\": prestate_tracerJs,\n\t\"trigram_tracer.js\":  trigram_tracerJs,\n\t\"unigram_tracer.js\":  unigram_tracerJs,\n}\n\n// AssetDebug is true if the assets were built with the debug flag enabled.\nconst AssetDebug = false\n\n// AssetDir returns the file names below a certain\n// directory embedded in the file by go-bindata.\n// For example if you run go-bindata on data/... 
and data contains the\n// following hierarchy:\n//     data/\n//       foo.txt\n//       img/\n//         a.png\n//         b.png\n// then AssetDir(\"data\") would return []string{\"foo.txt\", \"img\"},\n// AssetDir(\"data/img\") would return []string{\"a.png\", \"b.png\"},\n// AssetDir(\"foo.txt\") and AssetDir(\"notexist\") would return an error, and\n// AssetDir(\"\") will return []string{\"data\"}.\nfunc AssetDir(name string) ([]string, error) {\n\tnode := _bintree\n\tif len(name) != 0 {\n\t\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\t\tpathList := strings.Split(canonicalName, \"/\")\n\t\tfor _, p := range pathList {\n\t\t\tnode = node.Children[p]\n\t\t\tif node == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n\t\t\t}\n\t\t}\n\t}\n\tif node.Func != nil {\n\t\treturn nil, fmt.Errorf(\"Asset %s not found\", name)\n\t}\n\trv := make([]string, 0, len(node.Children))\n\tfor childName := range node.Children {\n\t\trv = append(rv, childName)\n\t}\n\treturn rv, nil\n}\n\ntype bintree struct {\n\tFunc     func() (*asset, error)\n\tChildren map[string]*bintree\n}\n\nvar _bintree = &bintree{nil, map[string]*bintree{\n\t\"4byte_tracer.js\":    {_4byte_tracerJs, map[string]*bintree{}},\n\t\"bigram_tracer.js\":   {bigram_tracerJs, map[string]*bintree{}},\n\t\"call_tracer.js\":     {call_tracerJs, map[string]*bintree{}},\n\t\"evmdis_tracer.js\":   {evmdis_tracerJs, map[string]*bintree{}},\n\t\"noop_tracer.js\":     {noop_tracerJs, map[string]*bintree{}},\n\t\"opcount_tracer.js\":  {opcount_tracerJs, map[string]*bintree{}},\n\t\"prestate_tracer.js\": {prestate_tracerJs, map[string]*bintree{}},\n\t\"trigram_tracer.js\":  {trigram_tracerJs, map[string]*bintree{}},\n\t\"unigram_tracer.js\":  {unigram_tracerJs, map[string]*bintree{}},\n}}\n\n// RestoreAsset restores an asset under the given directory.\nfunc RestoreAsset(dir, name string) error {\n\tdata, err := Asset(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfo, err := AssetInfo(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())\n}\n\n// RestoreAssets restores an asset under the given directory recursively.\nfunc RestoreAssets(dir, name string) error {\n\tchildren, err := AssetDir(name)\n\t// File\n\tif err != nil {\n\t\treturn RestoreAsset(dir, name)\n\t}\n\t// Dir\n\tfor _, child := range children {\n\t\terr = RestoreAssets(dir, filepath.Join(name, child))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc _filePath(dir, name string) string {\n\tcanonicalName := strings.Replace(name, \"\\\\\", \"/\", -1)\n\treturn filepath.Join(append([]string{dir}, strings.Split(canonicalName, \"/\")...)...)\n}\n"
  },
  {
    "path": "eth/tracers/internal/tracers/bigram_tracer.js",
    "content": "// Copyright 2018 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n{\n    // hist is the counters of opcode bigrams\n    hist: {},\n    // lastOp is last operation\n    lastOp: '',\n    // execution depth of last op\n    lastDepth: 0,\n    // step is invoked for every opcode that the VM executes.\n    step: function(log, db) {\n        var op = log.op.toString();\n        var depth = log.getDepth();\n        if (depth == this.lastDepth){\n            var key = this.lastOp+'-'+op;\n            if (this.hist[key]){\n                this.hist[key]++;\n            }\n            else {\n                this.hist[key] = 1;\n            }\n        }\n        this.lastOp = op;\n        this.lastDepth = depth;\n    },\n    // fault is invoked when the actual execution of an opcode fails.\n    fault: function(log, db) {},\n    // result is invoked when all the opcodes have been iterated over and returns\n    // the final result of the tracing.\n    result: function(ctx) {\n        return this.hist;\n    },\n}\n"
  },
  {
    "path": "eth/tracers/internal/tracers/call_tracer.js",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// callTracer is a full blown transaction tracer that extracts and reports all\n// the internal calls made by a transaction, along with any useful information.\n{\n\t// callstack is the current recursive call stack of the EVM execution.\n\tcallstack: [{}],\n\n\t// descended tracks whether we've just descended from an outer transaction into\n\t// an inner call.\n\tdescended: false,\n\n\t// step is invoked for every opcode that the VM executes.\n\tstep: function(log, db) {\n\t\t// Capture any errors immediately\n\t\tvar error = log.getError();\n\t\tif (error !== undefined) {\n\t\t\tthis.fault(log, db);\n\t\t\treturn;\n\t\t}\n\t\t// We only care about system opcodes, faster if we pre-check once\n\t\tvar syscall = (log.op.toNumber() & 0xf0) == 0xf0;\n\t\tif (syscall) {\n\t\t\tvar op = log.op.toString();\n\t\t}\n\t\t// If a new contract is being created, add to the call stack\n\t\tif (syscall && (op == 'CREATE' || op == \"CREATE2\")) {\n\t\t\tvar inOff = log.stack.peek(1).valueOf();\n\t\t\tvar inEnd = inOff + log.stack.peek(2).valueOf();\n\n\t\t\t// Assemble the internal call report and store for completion\n\t\t\tvar call = {\n\t\t\t\ttype:    op,\n\t\t\t\tfrom:    toHex(log.contract.getAddress()),\n\t\t\t\tinput:   toHex(log.memory.slice(inOff, inEnd)),\n\t\t\t\tgasIn:   log.getGas(),\n\t\t\t\tgasCost: log.getCost(),\n\t\t\t\tvalue:   '0x' + log.stack.peek(0).toString(16)\n\t\t\t};\n\t\t\tthis.callstack.push(call);\n\t\t\tthis.descended = true\n\t\t\treturn;\n\t\t}\n\t\t// If a contract is being self destructed, gather that as a subcall too\n\t\tif (syscall && op == 'SELFDESTRUCT') {\n\t\t\tvar left = this.callstack.length;\n\t\t\tif (this.callstack[left-1].calls === undefined) {\n\t\t\t\tthis.callstack[left-1].calls = [];\n\t\t\t}\n\t\t\tthis.callstack[left-1].calls.push({\n\t\t\t\ttype:    op,\n\t\t\t\tfrom:    toHex(log.contract.getAddress()),\n\t\t\t\tto:      toHex(toAddress(log.stack.peek(0).toString(16))),\n\t\t\t\tgasIn:   log.getGas(),\n\t\t\t\tgasCost: log.getCost(),\n\t\t\t\tvalue:   '0x' + db.getBalance(log.contract.getAddress()).toString(16)\n\t\t\t});\n\t\t\treturn\n\t\t}\n\t\t// If a new method invocation is being done, add to the call stack\n\t\tif (syscall && (op == 'CALL' || op == 'CALLCODE' || op == 'DELEGATECALL' || op == 'STATICCALL')) {\n\t\t\t// Skip any pre-compile invocations, those are just fancy opcodes\n\t\t\tvar to = toAddress(log.stack.peek(1).toString(16));\n\t\t\tif (isPrecompiled(to)) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvar off = (op == 'DELEGATECALL' || op == 'STATICCALL' ? 
0 : 1);\n\n\t\t\tvar inOff = log.stack.peek(2 + off).valueOf();\n\t\t\tvar inEnd = inOff + log.stack.peek(3 + off).valueOf();\n\n\t\t\t// Assemble the internal call report and store for completion\n\t\t\tvar call = {\n\t\t\t\ttype:    op,\n\t\t\t\tfrom:    toHex(log.contract.getAddress()),\n\t\t\t\tto:      toHex(to),\n\t\t\t\tinput:   toHex(log.memory.slice(inOff, inEnd)),\n\t\t\t\tgasIn:   log.getGas(),\n\t\t\t\tgasCost: log.getCost(),\n\t\t\t\toutOff:  log.stack.peek(4 + off).valueOf(),\n\t\t\t\toutLen:  log.stack.peek(5 + off).valueOf()\n\t\t\t};\n\t\t\tif (op != 'DELEGATECALL' && op != 'STATICCALL') {\n\t\t\t\tcall.value = '0x' + log.stack.peek(2).toString(16);\n\t\t\t}\n\t\t\tthis.callstack.push(call);\n\t\t\tthis.descended = true;\n\t\t\treturn;\n\t\t}\n\t\t// If we've just descended into an inner call, retrieve its true allowance. We\n\t\t// need to extract it from within the call as there may be funky gas dynamics\n\t\t// with regard to requested and actually given gas (2300 stipend, 63/64 rule).\n\t\tif (this.descended) {\n\t\t\tif (log.getDepth() >= this.callstack.length) {\n\t\t\t\tthis.callstack[this.callstack.length - 1].gas = log.getGas();\n\t\t\t} else {\n\t\t\t\t// TODO(karalabe): The call was made to a plain account. We currently don't\n\t\t\t\t// have access to the true gas amount inside the call and so any amount will\n\t\t\t\t// mostly be wrong since it depends on a lot of input args. Skip gas for now.\n\t\t\t}\n\t\t\tthis.descended = false;\n\t\t}\n\t\t// If an existing call is returning, pop off the call stack\n\t\tif (syscall && op == 'REVERT') {\n\t\t\tthis.callstack[this.callstack.length - 1].error = \"execution reverted\";\n\t\t\treturn;\n\t\t}\n\t\tif (log.getDepth() == this.callstack.length - 1) {\n\t\t\t// Pop off the last call and get the execution results\n\t\t\tvar call = this.callstack.pop();\n\n\t\t\tif (call.type == 'CREATE' || call.type == 'CREATE2') {\n\t\t\t\t// If the call was a CREATE, retrieve the contract address and output code\n\t\t\t\tcall.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost - log.getGas()).toString(16);\n\t\t\t\tdelete call.gasIn; delete call.gasCost;\n\n\t\t\t\tvar ret = log.stack.peek(0);\n\t\t\t\tif (!ret.equals(0)) {\n\t\t\t\t\tcall.to     = toHex(toAddress(ret.toString(16)));\n\t\t\t\t\tcall.output = toHex(db.getCode(toAddress(ret.toString(16))));\n\t\t\t\t} else if (call.error === undefined) {\n\t\t\t\t\tcall.error = \"internal failure\"; // TODO(karalabe): surface these faults somehow\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// If the call was a contract call, retrieve the gas usage and output\n\t\t\t\tif (call.gas !== undefined) {\n\t\t\t\t\tcall.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost + call.gas - log.getGas()).toString(16);\n\t\t\t\t}\n\t\t\t\tvar ret = log.stack.peek(0);\n\t\t\t\tif (!ret.equals(0)) {\n\t\t\t\t\tcall.output = toHex(log.memory.slice(call.outOff, call.outOff + call.outLen));\n\t\t\t\t} else if (call.error === undefined) {\n\t\t\t\t\tcall.error = \"internal failure\"; // TODO(karalabe): surface these faults somehow\n\t\t\t\t}\n\t\t\t\tdelete call.gasIn; delete call.gasCost;\n\t\t\t\tdelete call.outOff; delete call.outLen;\n\t\t\t}\n\t\t\tif (call.gas !== undefined) {\n\t\t\t\tcall.gas = '0x' + bigInt(call.gas).toString(16);\n\t\t\t}\n\t\t\t// Inject the call into the previous one\n\t\t\tvar left = this.callstack.length;\n\t\t\tif (this.callstack[left-1].calls === undefined) {\n\t\t\t\tthis.callstack[left-1].calls = [];\n\t\t\t}\n\t\t\tthis.callstack[left-1].calls.push(call);\n\t\t}\n\t},\n\n\t// 
fault is invoked when the actual execution of an opcode fails.\n\tfault: function(log, db) {\n\t\t// If the topmost call already reverted, don't handle the additional fault again\n\t\tif (this.callstack[this.callstack.length - 1].error !== undefined) {\n\t\t\treturn;\n\t\t}\n\t\t// Pop off the just-failed call\n\t\tvar call = this.callstack.pop();\n\t\tcall.error = log.getError();\n\n\t\t// Consume all available gas and clean any leftovers\n\t\tif (call.gas !== undefined) {\n\t\t\tcall.gas = '0x' + bigInt(call.gas).toString(16);\n\t\t\tcall.gasUsed = call.gas;\n\t\t}\n\t\tdelete call.gasIn; delete call.gasCost;\n\t\tdelete call.outOff; delete call.outLen;\n\n\t\t// Flatten the failed call into its parent\n\t\tvar left = this.callstack.length;\n\t\tif (left > 0) {\n\t\t\tif (this.callstack[left-1].calls === undefined) {\n\t\t\t\tthis.callstack[left-1].calls = [];\n\t\t\t}\n\t\t\tthis.callstack[left-1].calls.push(call);\n\t\t\treturn;\n\t\t}\n\t\t// Last call failed too, leave it in the stack\n\t\tthis.callstack.push(call);\n\t},\n\n\t// result is invoked when all the opcodes have been iterated over and returns\n\t// the final result of the tracing.\n\tresult: function(ctx, db) {\n\t\tvar result = {\n\t\t\ttype:    ctx.type,\n\t\t\tfrom:    toHex(ctx.from),\n\t\t\tto:      toHex(ctx.to),\n\t\t\tvalue:   '0x' + ctx.value.toString(16),\n\t\t\tgas:     '0x' + bigInt(ctx.gas).toString(16),\n\t\t\tgasUsed: '0x' + bigInt(ctx.gasUsed).toString(16),\n\t\t\tinput:   toHex(ctx.input),\n\t\t\toutput:  toHex(ctx.output),\n\t\t\ttime:    ctx.time,\n\t\t};\n\t\tif (this.callstack[0].calls !== undefined) {\n\t\t\tresult.calls = this.callstack[0].calls;\n\t\t}\n\t\tif (this.callstack[0].error !== undefined) {\n\t\t\tresult.error = this.callstack[0].error;\n\t\t} else if (ctx.error !== undefined) {\n\t\t\tresult.error = ctx.error;\n\t\t}\n\t\tif (result.error !== undefined && (result.error !== \"execution reverted\" || result.output === \"0x\")) {\n\t\t\tdelete result.output;\n\t\t}\n\t\treturn this.finalize(result);\n\t},\n\n\t// finalize recreates a call object using the final desired field order for json\n\t// serialization. This is a nicety feature to pass meaningfully ordered results\n\t// to users who don't interpret it, just display it.\n\tfinalize: function(call) {\n\t\tvar sorted = {\n\t\t\ttype:    call.type,\n\t\t\tfrom:    call.from,\n\t\t\tto:      call.to,\n\t\t\tvalue:   call.value,\n\t\t\tgas:     call.gas,\n\t\t\tgasUsed: call.gasUsed,\n\t\t\tinput:   call.input,\n\t\t\toutput:  call.output,\n\t\t\terror:   call.error,\n\t\t\ttime:    call.time,\n\t\t\tcalls:   call.calls,\n\t\t};\n\t\tfor (var key in sorted) {\n\t\t\tif (sorted[key] === undefined) {\n\t\t\t\tdelete sorted[key];\n\t\t\t}\n\t\t}\n\t\tif (sorted.calls !== undefined) {\n\t\t\tfor (var i=0; i<sorted.calls.length; i++) {\n\t\t\t\tsorted.calls[i] = this.finalize(sorted.calls[i]);\n\t\t\t}\n\t\t}\n\t\treturn sorted;\n\t}\n}\n"
  },
  {
    "path": "eth/tracers/internal/tracers/evmdis_tracer.js",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// evmdisTracer returns sufficient information from a trace to perform evmdis-style\n// disassembly.\n{\n\tstack: [{ops: []}],\n\n\tnpushes: {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 16: 1, 17: 1, 18: 1, 19: 1, 20: 1, 21: 1, 22: 1, 23: 1, 24: 1, 25: 1, 26: 1, 32: 1, 48: 1, 49: 1, 50: 1, 51: 1, 52: 1, 53: 1, 54: 1, 55: 0, 56: 1, 57: 0, 58: 1, 59: 1, 60: 0, 64: 1, 65: 1, 66: 1, 67: 1, 68: 1, 69: 1, 80: 0, 81: 1, 82: 0, 83: 0, 84: 1, 85: 0, 86: 0, 87: 0, 88: 1, 89: 1, 90: 1, 91: 0, 96: 1, 97: 1, 98: 1, 99: 1, 100: 1, 101: 1, 102: 1, 103: 1, 104: 1, 105: 1, 106: 1, 107: 1, 108: 1, 109: 1, 110: 1, 111: 1, 112: 1, 113: 1, 114: 1, 115: 1, 116: 1, 117: 1, 118: 1, 119: 1, 120: 1, 121: 1, 122: 1, 123: 1, 124: 1, 125: 1, 126: 1, 127: 1, 128: 2, 129: 3, 130: 4, 131: 5, 132: 6, 133: 7, 134: 8, 135: 9, 136: 10, 137: 11, 138: 12, 139: 13, 140: 14, 141: 15, 142: 16, 143: 17, 144: 2, 145: 3, 146: 4, 147: 5, 148: 6, 149: 7, 150: 8, 151: 9, 152: 10, 153: 11, 154: 12, 155: 13, 156: 14, 157: 15, 158: 16, 159: 17, 160: 0, 161: 0, 162: 0, 163: 0, 164: 0, 240: 1, 241: 1, 242: 1, 243: 0, 244: 0, 255: 0},\n\n\t// result is invoked when all the opcodes have been iterated over and returns\n\t// the final result of the tracing.\n\tresult: function() { return this.stack[0].ops; },\n\n\t// fault is invoked when the actual execution of an opcode fails.\n\tfault: function(log, db) { },\n\n\t// step is invoked for every opcode that the VM executes.\n\tstep: function(log, db) {\n\t\tvar frame = this.stack[this.stack.length - 1];\n\n\t\tvar error = log.getError();\n\t\tif (error) {\n\t\t\tframe[\"error\"] = error;\n\t\t} else if (log.getDepth() == this.stack.length) {\n\t\t\topinfo = {\n\t\t\t\top:     log.op.toNumber(),\n\t\t\t\tdepth : log.getDepth(),\n\t\t\t\tresult: [],\n\t\t\t};\n\t\t\tif (frame.ops.length > 0) {\n\t\t\t\tvar prevop = frame.ops[frame.ops.length - 1];\n\t\t\t\tfor(var i = 0; i < this.npushes[prevop.op]; i++)\n\t\t\t\t\tprevop.result.push(log.stack.peek(i).toString(16));\n\t\t\t}\n\t\t\tswitch(log.op.toString()) {\n\t\t\tcase \"CALL\": case \"CALLCODE\":\n\t\t\t\tvar instart = log.stack.peek(3).valueOf();\n\t\t\t\tvar insize = log.stack.peek(4).valueOf();\n\t\t\t\topinfo[\"gas\"] = log.stack.peek(0).valueOf();\n\t\t\t\topinfo[\"to\"] = log.stack.peek(1).toString(16);\n\t\t\t\topinfo[\"value\"] = log.stack.peek(2).toString();\n\t\t\t\topinfo[\"input\"] = log.memory.slice(instart, instart + insize);\n\t\t\t\topinfo[\"error\"] = null;\n\t\t\t\topinfo[\"return\"] = null;\n\t\t\t\topinfo[\"ops\"] = [];\n\t\t\t\tthis.stack.push(opinfo);\n\t\t\t\tbreak;\n\t\t\tcase \"DELEGATECALL\": case \"STATICCALL\":\n\t\t\t\tvar instart = log.stack.peek(2).valueOf();\n\t\t\t\tvar 
insize = log.stack.peek(3).valueOf();\n\t\t\t\topinfo[\"op\"] = log.op.toString();\n\t\t\t\topinfo[\"gas\"] = log.stack.peek(0).valueOf();\n\t\t\t\topinfo[\"to\"] = log.stack.peek(1).toString(16);\n\t\t\t\topinfo[\"input\"] = log.memory.slice(instart, instart + insize);\n\t\t\t\topinfo[\"error\"] = null;\n\t\t\t\topinfo[\"return\"] = null;\n\t\t\t\topinfo[\"ops\"] = [];\n\t\t\t\tthis.stack.push(opinfo);\n\t\t\t\tbreak;\n\t\t\tcase \"RETURN\":\n\t\t\t\tvar out = log.stack.peek(0).valueOf();\n\t\t\t\tvar outsize = log.stack.peek(1).valueOf();\n\t\t\t\tframe.return = log.memory.slice(out, out + outsize);\n\t\t\t\tbreak;\n\t\t\tcase \"STOP\": case \"SUICIDE\":\n\t\t\t\tframe.return = log.memory.slice(0, 0);\n\t\t\t\tbreak;\n\t\t\tcase \"JUMPDEST\":\n\t\t\t\topinfo[\"pc\"] = log.getPC();\n\t\t\t}\n\t\t\tif (log.op.isPush()) {\n\t\t\t\topinfo[\"len\"] = log.op.toNumber() - 0x5e;\n\t\t\t}\n\t\t\tframe.ops.push(opinfo);\n\t\t} else {\n\t\t\tthis.stack = this.stack.slice(0, log.getDepth());\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "eth/tracers/internal/tracers/noop_tracer.js",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// noopTracer is just the barebone boilerplate code required from a JavaScript\n// object to be usable as a transaction tracer.\n{\n\t// step is invoked for every opcode that the VM executes.\n\tstep: function(log, db) { },\n\n\t// fault is invoked when the actual execution of an opcode fails.\n\tfault: function(log, db) { },\n\n\t// result is invoked when all the opcodes have been iterated over and returns\n\t// the final result of the tracing.\n\tresult: function(ctx, db) { return {}; }\n}\n"
  },
  {
    "path": "eth/tracers/internal/tracers/opcount_tracer.js",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// opcountTracer is a sample tracer that just counts the number of instructions\n// executed by the EVM before the transaction terminated.\n{\n\t// count tracks the number of EVM instructions executed.\n\tcount: 0,\n\n\t// step is invoked for every opcode that the VM executes.\n\tstep: function(log, db) { this.count++ },\n\n\t// fault is invoked when the actual execution of an opcode fails.\n\tfault: function(log, db) { },\n\n\t// result is invoked when all the opcodes have been iterated over and returns\n\t// the final result of the tracing.\n\tresult: function(ctx, db) { return this.count }\n}\n"
  },
  {
    "path": "eth/tracers/internal/tracers/prestate_tracer.js",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// prestateTracer outputs sufficient information to create a local execution of\n// the transaction from a custom assembled genesis block.\n{\n\t// prestate is the genesis that we're building.\n\tprestate: null,\n\n\t// lookupAccount injects the specified account into the prestate object.\n\tlookupAccount: function(addr, db){\n\t\tvar acc = toHex(addr);\n\t\tif (this.prestate[acc] === undefined) {\n\t\t\tthis.prestate[acc] = {\n\t\t\t\tbalance: '0x' + db.getBalance(addr).toString(16),\n\t\t\t\tnonce:   db.getNonce(addr),\n\t\t\t\tcode:    toHex(db.getCode(addr)),\n\t\t\t\tstorage: {}\n\t\t\t};\n\t\t}\n\t},\n\n\t// lookupStorage injects the specified storage entry of the given account into\n\t// the prestate object.\n\tlookupStorage: function(addr, key, db){\n\t\tvar acc = toHex(addr);\n\t\tvar idx = toHex(key);\n\n\t\tif (this.prestate[acc].storage[idx] === undefined) {\n\t\t\tthis.prestate[acc].storage[idx] = toHex(db.getState(addr, key));\n\t\t}\n\t},\n\n\t// result is invoked when all the opcodes have been iterated over and returns\n\t// the final result of the tracing.\n\tresult: function(ctx, db) {\n\t\t// At this point, we need to deduct the 'value' from the\n\t\t// outer transaction, and move it back to the origin\n\t\tthis.lookupAccount(ctx.from, db);\n\n\t\tvar fromBal = bigInt(this.prestate[toHex(ctx.from)].balance.slice(2), 16);\n\t\tvar toBal   = bigInt(this.prestate[toHex(ctx.to)].balance.slice(2), 16);\n\n\t\tthis.prestate[toHex(ctx.to)].balance   = '0x'+toBal.subtract(ctx.value).toString(16);\n\t\tthis.prestate[toHex(ctx.from)].balance = '0x'+fromBal.add(ctx.value).add((ctx.gasUsed + ctx.intrinsicGas) * ctx.gasPrice).toString(16);\n\n\t\t// Decrement the caller's nonce, and remove empty create targets\n\t\tthis.prestate[toHex(ctx.from)].nonce--;\n\t\tif (ctx.type == 'CREATE') {\n\t\t\t// We can blibdly delete the contract prestate, as any existing state would\n\t\t\t// have caused the transaction to be rejected as invalid in the first place.\n\t\t\tdelete this.prestate[toHex(ctx.to)];\n\t\t}\n\t\t// Return the assembled allocations (prestate)\n\t\treturn this.prestate;\n\t},\n\n\t// step is invoked for every opcode that the VM executes.\n\tstep: function(log, db) {\n\t\t// Add the current account if we just started tracing\n\t\tif (this.prestate === null){\n\t\t\tthis.prestate = {};\n\t\t\t// Balance will potentially be wrong here, since this will include the value\n\t\t\t// sent along with the message. 
We fix that in 'result()'.\n\t\t\tthis.lookupAccount(log.contract.getAddress(), db);\n\t\t}\n\t\t// Whenever new state is accessed, add it to the prestate\n\t\tswitch (log.op.toString()) {\n\t\t\tcase \"EXTCODECOPY\": case \"EXTCODESIZE\": case \"BALANCE\":\n\t\t\t\tthis.lookupAccount(toAddress(log.stack.peek(0).toString(16)), db);\n\t\t\t\tbreak;\n\t\t\tcase \"CREATE\":\n\t\t\t\tvar from = log.contract.getAddress();\n\t\t\t\tthis.lookupAccount(toContract(from, db.getNonce(from)), db);\n\t\t\t\tbreak;\n\t\t\tcase \"CREATE2\":\n\t\t\t\tvar from = log.contract.getAddress();\n\t\t\t\t// stack (top to bottom): endowment, offset, size, salt\n\t\t\t\tvar offset = log.stack.peek(1).valueOf();\n\t\t\t\tvar size = log.stack.peek(2).valueOf();\n\t\t\t\tvar end = offset + size;\n\t\t\t\tthis.lookupAccount(toContract2(from, log.stack.peek(3).toString(16), log.memory.slice(offset, end)), db);\n\t\t\t\tbreak;\n\t\t\tcase \"CALL\": case \"CALLCODE\": case \"DELEGATECALL\": case \"STATICCALL\":\n\t\t\t\tthis.lookupAccount(toAddress(log.stack.peek(1).toString(16)), db);\n\t\t\t\tbreak;\n\t\t\tcase \"SSTORE\": case \"SLOAD\":\n\t\t\t\tthis.lookupStorage(log.contract.getAddress(), toWord(log.stack.peek(0).toString(16)), db);\n\t\t\t\tbreak;\n\t\t}\n\t},\n\n\t// fault is invoked when the actual execution of an opcode fails.\n\tfault: function(log, db) {}\n}\n"
  },
  {
    "path": "eth/tracers/internal/tracers/tracers.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n//go:generate go-bindata -nometadata -o assets.go -pkg tracers -ignore tracers.go -ignore assets.go ./...\n//go:generate gofmt -s -w assets.go\n\n// Package tracers contains the actual JavaScript tracer assets.\npackage tracers\n"
  },
  {
    "path": "eth/tracers/internal/tracers/trigram_tracer.js",
    "content": "// Copyright 2018 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n{\n    // hist is the map of trigram counters\n    hist: {},\n    // lastOp is last operation\n    lastOps: ['',''],\n    lastDepth: 0,\n        // step is invoked for every opcode that the VM executes.\n    step: function(log, db) {\n        var depth = log.getDepth();\n        if (depth != this.lastDepth){\n            this.lastOps = ['',''];\n            this.lastDepth = depth;\n            return;\n        }\n        var op = log.op.toString();\n        var key = this.lastOps[0]+'-'+this.lastOps[1]+'-'+op;\n        if (this.hist[key]){\n            this.hist[key]++;\n        }\n        else {\n            this.hist[key] = 1;\n        }\n        this.lastOps[0] = this.lastOps[1];\n        this.lastOps[1] = op;\n    },\n    // fault is invoked when the actual execution of an opcode fails.\n    fault: function(log, db) {},\n    // result is invoked when all the opcodes have been iterated over and returns\n    // the final result of the tracing.\n    result: function(ctx) {\n        return this.hist;\n    },\n}\n"
  },
  {
    "path": "eth/tracers/internal/tracers/unigram_tracer.js",
    "content": "// Copyright 2018 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n{\n    // hist is the map of opcodes to counters\n    hist: {},\n    // nops counts number of ops\n    nops: 0,\n    // step is invoked for every opcode that the VM executes.\n    step: function(log, db) {\n        var op = log.op.toString();\n        if (this.hist[op]){\n            this.hist[op]++;\n        }\n        else {\n            this.hist[op] = 1;\n        }\n        this.nops++;\n    },\n    // fault is invoked when the actual execution of an opcode fails.\n    fault: function(log, db) {},\n\n    // result is invoked when all the opcodes have been iterated over and returns\n    // the final result of the tracing.\n    result: function(ctx) {\n        return this.hist;\n    },\n}\n"
  },
  {
    "path": "eth/tracers/testdata/call_tracer_create.json",
    "content": "{\n  \"context\": {\n    \"difficulty\": \"3755480783\",\n    \"gasLimit\": \"5401723\",\n    \"miner\": \"0xd049bfd667cb46aa3ef5df0da3e57db3be39e511\",\n    \"number\": \"2294702\",\n    \"timestamp\": \"1513676146\"\n  },\n  \"genesis\": {\n    \"alloc\": {\n      \"0x13e4acefe6a6700604929946e70e6443e4e73447\": {\n        \"balance\": \"0xcf3e0938579f000\",\n        \"code\": \"0x\",\n        \"nonce\": \"9\",\n        \"storage\": {}\n      },\n      \"0x7dc9c9730689ff0b0fd506c67db815f12d90a448\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x\",\n        \"nonce\": \"0\",\n        \"storage\": {}\n      }\n    },\n    \"config\": {\n      \"byzantiumBlock\": 1700000,\n      \"chainId\": 3,\n      \"daoForkSupport\": true,\n      \"eip150Block\": 0,\n      \"eip150Hash\": \"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d\",\n      \"eip155Block\": 10,\n      \"eip158Block\": 10,\n      \"ethash\": {},\n      \"homesteadBlock\": 0\n    },\n    \"difficulty\": \"3757315409\",\n    \"extraData\": \"0x566961425443\",\n    \"gasLimit\": \"5406414\",\n    \"hash\": \"0xae107f592eebdd9ff8d6ba00363676096e6afb0e1007a7d3d0af88173077378d\",\n    \"miner\": \"0xd049bfd667cb46aa3ef5df0da3e57db3be39e511\",\n    \"mixHash\": \"0xc927aa05a38bc3de864e95c33b3ae559d3f39c4ccd51cef6f113f9c50ba0caf1\",\n    \"nonce\": \"0x93363bbd2c95f410\",\n    \"number\": \"2294701\",\n    \"stateRoot\": \"0x6b6737d5bde8058990483e915866bd1578014baeff57bd5e4ed228a2bfad635c\",\n    \"timestamp\": \"1513676127\",\n    \"totalDifficulty\": \"7160808139332585\"\n  },\n  \"input\": \"0xf907ef098504e3b29200830897be8080b9079c606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ff
ffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a1129a01060f46676a5dff6f407f0f51eb6f37f5c8c54e238c70221e18e65fc29d3ea65a0557b01c50ff4ffaac8ed6e5d31237a4ecbac843ab1bfe8bb0165a0060df7c54f\",\n  \"result\": {\n    \"from\": \"0x13e4acefe6a6700604929946e70e6443e4e73447\",\n    \"gas\": \"0x5e106\",\n    \"gasUsed\": \"0x5e106\",\n    \"input\": 
\"0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c85760008
0fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11\",\n    \"output\": \"0x606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987c
e43eae3097b77672a69234d516351368b03fe5b7de03807910029\",\n    \"to\": \"0x7dc9c9730689ff0b0fd506c67db815f12d90a448\",\n    \"type\": \"CREATE\",\n    \"value\": \"0x0\"\n  }\n}\n"
  },
  {
    "path": "eth/tracers/testdata/call_tracer_deep_calls.json",
    "content": "{\n  \"context\": {\n    \"difficulty\": \"117066904\",\n    \"gasLimit\": \"4712384\",\n    \"miner\": \"0x1977c248e1014cc103929dd7f154199c916e39ec\",\n    \"number\": \"25001\",\n    \"timestamp\": \"1479891545\"\n  },\n  \"genesis\": {\n    \"alloc\": {\n      \"0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x606060405236156100825760e060020a600035046302d05d3f811461008a5780630accce061461009c5780631ab9075a146100c757806331ed274614610102578063645a3b7214610133578063772fdae314610155578063a7f4377914610180578063ae5f80801461019e578063c9bded21146101ea578063f905c15a14610231575b61023a610002565b61023c600054600160a060020a031681565b61023a600435602435604435606435608435600254600160a060020a03166000141561024657610002565b61023a600435600254600160a060020a03166000148015906100f8575060025433600160a060020a03908116911614155b156102f457610002565b61023a60043560243560443560643560843560a43560c435600254600160a060020a03166000141561031657610002565b61023a600435602435600254600160a060020a0316600014156103d057610002565b61023a600435602435604435606435608435600254600160a060020a03166000141561046157610002565b61023a60025433600160a060020a0390811691161461051657610002565b61023a6004356024356044356060828152600160a060020a0382169060ff8516907fa6c2f0913db6f79ff0a4365762c61718973b3413d6e40382e704782a9a5099f690602090a3505050565b61023a600435602435600160a060020a038116606090815260ff8316907fee6348a7ec70f74e3d6cba55a53e9f9110d180d7698e9117fc466ae29a43e34790602090a25050565b61023c60035481565b005b6060908152602090f35b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061029d57610002565b60408051858152602081018390528151600160a060020a03858116939087169260ff8a16927f5a690ecd0cb15c1c1fd6b6f8a32df0d4f56cb41a54fea7e94020f013595de796929181900390910190a45050505050565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061036d57610002565b6040805186815260208101869052808201859052606081018490529051600160a060020a03831691889160ff8b16917fd65d9ddafbad8824e2bbd6f56cc9f4ac27ba60737035c10a321ea2f681c94d47919081900360800190a450505050505050565b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061042757610002565b60408051828152905183917fa9c6cbc4bd352a6940479f6d802a1001550581858b310d7f68f7bea51218cda6919081900360200190a25050565b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f11561000257505060405151151590506104b857610002565b80600160a060020a031684600160a060020a03168660ff167f69bdaf789251e1d3a0151259c0c715315496a7404bce9fd0b714674685c2cab78686604051808381526020018281526020019250505060405180910390a45050505050565b600254600160a060020a0316ff\",\n        \"nonce\": \"1\",\n        \"storage\": {\n          \"0x0000000000000000000000000000000000000000000000000000000000000002\": \"0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396\"\n        }\n      },\n      \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\": {\n        \"balance\": \"0x0\",\n        \"code\": 
\"0x606060405236156100775760e060020a600035046302d05d3f811461007f57806313bc6d4b146100915780633688a877146100b95780635188f9961461012f5780637eadc976146101545780638ad79680146101d3578063a43e04d814610238578063a7f437791461025e578063e16c7d981461027c575b61029f610002565b6102a1600054600160a060020a031681565b6102be600435600160a060020a03811660009081526002602052604090205460ff165b919050565b6102d26004356040805160208181018352600080835284815260038252835190849020805460026001821615610100026000190190911604601f8101849004840283018401909552848252929390929183018282801561037d5780601f106103525761010080835404028352916020019161037d565b61029f6004356024356000805433600160a060020a039081169116146104a957610002565b61034060043560008181526001602090815260408083205481517ff905c15a0000000000000000000000000000000000000000000000000000000081529151600160a060020a03909116928392839263f905c15a92600483810193919291829003018189876161da5a03f1156100025750506040515195945050505050565b60408051602060248035600481810135601f810185900485028601850190965285855261029f9581359591946044949293909201918190840183828082843750949650505050505050600054600160a060020a0390811633909116146104f657610002565b61029f6004355b600080548190600160a060020a0390811633909116146105a457610002565b61029f60005433600160a060020a0390811691161461072957610002565b6102a1600435600081815260016020526040902054600160a060020a03166100b4565b005b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156103325780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60408051918252519081900360200190f35b820191906000526020600020905b81548152906001019060200180831161036057829003601f168201915b505050505090506100b4565b506000828152600160208181526040808420805473ffffffffffffffffffffffffffffffffffffffff191686179055600160a060020a038581168086526002909352818520805460ff191690941790935580517f1ab9075a0000000000000000000000000000000000000000000000000000000081523090931660048401525184939192631ab9075a926024828101939192829003018183876161da5a03f11561000257505060408051602081018690528082019290925243606083015260808083526003908301527f414444000000000000000000000000000000000000000000000000000000000060a0830152517f8ac68d4e97d65912f220b4c5f87978b8186320a5e378c1369850b5b5f90323d39181900360c00190a15b505050565b600083815260016020526040902054600160a060020a03838116911614156104d0576104a4565b600083815260016020526040812054600160a060020a031614610389576103898361023f565b600082815260036020908152604082208054845182855293839020919360026001831615610100026000190190921691909104601f90810184900483019391929186019083901061056a57805160ff19168380011785555b5061059a9291505b808211156105a05760008155600101610556565b8280016001018555821561054e579182015b8281111561054e57825182600050559160200191906001019061057c565b50505050565b5090565b600083815260016020526040812054600160a060020a031614156105c757610002565b50506000818152600160205260408082205481517fa7f437790000000000000000000000000000000000000000000000000000000081529151600160a060020a0391909116928392839263a7f4377992600483810193919291829003018183876161da5a03f11561000257505050600160005060008460001916815260200190815260200160002060006101000a815490600160a060020a0302191690556002600050600083600160a060020a0316815260200190815260200160002060006101000a81549060ff02191690557f8ac68d4e97d65912f220b4c5f87978b8186320a5e378c1369850b5b5f90323d383834360405180806020018560001916815260200184600160a060020a03168152602001838152602001828103825260038152602001807f444
54c000000000000000000000000000000000000000000000000000000000081526020015060200194505050505060405180910390a1505050565b600054600160a060020a0316ff\",\n        \"nonce\": \"1\",\n        \"storage\": {\n          \"0x0684ac65a9fa32414dda56996f4183597d695987fdb82b145d722743891a6fe8\": \"0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690\",\n          \"0x1cd76f78169a420d99346e3501dd3e541622c38a226f9b63e01cfebc69879dc7\": \"0x000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f\",\n          \"0x8e54a4494fe5da016bfc01363f4f6cdc91013bb5434bd2a4a3359f13a23afa2f\": \"0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf\",\n          \"0x94edf7f600ba56655fd65fca1f1424334ce369326c1dc3e53151dcd1ad06bc13\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",\n          \"0xbbee47108b275f55f98482c6800f6372165e88b0330d3f5dae6419df4734366c\": \"0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38\",\n          \"0xd38c0c4e84de118cfdcc775130155d83b8bbaaf23dc7f3c83a626b10473213bd\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",\n          \"0xfb3aa5c655c2ec9d40609401f88d505d1da61afaa550e36ef5da0509ada257ba\": \"0x0000000000000000000000007986bad81f4cbd9317f5a46861437dae58d69113\"\n        }\n      },\n      \"0x3e9286eafa2db8101246c2131c09b49080d00690\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x606060405236156100cf5760e060020a600035046302d05d3f81146100d7578063056d4470146100e957806316c66cc61461010c5780631ab9075a146101935780633ae1005c146101ce57806358541662146101fe5780635ed61af014610231578063644e3b791461025457806384dbac3b146102db578063949ae479146102fd5780639859387b14610321578063a7f4377914610340578063ab03fc261461035e578063e8161b7814610385578063e964d4e114610395578063f905c15a146103a5578063f92eb774146103ae575b6103be610002565b6103c0600054600160a060020a031681565b6103be6004356002546000908190600160a060020a031681141561040357610002565b6103dd60043560006108365b6040805160025460e360020a631c2d8fb30282527f636f6e747261637464620000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b6103be600435600254600160a060020a03166000148015906101c4575060025433600160a060020a03908116911614155b1561088d57610002565b6103be600435602435604435606435600254600090819081908190600160a060020a03168114156108af57610002565b6103c0600435602435604435606435608435600254600090819081908190600160a060020a03168114156110e857610002565b6103be6004356002546000908190600160a060020a03168114156115ec57610002565b6103c06004356000611b635b6040805160025460e360020a631c2d8fb30282527f6d61726b6574646200000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b6103be600435602435600254600160a060020a031660001415611bb557610002565b6103be600435602435600254600090600160a060020a0316811415611d2e57610002565b6103be600435600254600160a060020a031660001415611fc657610002565b6103be60025433600160a060020a0390811691161461207e57610002565b6103be600435602435604435600254600090600160a060020a031681141561208c57610002565b6103dd60043560006124b8610260565b6103c0600435600061250a610118565b6103f160035481565b6103f16004356000612561610260565b005b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916
313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061046557610002565b8291506104e55b6040805160025460e360020a631c2d8fb30282527f63706f6f6c00000000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f115610002575050604051519150505b90565b600160a060020a031663b2206e6d83600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fb2206e6d0000000000000000000000000000000000000000000000000000000082526004820152600160a060020a038816602482015290516044808301935060209282900301816000876161da5a03f11561000257505060405151915061059b90506106ba565b600160a060020a031663d5b205ce83600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a636ad902e7028252600160a060020a0390811660048301526024820187905288166044820152905160648281019350600092829003018183876161da5a03f115610002575050506107355b6040805160025460e360020a631c2d8fb30282527f6c6f676d6772000000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b50826120ee5b6040805160025460e360020a631c2d8fb30282527f6163636f756e7463746c0000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b600160a060020a0316630accce06600684600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d02825291519192899290916336da446891600482810192602092919082900301816000876161da5a03f1156100025750505060405180519060200150866040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050505050565b600160a060020a03166316c66cc6836040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051519150505b919050565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061091157610002565b87935061091c610260565b600160a060020a031663bdbdb08685600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fbdbdb0860000000000000000000000000000000000000000000000000000000082526004820152602481018a905290516044808301935060209282900301816000876161da5a03f1156100025750506040515193506109ca90506106ba565b600160a060020a03166381982a7a8885876040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f11561000257505050610a3661046c565b600160a060020a03166308636bdb85600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517f08636bdb000000000000000000000000000000000000000000000000000000008252600482015260248101889052604481019290925251606482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a630a5d50db028252600482018190529151919450600160a060020a03871692506314baa1b691602482810192600092919082900301818387
6161da5a03f11561000257505050610b3561046c565b600160a060020a0316630a3b6ede85600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63051db76f0282526004820152600160a060020a038d16602482015290516044808301935060209282900301816000876161da5a03f115610002575050604051519150610bd590506106ba565b600160a060020a031663d5b205ce87838b6040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f11561000257505050610c41610118565b600160a060020a031663988db79c888a6040518360e060020a0281526004018083600160a060020a0316815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050610ca5610260565b600160a060020a031663f4f2821b896040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050610d6f5b6040805160025460e360020a631c2d8fb30282527f747261646564620000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b600160a060020a0316635f539d69896040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050610dc2610639565b600160a060020a0316630accce06600386600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6315b1ea01028252915191928e928e9263ad8f500891600482810192602092919082900301816000876161da5a03f11561000257505050604051805190602001506040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050610ec5610639565b600160a060020a0316630accce06600386600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6315b1ea01028252915191928e928d9263ad8f500891600482810192602092919082900301816000876161da5a03f11561000257505050604051805190602001506040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050610fc8610639565b600160a060020a031663645a3b7285600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151905061101e610260565b600160a060020a031663f92eb77488600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f115610002575050505050505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061114a57610002565b604051600254600160a060020a0316908a908a908a908a908a90611579806125b38339018087600160a060020a0316815260200186600160a060020a03168152602001856000191681526020018481526020018381526020018281526020019650505050505050604051809103906000f092506111c5610118565b600160a060020a031663b9858a288a856040518360e060020a0281526004018083600160a060020a0316815260200182600160a060020a03168152602001925050506000604051808303816
000876161da5a03f11561000257505050611229610260565b600160a060020a0316635188f99689856040518360e060020a028152600401808360001916815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611288610260565b600160a060020a031663bdbdb08689896040518360e060020a0281526004018083600019168152602001828152602001925050506020604051808303816000876161da5a03f1156100025750506040515192506112e590506106ba565b600160a060020a03166346d88e7d8a858a6040518460e060020a0281526004018084600160a060020a0316815260200183600160a060020a0316815260200182815260200193505050506000604051808303816000876161da5a03f115610002575050506113516106ba565b600160a060020a03166381982a7a8a84866040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f115610002575050506113bd61046c565b600160a060020a0316632b58469689856040518360e060020a028152600401808360001916815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f1156100025750505061141c61046c565b600160a060020a03166308636bdb8984866040518460e060020a028152600401808460001916815260200183815260200182600160a060020a0316815260200193505050506020604051808303816000876161da5a03f11561000257505060408051805160e160020a630a5d50db028252600482018190529151919350600160a060020a03861692506314baa1b6916024828101926000929190829003018183876161da5a03f115610002575050506114d3610639565b6040805160e160020a630566670302815260016004820152602481018b9052600160a060020a0386811660448301528c811660648301526000608483018190529251931692630accce069260a480840193919291829003018183876161da5a03f11561000257505050611544610639565b600160a060020a031663645a3b728961155b610260565b600160a060020a031663f92eb7748c6040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448084019360009350829003018183876161da5a03f1156100025750939a9950505050505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061164e57610002565b82915061165961046c565b600160a060020a0316630a3b6ede83600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63051db76f0282526004820152600160a060020a038816602482015290516044808301935060209282900301816000876161da5a03f1156100025750506040515191506116f990506106ba565b600160a060020a031663d5b205ce83600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a636ad902e7028252600160a060020a0390811660048301526024820187905288166044820152905160648281019350600092829003018183876161da5a03f1156100025750505061179b6106ba565b600160a060020a031663d653078983600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517ff1ff78a0000000000000000000000000000000000000000000000000000000008252915191929163f1ff78a09160048181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150866040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f1156100025750505061189f610260565b600160a060020a031663f4f2821b846040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f115610002575050506118f2610118565b600160a060020a031663f4f2821b8460
40518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050611945610639565b600160a060020a0316630accce06600484600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d02825291519192899290916336da44689181870191602091908190038801816000876161da5a03f115610002575050506040518051906020015060006040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050611a48610639565b600160a060020a031663645a3b7283600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519050611a9e610260565b600160a060020a031663f92eb77486600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f11561000257505050505050565b600160a060020a03166381738c59836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f1156100025750506040515191506108889050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f1156100025750506040515115159050611c1757610002565b611c1f610260565b600160a060020a03166338a699a4836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f11561000257505060405151159050611c7457610002565b611c7c610260565b600160a060020a0316632243118a836040518260e060020a02815260040180826000191681526020019150506000604051808303816000876161da5a03f11561000257505050611cca610639565b600160a060020a031663ae5f8080600184846040518460e060020a028152600401808481526020018360001916815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f115610002575050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f1156100025750506040515115159050611d9057610002565b5081611d9a610260565b600160a060020a031663581d5d6084846040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506000604051808303816000876161da5a03f11561000257505050611df5610639565b600160a060020a0316630accce06600283600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a630566670302825260048201949094526024810193909352600160a060020a038816604484015260006064840181905260848401819052905160a4808501949293509091829003018183876161da5a03f11561000257505050611eab610639565b600160a060020a031663645a3b7282600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519050611f01610260565b600160a060020a031663f92eb77485600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f11561000257505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6
d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061202857610002565b612030610118565b600160a060020a0316639859387b826040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f1156100025750505050565b600254600160a060020a0316ff5b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f11561000257505060405151151590506106b457610002565b600160a060020a031663d65307898383600160a060020a031663f1ff78a06040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fd6530789000000000000000000000000000000000000000000000000000000008252600160a060020a039485166004830152602482015292891660448401525160648381019360009350829003018183876161da5a03f115610002575050506121a5610118565b600160a060020a031663f4f2821b856040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f115610002575050506121f8610cf4565b600160a060020a031663f4f2821b856040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f1156100025750505061224b610639565b600160a060020a0316630accce06600583600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d028252915191928a9290916336da446891600482810192602092919082900301816000876161da5a03f1156100025750505060405180519060200150886040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f1156100025750505080600160a060020a031663ea71b02d6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151600160a060020a031660001490506124b25761239f610639565b600160a060020a0316630accce06600583600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fea71b02d000000000000000000000000000000000000000000000000000000008252915191928a92909163ea71b02d91600482810192602092919082900301816000876161da5a03f1156100025750505060405180519060200150886040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f115610002575050505b50505050565b600160a060020a03166338a699a4836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f1156100025750506040515191506108889050565b600160a060020a031663213fe2b7836040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515191506108889050565b600160a060020a031663f92eb774836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f115610002575050604051519150610888905056606060405260405160c08061157983396101206040819052825160805160a051935160e0516101005160008054600160a060020a03199081163317909155600180546005805484168817905560048a90556006869055600b8590556008849055909116861760a060020a60ff02191690554360038190556002558686526101408390526101608190529396929594919390929091600160a060020a033016917f76885d242fb71c6f74a7e717416e42eff4d96faf54f6de75c6a0a6bbd8890c6b91a230600160a060020a03167fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff600b600050546040518082815260200191505060405180910390a250505050505061145e8061011b6000396000f3606060405236156101745760e060020a600035046302d05d
3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b41461021257806314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b610665600554600160a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d9891602481810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff82166006811015610002576020020151601560ff831660068110156100025701556001016106ac565b6
1071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919450600160a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a87810291909117918290556008544301600955900460ff1615610df757600a54610e039061029e565b600a54
610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a03918216600485015260248401529251909450918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080600160a060020a0316632b68bb2d6040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256\",\n        \"nonce\": \"16\",\n        \"storage\": {\n          \"0x0000000000000000000000000000000000000000000000000000000000000002\": \"0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396\"\n        }\n      },\n      
\"0x70c9217d814985faef62b124420f8dfbddd96433\": {\n        \"balance\": \"0x4ef436dcbda6cd4a\",\n        \"code\": \"0x\",\n        \"nonce\": \"1634\",\n        \"storage\": {}\n      },\n      \"0x7986bad81f4cbd9317f5a46861437dae58d69113\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x6060604052361561008d5760e060020a600035046302d05d3f811461009557806316c66cc6146100a75780631ab9075a146100d7578063213fe2b7146101125780639859387b1461013f578063988db79c1461015e578063a7f4377914610180578063b9858a281461019e578063c8e40fbf146101c0578063f4f2821b146101e8578063f905c15a14610209575b610212610002565b610214600054600160a060020a031681565b600160a060020a0360043581811660009081526005602052604081205461023193168114610257575060016101e3565b610212600435600254600160a060020a0316600014801590610108575060025433600160a060020a03908116911614155b1561025f57610002565b610214600435600160a060020a03811660009081526004602052604081205460ff16151561027557610002565b610212600435600254600160a060020a03166000141561029b57610002565b610212600435602435600254600160a060020a03166000141561050457610002565b61021260025433600160a060020a0390811691161461056757610002565b610212600435602435600254600160a060020a03166000141561057557610002565b610231600435600160a060020a03811660009081526004602052604090205460ff165b919050565b610212600435600254600090600160a060020a031681141561072057610002565b61024560035481565b005b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060006101e3565b60028054600160a060020a031916821790555b50565b50600160a060020a038181166000908152600460205260409020546101009004166101e3565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506102fe57610002565b600160a060020a03811660009081526004602052604090205460ff161515610272576040516104028061092e833901809050604051809103906000f06004600050600083600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050600083600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555050565b600160a060020a03821660009081526004602052604090205460ff1615156104725760405161040280610d30833901809050604051809103906000f06004600050600084600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff021916908302179055505b600160a060020a03828116600090815260046020819052604080518184205460e060020a630a3b0a4f02825286861693820193909352905161010090920490931692630a3b0a4f926024828101939192829003018183876161da5a03f11561000257505050600160a060020a03811660009081526006602052604090208054600160a060020a031916831790555b5050565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506103b957610002565b600254600160a060020a0316ff5b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506105d857610002565b600160a060020a03821660009081526004602052604090205460ff1615156106915760405161040280611132833901809050604051809103906000f06004600050600084600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050
600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff021916908302179055505b600160a060020a03828116600090815260046020819052604080518184205460e060020a630a3b0a4f02825286861693820193909352905161010090920490931692630a3b0a4f926024828101939192829003018183876161da5a03f11561000257505050600160a060020a031660009081526005602052604090208054600160a060020a0319169091179055565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f115610002575050604051511515905061078357610002565b50600160a060020a0381811660009081526005602090815260408083205490931680835260049091529190205460ff161561080f576040600081812054825160e260020a632e72bafd028152600160a060020a03868116600483015293516101009092049093169263b9caebf4926024828101939192829003018183876161da5a03f115610002575050505b600160a060020a03828116600090815260056020526040812054909116146108545760406000908120600160a060020a0384169091528054600160a060020a03191690555b50600160a060020a0381811660009081526006602090815260408083205490931680835260049091529190205460ff16156108e657600160a060020a038181166000908152604080518183205460e260020a632e72bafd028252868516600483015291516101009092049093169263b9caebf4926024828101939192829003018183876161da5a03f115610002575050505b600160a060020a03828116600090815260066020526040812054909116146105005760406000908120600160a060020a0384169091528054600160a060020a0319169055505056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056606060405260008054600160a060020a031916331790556103de8061002
46000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152
602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056\",\n        \"nonce\": \"7\",\n        \"storage\": {\n          \"0xffc4df2d4f3d2cffad590bed6296406ab7926ca9e74784f74a95191fa069a174\": \"0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433\"\n        }\n      },\n      \"0xb4fe7aa695b326c9d219158d2ca50db77b39f99f\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x606060405236156100ae5760e060020a600035046302d05d3f81146100b65780631ab9075a146100c85780632b68bb2d146101035780634cc927d7146101c557806351a34eb81461028e57806356ccb6f0146103545780635928d37f1461041d578063599efa6b146104e9578063759297bb146105b2578063771d50e11461067e578063a7f4377914610740578063f905c15a1461075e578063f92eb77414610767578063febf661214610836575b610902610002565b610904600054600160a060020a031681565b610902600435600254600160a060020a03166000148015906100f9575060025433600160a060020a03908116911614155b1561092057610002565b60025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b02606452610902916000918291600160a060020a03169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051511515905061094257610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610a0d57610002565b61090260043560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610ae957610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610bbc57610002565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610c9657610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060
200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610de057610002565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610ebb57610002565b60025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b02606452610902916000918291600160a060020a03169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f9e57610002565b61090260025433600160a060020a0390811691161461106957610002565b61090e60035481565b61090e60043560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750506040805180517ff92eb774000000000000000000000000000000000000000000000000000000008252600482018790529151919350600160a060020a038416925063f92eb774916024828101926020929190829003018188876161da5a03f11561000257505060405151949350505050565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051511515905061107757610002565b005b6060908152602090f35b60408051918252519081900360200190f35b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5ed61af000000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152925190959286169350635ed61af092602483810193919291829003018183876161da5a03f115610002575050505050565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517fab03fc2600000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015260248301899052808816604484015292519095928616935063ab03fc2692606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f949ae47900000000000000000000000000000000000000000000000000000000825233600160a060020a0390811660048401526024830188905292519095928616935063949ae47992604483810193919291829003018183876161da5a03f11561000257505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f46d88e7d000000000000000000000000000000000000000000000000000000008
252600160a060020a0380891660048401523381166024840152604483018890529251909592861693506346d88e7d92606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5315cdde00000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152808a16602484015260448301889052925190959286169350635315cdde92606483810193919291829003018183876161da5a03f115610002575050604080517f5928d37f00000000000000000000000000000000000000000000000000000000815233600160a060020a03908116600483015287166024820152604481018690529051635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517fe68e401c00000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015280891660248401526044830188905292519095928616935063e68e401c92606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5152f381000000000000000000000000000000000000000000000000000000008252600160a060020a03808a1660048401528089166024840152604483018890523381166064840152925190959286169350635152f38192608483810193919291829003018183876161da5a03f115610002575050505050505050565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f056d447000000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015292519095928616935063056d447092602483810193919291829003018183876161da5a03f115610002575050505050565b600254600160a060020a0316ff5b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f3ae1005c00000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152808a166024840152808916604484015260648301889052925190959286169350633ae1005c92608483810193919291829003018183876161da5a03f11561000257505050505050505056\",\n        \"nonce\": \"1\",\n        \"storage\": {\n          \"0x0000000000000000000000000000000000000000000000000000000000000002\": \"0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396\"\n        }\n      },\n      \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\": {\n        \"balance\": \"0x0\",\n        \"code\": 
\"0x606060405236156101745760e060020a600035046302d05d3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b41461021257806314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b610665600554600160a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d9891602481810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff8216600681101561000257602002015
1601560ff831660068110156100025701556001016106ac565b61071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919450600160a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a878102919091179182905560085443016009
55900460ff1615610df757600a54610e039061029e565b600a54610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a03918216600485015260248401529251909450918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080600160a060020a0316632b68bb2d6040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256\",\n        \"nonce\": \"1\",\n        \"storage\": {\n          \"0x0000000000000000000000000000000000000000000000000000000000000001\": \"0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396\",\n          
\"0x0000000000000000000000000000000000000000000000000000000000000002\": \"0x0000000000000000000000000000000000000000000000000000000000006195\",\n          \"0x0000000000000000000000000000000000000000000000000000000000000004\": \"0x5842545553440000000000000000000000000000000000000000000000000000\",\n          \"0x0000000000000000000000000000000000000000000000000000000000000005\": \"0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433\",\n          \"0x0000000000000000000000000000000000000000000000000000000000000006\": \"0x0000000000000000000000000000000000000000000000008ac7230489e80000\",\n          \"0x000000000000000000000000000000000000000000000000000000000000000b\": \"0x0000000000000000000000000000000000000000000000283c7b9181eca20000\"\n        }\n      },\n      \"0xcf00ffd997ad14939736f026006498e3f099baaf\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x606060405236156100cf5760e060020a600035046302d05d3f81146100d7578063031e7f5d146100e95780631ab9075a1461010b5780632243118a1461014657806327aad68a1461016557806338a699a4146101da5780635188f996146101f8578063581d5d601461021e57806381738c5914610246578063977da54014610269578063a07421ce14610288578063a7f43779146102be578063bdbdb086146102dc578063e1c7111914610303578063f4f2821b14610325578063f905c15a1461034a578063f92eb77414610353575b610387610002565b610389600054600160a060020a031681565b610387600435602435600254600160a060020a0316600014156103a857610002565b610387600435600254600160a060020a031660001480159061013c575060025433600160a060020a03908116911614155b1561042957610002565b610387600435600254600160a060020a03166000141561044b57610002565b6102ac60043560008181526004602081815260408320547f524d81d3000000000000000000000000000000000000000000000000000000006060908152610100909104600160a060020a031692839263524d81d3926064928188876161da5a03f1156100025750506040515192506103819050565b61039c60043560008181526004602052604090205460ff165b919050565b6103876004356024356002546000908190600160a060020a031681141561079457610002565b61038760043560243560025460009081908190600160a060020a031681141561080457610002565b61038960043560008181526004602052604081205460ff1615156109e357610002565b610387600435600254600160a060020a0316600014156109fb57610002565b600435600090815260096020526040902054670de0b6b3a764000090810360243502045b60408051918252519081900360200190f35b61038760025433600160a060020a03908116911614610a9257610002565b600435600090815260086020526040902054670de0b6b3a7640000602435909102046102ac565b610387600435602435600254600160a060020a031660001415610aa057610002565b61038760043560025460009081908190600160a060020a0316811415610b3657610002565b6102ac60035481565b6102ac600435600081815260076020908152604080832054600690925290912054670de0b6b3a76400000204805b50919050565b005b600160a060020a03166060908152602090f35b15156060908152602090f35b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f11561000257505060405151151590506103fe57610002565b60008281526004602052604090205460ff16151561041b57610002565b600860205260406000205550565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f11561000257505060405151151590506104a157610002565b604080516000838152600460205291909120805460ff1916600117905561040280610de2833901809050604051809103906000f0600460005060008360001916815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555066470de4df82000060086000506000836000191681526020019081526020016000
20600050819055506703782dace9d9000060096000506000836000191681526020019081526020016000206000508190555050565b600460005060008560001916815260200190815260200160002060005060000160019054906101000a9004600160a060020a0316915081600160a060020a031663524d81d36040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151821415905061060057838152600660209081526040808320839055600790915281208190555b81600160a060020a0316630a3b0a4f846040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050600160a060020a038316808252600560209081526040808420879055805160e160020a6364a81ff102815290518694670de0b6b3a7640000949363c9503fe29360048181019492939183900301908290876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b939160048181019260209290919082900301816000876161da5a03f11561000257505050604051805190602001500204600660005060008660001916815260200190815260200160002060008282825054019250508190555080600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050506040518051906020015060076000506000866000191681526020019081526020016000206000828282505401925050819055505b50505050565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161da5a03f11561000257505060405151151590506107e957610002565b8381526004602052604081205460ff16151561056657610002565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161da5a03f115610002575050604051511515905061085957610002565b849250670de0b6b3a764000083600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575060408051805160e160020a6364a81ff102825291519189028590049650600481810192602092909190829003018188876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b9391600481810192602092909190829003018189876161da5a03f115610002575050506040518051906020015002049050806006600050600085600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750604080518051855260208681528286208054989098039097557f2e94420f00000000000000000000000000000000000000000000000000000000815290518896600483810193919291829003018187876161da5a03f115610002575050604080515183526020939093525020805490910190555050505050565b60409020546101009004600160a060020a03166101f3565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f1156100025750506040515115159050610a5157610002565b60008181526004602052604090205460ff161515610a6e57610002565b6040600020805474ffffffffffffffffffffffffffffffffffffffffff1916905550565b600254600160a060020a0316ff5b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f1156100025750506040515115159050610af657610002565b60008281526004602052604090205460ff161515610b1357610002565b670de0b6b3a7640000811115610b2857610002565b600960205260406000205550565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161da5a03f1156100025750506040515115159050610b8b57610002565b600160a060020a038416815260056020908152604080832054808452600490925282205490935060ff161515610bc057610002565b600460005060008460001916815260200190815260200160002060005060000160019054906101000a9004600160a060020a0316915081600160a060020a031663b9caebf4856040518260e060020a0281526004018082600160a060020a031681526
020019150506000604051808303816000876161da5a03f115610002575050506005600050600085600160a060020a0316815260200190815260200160002060005060009055839050600082600160a060020a031663524d81d36040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519190911115905061078e57670de0b6b3a764000081600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b939160048181019260209290919082900301816000876161da5a03f11561000257505050604051805190602001500204600660005060008560001916815260200190815260200160002060008282825054039250508190555080600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050506040518051906020015060076000506000856000191681526020019081526020016000206000828282505403925050819055505050505056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056\",\n        \"nonce\": \"3\",\n        \"storage\": {\n          \"0x0000000000000000000000000000000000000000000000000000000000000002\": \"0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396\",\n          \"0x3571d73f14f31a1463bd0a2f92f7fde1653d4e1ead7aedf4b0a5df02f16092ab\": \"0x0000000000000000000000000000000000000000000007d634e4c55188be0000\",\n          \"0x4e64fe2d1b72d95a0a31945cc6e4f4e524ac5ad56d6bd44a85ec7bc9cc0462c0\": \"0x000000000000000000000000000000000000000000000002b5e3af16b1880000\"\n        }\n      }\n    },\n    \"config\": {\n      \"byzantiumBlock\": 1700000,\n      
\"chainId\": 3,\n      \"daoForkSupport\": true,\n      \"eip150Block\": 0,\n      \"eip150Hash\": \"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d\",\n      \"eip155Block\": 10,\n      \"eip158Block\": 10,\n      \"ethash\": {},\n      \"homesteadBlock\": 0\n    },\n    \"difficulty\": \"117124093\",\n    \"extraData\": \"0xd5830105008650617269747986312e31322e31826d61\",\n    \"gasLimit\": \"4707788\",\n    \"hash\": \"0xad325e4c49145fb7a4058a68ac741cc8607a71114e23fc88083c7e881dd653e7\",\n    \"miner\": \"0x00714b9ac97fd6bd9325a059a70c9b9fa94ce050\",\n    \"mixHash\": \"0x0af918f65cb4af04b608fc1f14a849707696986a0e7049e97ef3981808bcc65f\",\n    \"nonce\": \"0x38dee147326a8d40\",\n    \"number\": \"25000\",\n    \"stateRoot\": \"0xc5d6bbcd46236fcdcc80b332ffaaa5476b980b01608f9708408cfef01b58bd5b\",\n    \"timestamp\": \"1479891517\",\n    \"totalDifficulty\": \"1895410389427\"\n  },\n  \"input\": \"0xf88b8206628504a817c8008303d09094c212e03b9e060e36facad5fd8f4435412ca22e6b80a451a34eb80000000000000000000000000000000000000000000000280faf689c35ac00002aa0a7ee5b7877811bf671d121b40569462e722657044808dc1d6c4f1e4233ec145ba0417e7543d52b65738d9df419cbe40a708424f4d54b0fc145c0a64545a2bb1065\",\n  \"result\": {\n    \"calls\": [\n      {\n        \"from\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n        \"gas\": \"0x31217\",\n        \"gasUsed\": \"0x334\",\n        \"input\": \"0xe16c7d98636f6e7472616374617069000000000000000000000000000000000000000000\",\n        \"output\": \"0x000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f\",\n        \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n        \"type\": \"CALL\",\n        \"value\": \"0x0\"\n      },\n      {\n        \"calls\": [\n          {\n            \"from\": \"0xb4fe7aa695b326c9d219158d2ca50db77b39f99f\",\n            \"gas\": \"0x2a68d\",\n            \"gasUsed\": \"0x334\",\n            \"input\": \"0xe16c7d98636f6e747261637463746c000000000000000000000000000000000000000000\",\n            \"output\": \"0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690\",\n            \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n            \"type\": \"CALL\",\n            \"value\": \"0x0\"\n          },\n          {\n            \"calls\": [\n              {\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x23ac9\",\n                \"gasUsed\": \"0x334\",\n                \"input\": \"0xe16c7d98636f6e7472616374646200000000000000000000000000000000000000000000\",\n                \"output\": \"0x0000000000000000000000007986bad81f4cbd9317f5a46861437dae58d69113\",\n                \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x23366\",\n                \"gasUsed\": \"0x273\",\n                \"input\": \"0x16c66cc6000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b\",\n                \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",\n                \"to\": \"0x7986bad81f4cbd9317f5a46861437dae58d69113\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              }\n            ],\n            \"from\": \"0xb4fe7aa695b326c9d219158d2ca50db77b39f99f\",\n            \"gas\": \"0x29f35\",\n            \"gasUsed\": \"0xf8d\",\n            
\"input\": \"0x16c66cc6000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b\",\n            \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",\n            \"to\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n            \"type\": \"CALL\",\n            \"value\": \"0x0\"\n          },\n          {\n            \"from\": \"0xb4fe7aa695b326c9d219158d2ca50db77b39f99f\",\n            \"gas\": \"0x28a9e\",\n            \"gasUsed\": \"0x334\",\n            \"input\": \"0xe16c7d98636f6e747261637463746c000000000000000000000000000000000000000000\",\n            \"output\": \"0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690\",\n            \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n            \"type\": \"CALL\",\n            \"value\": \"0x0\"\n          },\n          {\n            \"calls\": [\n              {\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x21d79\",\n                \"gasUsed\": \"0x24d\",\n                \"input\": \"0x13bc6d4b000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f\",\n                \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",\n                \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x2165b\",\n                \"gasUsed\": \"0x334\",\n                \"input\": \"0xe16c7d986d61726b65746462000000000000000000000000000000000000000000000000\",\n                \"output\": \"0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf\",\n                \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"calls\": [\n                  {\n                    \"from\": \"0xcf00ffd997ad14939736f026006498e3f099baaf\",\n                    \"gas\": \"0x1a8e8\",\n                    \"gasUsed\": \"0x24d\",\n                    \"input\": \"0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690\",\n                    \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",\n                    \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n                    \"type\": \"CALL\",\n                    \"value\": \"0x0\"\n                  },\n                  {\n                    \"from\": \"0xcf00ffd997ad14939736f026006498e3f099baaf\",\n                    \"gas\": \"0x1a2c6\",\n                    \"gasUsed\": \"0x3cb\",\n                    \"input\": \"0xc9503fe2\",\n                    \"output\": \"0x0000000000000000000000000000000000000000000000008ac7230489e80000\",\n                    \"to\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n                    \"type\": \"CALL\",\n                    \"value\": \"0x0\"\n                  },\n                  {\n                    \"from\": \"0xcf00ffd997ad14939736f026006498e3f099baaf\",\n                    \"gas\": \"0x19b72\",\n                    \"gasUsed\": \"0x3cb\",\n                    \"input\": \"0xc9503fe2\",\n                    \"output\": \"0x0000000000000000000000000000000000000000000000008ac7230489e80000\",\n                    \"to\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n             
       \"type\": \"CALL\",\n                    \"value\": \"0x0\"\n                  },\n                  {\n                    \"from\": \"0xcf00ffd997ad14939736f026006498e3f099baaf\",\n                    \"gas\": \"0x19428\",\n                    \"gasUsed\": \"0x305\",\n                    \"input\": \"0x6f265b93\",\n                    \"output\": \"0x0000000000000000000000000000000000000000000000283c7b9181eca20000\",\n                    \"to\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n                    \"type\": \"CALL\",\n                    \"value\": \"0x0\"\n                  },\n                  {\n                    \"from\": \"0xcf00ffd997ad14939736f026006498e3f099baaf\",\n                    \"gas\": \"0x18d45\",\n                    \"gasUsed\": \"0x229\",\n                    \"input\": \"0x2e94420f\",\n                    \"output\": \"0x5842545553440000000000000000000000000000000000000000000000000000\",\n                    \"to\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n                    \"type\": \"CALL\",\n                    \"value\": \"0x0\"\n                  },\n                  {\n                    \"from\": \"0xcf00ffd997ad14939736f026006498e3f099baaf\",\n                    \"gas\": \"0x1734e\",\n                    \"gasUsed\": \"0x229\",\n                    \"input\": \"0x2e94420f\",\n                    \"output\": \"0x5842545553440000000000000000000000000000000000000000000000000000\",\n                    \"to\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n                    \"type\": \"CALL\",\n                    \"value\": \"0x0\"\n                  }\n                ],\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x20ee1\",\n                \"gasUsed\": \"0x5374\",\n                \"input\": \"0x581d5d60000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b0000000000000000000000000000000000000000000000280faf689c35ac0000\",\n                \"output\": \"0x\",\n                \"to\": \"0xcf00ffd997ad14939736f026006498e3f099baaf\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x1b6c1\",\n                \"gasUsed\": \"0x334\",\n                \"input\": \"0xe16c7d986c6f676d67720000000000000000000000000000000000000000000000000000\",\n                \"output\": \"0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38\",\n                \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x1af69\",\n                \"gasUsed\": \"0x229\",\n                \"input\": \"0x2e94420f\",\n                \"output\": \"0x5842545553440000000000000000000000000000000000000000000000000000\",\n                \"to\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"calls\": [\n                  {\n                    \"from\": \"0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38\",\n                    \"gas\": \"0x143a5\",\n                    \"gasUsed\": \"0x24d\",\n                    \"input\": 
\"0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690\",\n                    \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",\n                    \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n                    \"type\": \"CALL\",\n                    \"value\": \"0x0\"\n                  }\n                ],\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x1a91d\",\n                \"gasUsed\": \"0x12fa\",\n                \"input\": \"0x0accce0600000000000000000000000000000000000000000000000000000000000000025842545553440000000000000000000000000000000000000000000000000000000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\n                \"output\": \"0x\",\n                \"to\": \"0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x19177\",\n                \"gasUsed\": \"0x334\",\n                \"input\": \"0xe16c7d986c6f676d67720000000000000000000000000000000000000000000000000000\",\n                \"output\": \"0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38\",\n                \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x18a22\",\n                \"gasUsed\": \"0x229\",\n                \"input\": \"0x2e94420f\",\n                \"output\": \"0x5842545553440000000000000000000000000000000000000000000000000000\",\n                \"to\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x18341\",\n                \"gasUsed\": \"0x334\",\n                \"input\": \"0xe16c7d986d61726b65746462000000000000000000000000000000000000000000000000\",\n                \"output\": \"0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf\",\n                \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x17bec\",\n                \"gasUsed\": \"0x229\",\n                \"input\": \"0x2e94420f\",\n                \"output\": \"0x5842545553440000000000000000000000000000000000000000000000000000\",\n                \"to\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x1764e\",\n                \"gasUsed\": \"0x45c\",\n                \"input\": \"0xf92eb7745842545553440000000000000000000000000000000000000000000000000000\",\n                \"output\": \"0x00000000000000000000000000000000000000000000002816d180e30c390000\",\n                
\"to\": \"0xcf00ffd997ad14939736f026006498e3f099baaf\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              },\n              {\n                \"calls\": [\n                  {\n                    \"from\": \"0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38\",\n                    \"gas\": \"0x108ba\",\n                    \"gasUsed\": \"0x24d\",\n                    \"input\": \"0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690\",\n                    \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",\n                    \"to\": \"0x2cccf5e0538493c235d1c5ef6580f77d99e91396\",\n                    \"type\": \"CALL\",\n                    \"value\": \"0x0\"\n                  }\n                ],\n                \"from\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n                \"gas\": \"0x16e62\",\n                \"gasUsed\": \"0xebb\",\n                \"input\": \"0x645a3b72584254555344000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002816d180e30c390000\",\n                \"output\": \"0x\",\n                \"to\": \"0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38\",\n                \"type\": \"CALL\",\n                \"value\": \"0x0\"\n              }\n            ],\n            \"from\": \"0xb4fe7aa695b326c9d219158d2ca50db77b39f99f\",\n            \"gas\": \"0x283b9\",\n            \"gasUsed\": \"0xc51c\",\n            \"input\": \"0x949ae479000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b0000000000000000000000000000000000000000000000280faf689c35ac0000\",\n            \"output\": \"0x\",\n            \"to\": \"0x3e9286eafa2db8101246c2131c09b49080d00690\",\n            \"type\": \"CALL\",\n            \"value\": \"0x0\"\n          }\n        ],\n        \"from\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n        \"gas\": \"0x30b4a\",\n        \"gasUsed\": \"0xedb7\",\n        \"input\": \"0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000\",\n        \"output\": \"0x\",\n        \"to\": \"0xb4fe7aa695b326c9d219158d2ca50db77b39f99f\",\n        \"type\": \"CALL\",\n        \"value\": \"0x0\"\n      }\n    ],\n    \"from\": \"0x70c9217d814985faef62b124420f8dfbddd96433\",\n    \"gas\": \"0x37b38\",\n    \"gasUsed\": \"0x12bb3\",\n    \"input\": \"0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000\",\n    \"output\": \"0x\",\n    \"to\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n    \"type\": \"CALL\",\n    \"value\": \"0x0\"\n  }\n}\n"
  },
  {
    "path": "eth/tracers/testdata/call_tracer_delegatecall.json",
    "content": "{\n  \"context\": {\n    \"difficulty\": \"31927752\",\n    \"gasLimit\": \"4707788\",\n    \"miner\": \"0x5659922ce141eedbc2733678f9806c77b4eebee8\",\n    \"number\": \"11495\",\n    \"timestamp\": \"1479735917\"\n  },\n  \"genesis\": {\n    \"alloc\": {\n      \"0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x606060405236156100825760e060020a60003504630a0313a981146100875780630a3b0a4f146101095780630cd40fea1461021257806329092d0e1461021f5780634cd06a5f146103295780635dbe47e8146103395780637a9e5410146103d9578063825db5f7146103e6578063a820b44d146103f3578063efa52fb31461047a575b610002565b34610002576104fc600435600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a26333556e849091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f415610002575050604051519150505b919050565b346100025761051060043560006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a2637d65837a9091336000604051602001526040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515115905061008257604080517f21ce24d4000000000000000000000000000000000000000000000000000000008152600060048201819052600160a060020a038416602483015291517342b02b5deeb78f34cd5ac896473b63e6c99a71a2926321ce24d49260448082019391829003018186803b156100025760325a03f415610002575050505b50565b3461000257610512600181565b346100025761051060043560006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a2637d65837a9091336000604051602001526040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515115905061008257604080517f89489a87000000000000000000000000000000000000000000000000000000008152600060048201819052600160a060020a038416602483015291517342b02b5deeb78f34cd5ac896473b63e6c99a71a2926389489a879260448082019391829003018186803b156100025760325a03f4156100025750505061020f565b3461000257610528600435610403565b34610002576104fc600435604080516000602091820181905282517f7d65837a00000000000000000000000000000000000000000000000000000000815260048101829052600160a060020a0385166024820152925190927342b02b5deeb78f34cd5ac896473b63e6c99a71a292637d65837a92604480840193829003018186803b156100025760325a03f4156100025750506040515191506101049050565b3461000257610512600c81565b3461000257610512600081565b3461000257610528600061055660005b600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a263685a1f3c9091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515191506101049050565b346100025761053a600435600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a263f775b6b59091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515191506101049050565b604080519115158252519081900360200190f35b005b6040805160ff9092168252519081900360200190f35b60408051918252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b90509056\",\n        \"nonce\": \"1\",\n        \"storage\": {\n          \"0x4d140b25abf3c71052885c66f73ce07cff141c1afabffdaf5cba04d625b7ebcc\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"\n        }\n      },\n      \"0x269296dddce321a6bcbaa2f0181127593d732cba\": {\n        \"balance\": \"0x0\",\n        \"code\": 
\"0x606060405236156101275760e060020a60003504630cd40fea811461012c578063173825d9146101395780631849cb5a146101c7578063285791371461030f5780632a58b3301461033f5780632cb0d48a146103565780632f54bf6e1461036a578063332b9f061461039d5780633ca8b002146103c55780633df4ddf4146103d557806341c0e1b5146103f457806347799da81461040557806362a51eee1461042457806366907d13146104575780637065cb48146104825780637a9e541014610496578063825db5f7146104a3578063949d225d146104b0578063a51687df146104c7578063b4da4e37146104e6578063b4e6850b146104ff578063bd7474ca14610541578063e75623d814610541578063e9938e1114610555578063f5d241d314610643575b610002565b3461000257610682600181565b34610002576106986004356106ff335b60006001600a9054906101000a9004600160a060020a0316600160a060020a0316635dbe47e8836000604051602001526040518260e060020a0281526004018082600160a060020a03168152602001915050602060405180830381600087803b156100025760325a03f1156100025750506040515191506103989050565b3461000257604080516101008082018352600080835260208084018290528385018290526060808501839052608080860184905260a080870185905260c080880186905260e09788018690526001605060020a0360043581168752600586529589902089519788018a528054808816808a52605060020a91829004600160a060020a0316978a01889052600183015463ffffffff8082169d8c018e905264010000000082048116988c01899052604060020a90910416958a018690526002830154948a01859052600390920154808916938a01849052049096169690970186905293969495949293604080516001605060020a03998a16815297891660208901529590971686860152600160a060020a03909316606086015263ffffffff9182166080860152811660a08501521660c083015260e08201929092529051908190036101000190f35b346100025761069a60043560018054600091829160ff60f060020a909104161515141561063d5761072833610376565b34610002576106ae6004546001605060020a031681565b34610002576106986004356108b333610149565b346100025761069a6004355b600160a060020a03811660009081526002602052604090205460ff1615156001145b919050565b34610002576106986001805460ff60f060020a9091041615151415610913576108ed33610376565b346100025761069a600435610149565b34610002576106ae6003546001605060020a03605060020a9091041681565b346100025761069861091533610149565b34610002576106ae6003546001605060020a0360a060020a9091041681565b346100025761069a60043560243560018054600091829160ff60f060020a909104161515141561095e5761092633610376565b34610002576106986004356001805460ff60f060020a909104161515141561072557610a8b33610376565b3461000257610698600435610aa533610149565b3461000257610682600c81565b3461000257610682600081565b34610002576106ae6003546001605060020a031681565b34610002576106ca600154600160a060020a03605060020a9091041681565b346100025761069a60015460ff60f060020a9091041681565b346100025761069a60043560243560443560643560843560a43560c43560018054600091829160ff60f060020a9091041615151415610b5857610ad233610376565b3461000257610698600435610bd633610149565b34610002576106e6600435604080516101008181018352600080835260208084018290528385018290526060808501839052608080860184905260a080870185905260c080880186905260e09788018690526001605060020a03808b168752600586529589902089519788018a5280548088168952600160a060020a03605060020a918290041696890196909652600181015463ffffffff8082169b8a019b909b5264010000000081048b1695890195909552604060020a90940490981691860182905260028301549086015260039091015480841696850196909652940416918101919091525b50919050565b346100025761069a60043560243560443560643560843560a43560018054600091829160ff60f060020a9091041615151415610c8e57610bfb33610376565b6040805160ff9092168252519081900360200190f35b005b604080519115158252519081900360200190f35b604080516001605060020a039092168252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b6040805163fffffff
f9092168252519081900360200190f35b1561012757600160a060020a0381166000908152600260205260409020805460ff191690555b50565b1561063d57506001605060020a0380831660009081526005602052604090208054909116151561075b576000915061063d565b604080516101008101825282546001605060020a038082168352600160a060020a03605060020a92839004166020840152600185015463ffffffff80821695850195909552640100000000810485166060850152604060020a90049093166080830152600284015460a0830152600384015480841660c08401520490911660e0820152610817905b8051600354600090819060016001605060020a0390911611610c995760038054605060020a60f060020a0319169055610ddf565b600380546001605060020a031981166000196001605060020a03928316011782558416600090815260056020526040812080547fffff000000000000000000000000000000000000000000000000000000000000168155600181810180546bffffffffffffffffffffffff191690556002820192909255909101805473ffffffffffffffffffffffffffffffffffffffff19169055915061063d565b1561012757600180547fff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1660f060020a8302179055610725565b1561091357600480546001605060020a031981166001605060020a039091166001011790555b565b156101275733600160a060020a0316ff5b1561095e57506001605060020a03808416600090815260056020526040902080549091161515610965576000915061095e565b600191505b5092915050565b60038101546001605060020a0384811691161415610986576001915061095e565b604080516101008101825282546001605060020a038082168352600160a060020a03605060020a92839004166020840152600185015463ffffffff80821695850195909552640100000000810485166060850152604060020a90049093166080830152600284015460a0830152600384015480841660c08401520490911660e0820152610a12906107e3565b61095983825b80546003546001605060020a0391821691600091161515610de55760038054605060020a60a060020a031916605060020a84021760a060020a69ffffffffffffffffffff02191660a060020a84021781558301805473ffffffffffffffffffffffffffffffffffffffff19169055610ddf565b1561072557600480546001605060020a0319168217905550565b1561012757600160a060020a0381166000908152600260205260409020805460ff19166001179055610725565b15610b5857506001605060020a038088166000908152600560205260409020805490911615610b645760009150610b58565b6004546001605060020a0390811690891610610b3057600480546001605060020a03191660018a011790555b6003805460016001605060020a03821681016001605060020a03199092169190911790915591505b50979650505050505050565b80546001605060020a0319168817605060020a60f060020a031916605060020a880217815560018101805463ffffffff1916871767ffffffff0000000019166401000000008702176bffffffff00000000000000001916604060020a860217905560028101839055610b048982610a18565b156101275760018054605060020a60f060020a031916605060020a8302179055610725565b15610c8e57506001605060020a03808816600090815260056020526040902080549091161515610c2e5760009150610c8e565b8054605060020a60f060020a031916605060020a88021781556001808201805463ffffffff1916881767ffffffff0000000019166401000000008802176bffffffff00000000000000001916604060020a87021790556002820184905591505b509695505050505050565b6003546001605060020a03848116605060020a909204161415610d095760e084015160038054605060020a928302605060020a60a060020a031990911617808255919091046001605060020a031660009081526005602052604090200180546001605060020a0319169055610ddf565b6003546001605060020a0384811660a060020a909204161415610d825760c08401516003805460a060020a92830260a060020a69ffffffffffffffffffff021990911617808255919091046001605060020a03166000908152600560205260409020018054605060020a60a060020a0319169055610ddf565b505060c082015160e08301516001605060020a0380831660009081526005602052604080822060039081018054605060020a60a060020a031916605060020a8702179055928416825290200180546001605060020a031916831790555b50
505050565b6001605060020a0384161515610e6457600380546001605060020a03605060020a9182900481166000908152600560205260409020830180546001605060020a0319908116871790915583548785018054918590049093168402605060020a60a060020a03199182161790911690915582549185029116179055610ddf565b506001605060020a038381166000908152600560205260409020600390810180549185018054605060020a60a060020a0319908116605060020a94859004909516808502959095176001605060020a0319168817909155815416918402919091179055801515610ef4576003805460a060020a69ffffffffffffffffffff02191660a060020a8402179055610ddf565b6003808401546001605060020a03605060020a9091041660009081526005602052604090200180546001605060020a031916831790555050505056\",\n        \"nonce\": \"1\",\n        \"storage\": {\n          \"0x0000000000000000000000000000000000000000000000000000000000000001\": \"0x000113204f5d64c28326fd7bd05fd4ea855302d7f2ff00000000000000000000\"\n        }\n      },\n      \"0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x6504032353da7150606060405236156100695760e060020a60003504631bf7509d811461006e57806321ce24d41461008157806333556e84146100ec578063685a1f3c146101035780637d65837a1461011757806389489a8714610140578063f775b6b5146101fc575b610007565b61023460043560006100fd82600061010d565b610246600435602435600160a060020a03811660009081526020839052604081205415156102cb57826001016000508054806001018281815481835581811511610278576000838152602090206102789181019083015b808211156102d057600081556001016100d8565b610248600435602435600182015481105b92915050565b6102346004356024355b60018101906100fd565b610248600435602435600160a060020a03811660009081526020839052604090205415156100fd565b61024660043560243580600160a060020a031632600160a060020a03161415156101f857600160a060020a038116600090815260208390526040902054156101f857600160a060020a038116600090815260208390526040902054600183018054909160001901908110156100075760009182526020808320909101805473ffffffffffffffffffffffffffffffffffffffff19169055600160a060020a038316825283905260408120556002820180546000190190555b5050565b61025c60043560243560008260010160005082815481101561000757600091825260209091200154600160a060020a03169392505050565b60408051918252519081900360200190f35b005b604080519115158252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b50505060009283526020808420909201805473ffffffffffffffffffffffffffffffffffffffff191686179055600160a060020a0385168352908590526040909120819055600284018054600101905590505b505050565b509056\",\n        \"nonce\": \"1\",\n        \"storage\": {}\n      },\n      \"0xa529806c67cc6486d4d62024471772f47f6fd672\": {\n        \"balance\": \"0x67820e39ac8fe9800\",\n        \"code\": \"0x\",\n        \"nonce\": \"68\",\n        \"storage\": {}\n      }\n    },\n    \"config\": {\n      \"byzantiumBlock\": 1700000,\n      \"chainId\": 3,\n      \"daoForkSupport\": true,\n      \"eip150Block\": 0,\n      \"eip150Hash\": \"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d\",\n      \"eip155Block\": 10,\n      \"eip158Block\": 10,\n      \"ethash\": {},\n      \"homesteadBlock\": 0\n    },\n    \"difficulty\": \"31912170\",\n    \"extraData\": \"0xd783010502846765746887676f312e372e33856c696e7578\",\n    \"gasLimit\": \"4712388\",\n    \"hash\": \"0x0855914bdc581bccdc62591fd438498386ffb59ea4d5361ed5c3702e26e2c72f\",\n    \"miner\": \"0x334391aa808257952a462d1475562ee2106a6c90\",\n    \"mixHash\": \"0x64bb70b8ca883cadb8fbbda2c70a861612407864089ed87b98e5de20acceada6\",\n    \"nonce\": \"0x684129f283aaef18\",\n    \"number\": \"11494\",\n    \"stateRoot\": 
\"0x7057f31fe3dab1d620771adad35224aae43eb70e94861208bc84c557ff5b9d10\",\n    \"timestamp\": \"1479735912\",\n    \"totalDifficulty\": \"90744064339\"\n  },\n  \"input\": \"0xf889448504a817c800832dc6c094269296dddce321a6bcbaa2f0181127593d732cba80a47065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e29a080ed81e4c5e9971a730efab4885566e2c868cd80bd4166d0ed8c287fdf181650a069d7c49215e3d4416ad239cd09dbb71b9f04c16b33b385d14f40b618a7a65115\",\n  \"result\": {\n    \"calls\": [\n      {\n        \"calls\": [\n          {\n            \"from\": \"0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff\",\n            \"gas\": \"0x2bf459\",\n            \"gasUsed\": \"0x2aa\",\n            \"input\": \"0x7d65837a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672\",\n            \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",\n            \"to\": \"0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2\",\n            \"type\": \"DELEGATECALL\"\n          }\n        ],\n        \"from\": \"0x269296dddce321a6bcbaa2f0181127593d732cba\",\n        \"gas\": \"0x2cae73\",\n        \"gasUsed\": \"0xa9d\",\n        \"input\": \"0x5dbe47e8000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672\",\n        \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",\n        \"to\": \"0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff\",\n        \"type\": \"CALL\",\n        \"value\": \"0x0\"\n      }\n    ],\n    \"from\": \"0xa529806c67cc6486d4d62024471772f47f6fd672\",\n    \"gas\": \"0x2d6e28\",\n    \"gasUsed\": \"0x64bd\",\n    \"input\": \"0x7065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e\",\n    \"output\": \"0x\",\n    \"to\": \"0x269296dddce321a6bcbaa2f0181127593d732cba\",\n    \"type\": \"CALL\",\n    \"value\": \"0x0\"\n  }\n}\n"
  },
  {
    "path": "eth/tracers/testdata/call_tracer_inner_create_oog_outer_throw.json",
    "content": "{\n  \"context\": {\n    \"difficulty\": \"3451177886\",\n    \"gasLimit\": \"4709286\",\n    \"miner\": \"0x1585936b53834b021f68cc13eeefdec2efc8e724\",\n    \"number\": \"2290744\",\n    \"timestamp\": \"1513616439\"\n  },\n  \"genesis\": {\n    \"alloc\": {\n      \"0x1d3ddf7caf024f253487e18bc4a15b1a360c170a\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x606060405263ffffffff60e060020a6000350416633b91f50681146100505780635bb47808146100715780635f51fca01461008c578063bc7647a9146100ad578063f1bd0d7a146100c8575b610000565b346100005761006f600160a060020a03600435811690602435166100e9565b005b346100005761006f600160a060020a0360043516610152565b005b346100005761006f600160a060020a036004358116906024351661019c565b005b346100005761006f600160a060020a03600435166101fa565b005b346100005761006f600160a060020a0360043581169060243516610db8565b005b600160a060020a038083166000908152602081905260408120549091908116903316811461011657610000565b839150600160a060020a038316151561012d573392505b6101378284610e2e565b6101418284610db8565b61014a826101fa565b5b5b50505050565b600154600160a060020a03908116903316811461016e57610000565b6002805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0384161790555b5b5050565b600254600160a060020a0390811690331681146101b857610000565b600160a060020a038381166000908152602081905260409020805473ffffffffffffffffffffffffffffffffffffffff19169184169190911790555b5b505050565b6040805160e260020a631a481fc102815260016024820181905260026044830152606482015262093a8060848201819052600060a4830181905260c06004840152601e60c48401527f736574456e7469747953746174757328616464726573732c75696e743829000060e484015292519091600160a060020a038516916369207f049161010480820192879290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526000602482018190526001604483015260606004830152602360648301527f626567696e506f6c6c28616464726573732c75696e7436342c626f6f6c2c626f60848301527f6f6c29000000000000000000000000000000000000000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601960c48201527f61646453746f636b28616464726573732c75696e74323536290000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601960c48201527f697373756553746f636b2875696e74382c75696e74323536290000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152602160648301527f6772616e7453746f636b2875696e74382c75696e743235362c61646472657373608483015260f860020a60290260a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f115610000575050604080517f010555b8000000000000000000000000000000000000000000000000000000008152600160a060020a03338116602483015260006044830181905260606004840152603c60648401527f6772616e7456657374656453746f636b2875696e74382c75696e743235362c6160848401527f6464726573732c75696e7436342c75696e7436342c75696e743634290000000060a48401529251908716935063010555b89260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260
026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601260c48201527f626567696e53616c65286164647265737329000000000000000000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152601a60648301527f7472616e7366657253616c6546756e64732875696e743235362900000000000060848301529151600160a060020a038716935063de64e15c9260a48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152602d60c48201527f7365744163636f756e74696e6753657474696e67732875696e743235362c756960e48201527f6e7436342c75696e7432353629000000000000000000000000000000000000006101048201529051600160a060020a03861692506369207f04916101248082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152603460648301527f637265617465526563757272696e6752657761726428616464726573732c756960848301527f6e743235362c75696e7436342c737472696e672900000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152601b60648301527f72656d6f7665526563757272696e675265776172642875696e7429000000000060848301529151600160a060020a038716935063de64e15c9260a48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152602360648301527f697373756552657761726428616464726573732c75696e743235362c7374726960848301527f6e6729000000000000000000000000000000000000000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a6337993857028152600160248201819052604482015260606004820152602260648201527f61737369676e53746f636b2875696e74382c616464726573732c75696e743235608482015260f060020a6136290260a48201529051600160a060020a038616925063de64e15c9160c48082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a6337993857028152600160248201819052604482015260606004820152602260648201527f72656d6f766553746f636b2875696e74382c616464726573732c75696e743235608482015260f060020a6136290260a48201529051600160a060020a038616925063de64e15c9160c48082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260026024808301919091526003604483015260006064830181905267ffffffffffffffff8616608484015260ff871660a484015260c0600484015260c48301919091527f7365744164647265737342796c617728737472696e672c616464726573732c6260e48301527f6f6f6c29000000000000000000000000000000000000000000000000000000006101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc1028152600260248201526003604482015260006064820181905267ffffffffffffffff8516608483015260ff861660a483015260c06004830152602160c48301527f73657453746174757342796c617728737472696e672c75696e74382c626f6f6c60e483015260f860020a6029026101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc1028152600260248201526003604482015260006064820181905267ffffffffffffffff8516608483015260ff861660a483015260c06
004830152603860c48301527f736574566f74696e6742796c617728737472696e672c75696e743235362c756960e48301527f6e743235362c626f6f6c2c75696e7436342c75696e74382900000000000000006101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f115610000575050505b505050565b604080517f225553a4000000000000000000000000000000000000000000000000000000008152600160a060020a0383811660048301526002602483015291519184169163225553a49160448082019260009290919082900301818387803b156100005760325a03f115610000575050505b5050565b600082604051611fd280610f488339600160a060020a03909216910190815260405190819003602001906000f0801561000057905082600160a060020a03166308b027418260016040518363ffffffff1660e060020a0281526004018083600160a060020a0316600160a060020a0316815260200182815260200192505050600060405180830381600087803b156100005760325a03f115610000575050604080517fa14e3ee300000000000000000000000000000000000000000000000000000000815260006004820181905260016024830152600160a060020a0386811660448401529251928716935063a14e3ee39260648084019382900301818387803b156100005760325a03f115610000575050505b5050505600606060405234620000005760405160208062001fd283398101604052515b805b600a8054600160a060020a031916600160a060020a0383161790555b506001600d819055600e81905560408051808201909152600c8082527f566f74696e672053746f636b00000000000000000000000000000000000000006020928301908152600b805460008290528251601860ff1990911617825590947f0175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01db9600291831615610100026000190190921604601f0193909304830192906200010c565b828001600101855582156200010c579182015b828111156200010c578251825591602001919060010190620000ef565b5b50620001309291505b808211156200012c576000815560010162000116565b5090565b50506040805180820190915260038082527f43565300000000000000000000000000000000000000000000000000000000006020928301908152600c805460008290528251600660ff1990911617825590937fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c760026001841615610100026000190190931692909204601f010481019291620001f7565b82800160010185558215620001f7579182015b82811115620001f7578251825591602001919060010190620001da565b5b506200021b9291505b808211156200012c576000815560010162000116565b5090565b50505b505b611da280620002306000396000f3006060604052361561019a5763ffffffff60e060020a600035041662e1986d811461019f57806302a72a4c146101d657806306eb4e421461020157806306fdde0314610220578063095ea7b3146102ad578063158ccb99146102dd57806318160ddd146102f85780631cf65a781461031757806323b872dd146103365780632c71e60a1461036c57806333148fd6146103ca578063435ebc2c146103f55780635eeb6e451461041e578063600e85b71461043c5780636103d70b146104a157806362c1e46a146104b05780636c182e99146104ba578063706dc87c146104f057806370a082311461052557806377174f851461055057806395d89b411461056f578063a7771ee3146105fc578063a9059cbb14610629578063ab377daa14610659578063b25dbb5e14610685578063b89a73cb14610699578063ca5eb5e1146106c6578063cbcf2e5a146106e1578063d21f05ba1461070e578063d347c2051461072d578063d96831e114610765578063dd62ed3e14610777578063df3c211b146107a8578063e2982c21146107d6578063eb944e4c14610801575b610000565b34610000576101d4600160a060020a036004351660243567ffffffffffffffff6044358116906064358116906084351661081f565b005b34610000576101ef600160a060020a0360043516610a30565b60408051918252519081900360200190f35b34610000576101ef610a4f565b60408051918252519081900360200190f35b346100005761022d610a55565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260
200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516602435610ae3565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516610b4e565b005b34610000576101ef610b89565b60408051918252519081900360200190f35b34610000576101ef610b8f565b60408051918252519081900360200190f35b34610000576102c9600160a060020a0360043581169060243516604435610b95565b604080519115158252519081900360200190f35b3461000057610388600160a060020a0360043516602435610bb7565b60408051600160a060020a039096168652602086019490945267ffffffffffffffff928316858501529082166060850152166080830152519081900360a00190f35b34610000576101ef600160a060020a0360043516610c21565b60408051918252519081900360200190f35b3461000057610402610c40565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d4600160a060020a0360043516602435610c4f565b005b3461000057610458600160a060020a0360043516602435610cc9565b60408051600160a060020a03909716875260208701959095528585019390935267ffffffffffffffff9182166060860152811660808501521660a0830152519081900360c00190f35b34610000576101d4610d9e565b005b6101d4610e1e565b005b34610000576104d3600160a060020a0360043516610e21565b6040805167ffffffffffffffff9092168252519081900360200190f35b3461000057610402600160a060020a0360043516610ead565b60408051600160a060020a039092168252519081900360200190f35b34610000576101ef600160a060020a0360043516610ef9565b60408051918252519081900360200190f35b34610000576101ef610f18565b60408051918252519081900360200190f35b346100005761022d610f1e565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516610fac565b604080519115158252519081900360200190f35b34610000576102c9600160a060020a0360043516602435610fc2565b604080519115158252519081900360200190f35b3461000057610402600435610fe2565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d46004351515610ffd565b005b34610000576102c9600160a060020a036004351661104c565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516611062565b005b34610000576102c9600160a060020a0360043516611070565b604080519115158252519081900360200190f35b34610000576101ef6110f4565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351667ffffffffffffffff602435166110fa565b60408051918252519081900360200190f35b34610000576101d4600435611121565b005b34610000576101ef600160a060020a03600435811690602435166111c6565b60408051918252519081900360200190f35b34610000576101ef6004356024356044356064356084356111f3565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351661128c565b60408051918252519081900360200190f35b34610000576101d4600160a060020a036004351660243561129e565b005b6040805160a08101825260008082526020820181905291810182905260608101829052608081019190915267ffffffffffffffff848116908416101561086457610000565b8367ffffffffffffffff168267ffffffffffffffff16101561088557610000565b8267ffffffffffffffff168267ffffffffffffffff1610156108a657610000565b506040805160a081018252600160a060020a033381168252602080830188905267ffffffffffffffff80871684860152858116606085015287166080840152908816600090815260039091529190912080546001810180835582818380158290116109615760030281600302836000526020600020918201910161096191905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050916000526020600020906003020160005b5082518154600160a060020a031916600160a060020
a03909116178155602083015160018201556040830151600290910180546060850151608086015167ffffffffffffffff1990921667ffffffffffffffff948516176fffffffffffffffff00000000000000001916604060020a918516919091021777ffffffffffffffff000000000000000000000000000000001916608060020a939091169290920291909117905550610a268686610fc2565b505b505050505050565b600160a060020a0381166000908152600360205260409020545b919050565b60055481565b600b805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b600160a060020a03338116600081815260026020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b600a5433600160a060020a03908116911614610b6957610000565b600a8054600160a060020a031916600160a060020a0383161790555b5b50565b60005481565b60005b90565b6000610ba2848484611600565b610bad8484846116e2565b90505b9392505050565b600360205281600052604060002081815481101561000057906000526020600020906003020160005b5080546001820154600290920154600160a060020a03909116935090915067ffffffffffffffff80821691604060020a8104821691608060020a9091041685565b600160a060020a0381166000908152600860205260409020545b919050565b600a54600160a060020a031681565b600a5433600160a060020a03908116911614610c6a57610000565b610c7660005482611714565b6000908155600160a060020a038316815260016020526040902054610c9b9082611714565b600160a060020a038316600090815260016020526040812091909155610cc390839083611600565b5b5b5050565b6000600060006000600060006000600360008a600160a060020a0316600160a060020a0316815260200190815260200160002088815481101561000057906000526020600020906003020160005b508054600182015460028301546040805160a081018252600160a060020a039094168085526020850184905267ffffffffffffffff808416928601839052604060020a8404811660608701819052608060020a9094041660808601819052909c50929a509197509095509350909150610d90904261172d565b94505b509295509295509295565b33600160a060020a038116600090815260066020526040902054801515610dc457610000565b8030600160a060020a0316311015610ddb57610000565b600160a060020a0382166000818152600660205260408082208290555183156108fc0291849190818181858888f193505050501515610cc357610000565b5b5050565b5b565b600160a060020a03811660009081526003602052604081205442915b81811015610ea557600160a060020a03841660009081526003602052604090208054610e9a9190839081101561000057906000526020600020906003020160005b5060020154604060020a900467ffffffffffffffff168461177d565b92505b600101610e3d565b5b5050919050565b600160a060020a0380821660009081526007602052604081205490911615610eef57600160a060020a0380831660009081526007602052604090205416610ef1565b815b90505b919050565b600160a060020a0381166000908152600160205260409020545b919050565b600d5481565b600c805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b60006000610fb983610c21565b1190505b919050565b6000610fcf338484611600565b610fd983836117ac565b90505b92915050565b600460205260009081526040902054600160a060020a031681565b8015801561101a575061100f33610ef9565b61101833610c21565b115b1561102457610000565b33600160a060020a03166000908152600960205260409020805460ff19168215151790555b50565b60006000610fb983610ef9565b1190505b919050565b610b8533826117dc565b5b50565b600a5460408051600060209182
0181905282517fcbcf2e5a000000000000000000000000000000000000000000000000000000008152600160a060020a03868116600483015293519194939093169263cbcf2e5a92602480830193919282900301818787803b156100005760325a03f115610000575050604051519150505b919050565b600e5481565b6000610fd961110984846118b2565b61111385856119b6565b611a05565b90505b92915050565b600a5433600160a060020a0390811691161461113c57610000565b61114860005482611a1f565b600055600554600190101561116c57600a5461116c90600160a060020a0316611a47565b5b600a54600160a060020a03166000908152600160205260409020546111929082611a1f565b600a8054600160a060020a039081166000908152600160205260408120939093559054610b8592911683611600565b5b5b50565b600160a060020a038083166000908152600260209081526040808320938516835292905220545b92915050565b6000600060008487101561120a5760009250611281565b8387111561121a57879250611281565b61123f6112308961122b888a611714565b611a90565b61123a8689611714565b611abc565b915081925061124e8883611714565b905061127e8361127961126a8461122b8c8b611714565b611a90565b61123a888b611714565b611abc565b611a1f565b92505b505095945050505050565b60066020526000908152604090205481565b600160a060020a03821660009081526003602052604081208054829190849081101561000057906000526020600020906003020160005b50805490925033600160a060020a039081169116146112f357610000565b6040805160a0810182528354600160a060020a0316815260018401546020820152600284015467ffffffffffffffff80821693830193909352604060020a810483166060830152608060020a900490911660808201526113539042611af9565b600160a060020a0385166000908152600360205260409020805491925090849081101561000057906000526020600020906003020160005b508054600160a060020a031916815560006001820181905560029091018054600160c060020a0319169055600160a060020a0385168152600360205260409020805460001981019081101561000057906000526020600020906003020160005b50600160a060020a03851660009081526003602052604090208054859081101561000057906000526020600020906003020160005b5081548154600160a060020a031916600160a060020a03918216178255600180840154908301556002928301805493909201805467ffffffffffffffff191667ffffffffffffffff948516178082558354604060020a908190048616026fffffffffffffffff000000000000000019909116178082559254608060020a9081900490941690930277ffffffffffffffff00000000000000000000000000000000199092169190911790915584166000908152600360205260409020805460001981018083559190829080158290116115485760030281600302836000526020600020918201910161154891905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050600160a060020a033316600090815260016020526040902054611570915082611a1f565b600160a060020a03338116600090815260016020526040808220939093559086168152205461159f9082611714565b600160a060020a038086166000818152600160209081526040918290209490945580518581529051339093169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35b50505050565b600160a060020a0383161561166e576116466008600061161f86610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611714565b6008600061165386610ead565b600160a060020a031681526020810191909152604001600020555b600160a060020a038216156116dc576116b46008600061168d85610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611a1f565b600860006116c185610ead565b600160a060020a031681526020810191909152604001600020555b5b505050565b600083826116f082426110fa565b8111156116fc57610000565b611707868686611b1b565b92505b5b50509392505050565b600061172283831115611b4d565b508082035b92915050565b6000610fd983602001518367ffffffffffffffff16856080015167ffffffffffffffff16866040015167ffffffffffffffff16876060015167ffffffffffffffff1
66111f3565b90505b92915050565b60008167ffffffffffffffff168367ffffffffffffffff1610156117a15781610fd9565b825b90505b92915050565b600033826117ba82426110fa565b8111156117c657610000565b6117d08585611b5d565b92505b5b505092915050565b6117e582610ef9565b6117ee83610c21565b11156117f957610000565b600160a060020a03811660009081526009602052604090205460ff16158015611834575081600160a060020a031681600160a060020a031614155b1561183e57610000565b61184782611070565b1561185157610000565b611864828261185f85610ef9565b611600565b600160a060020a0382811660009081526007602052604090208054600160a060020a031916918316918217905561189a82610ead565b600160a060020a031614610cc357610000565b5b5050565b600160a060020a038216600090815260036020526040812054815b818110156119885761197d836112796003600089600160a060020a0316600160a060020a0316815260200190815260200160002084815481101561000057906000526020600020906003020160005b506040805160a0810182528254600160a060020a031681526001830154602082015260029092015467ffffffffffffffff80821692840192909252604060020a810482166060840152608060020a900416608082015287611af9565b611a1f565b92505b6001016118cd565b600160a060020a0385166000908152600160205260409020546117d09084611714565b92505b505092915050565b600060006119c384611070565b80156119d157506000600d54115b90506119fb816119e9576119e485610ef9565b6119ec565b60005b6111138686611b7b565b611a05565b91505b5092915050565b60008183106117a15781610fd9565b825b90505b92915050565b6000828201611a3c848210801590611a375750838210155b611b4d565b8091505b5092915050565b611a508161104c565b15611a5a57610b85565b6005805460009081526004602052604090208054600160a060020a031916600160a060020a038416179055805460010190555b50565b6000828202611a3c841580611a37575083858381156100005704145b611b4d565b8091505b5092915050565b60006000611acc60008411611b4d565b8284811561000057049050611a3c838581156100005706828502018514611b4d565b8091505b5092915050565b6000610fd98360200151611b0d858561172d565b611714565b90505b92915050565b60008382611b2982426110fa565b811115611b3557610000565b611707868686611b8f565b92505b5b50509392505050565b801515610b8557610000565b5b50565b6000611b6883611a47565b610fd98383611c92565b90505b92915050565b6000610fd983610ef9565b90505b92915050565b600160a060020a038084166000908152600260209081526040808320338516845282528083205493861683526001909152812054909190611bd09084611a1f565b600160a060020a038086166000908152600160205260408082209390935590871681522054611bff9084611714565b600160a060020a038616600090815260016020526040902055611c228184611714565b600160a060020a038087166000818152600260209081526040808320338616845282529182902094909455805187815290519288169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a3600191505b509392505050565b60003382611ca082426110fa565b811115611cac57610000565b6117d08585611cc2565b92505b5b505092915050565b600160a060020a033316600090815260016020526040812054611ce59083611714565b600160a060020a033381166000908152600160205260408082209390935590851681522054611d149083611a1f565b600160a060020a038085166000818152600160209081526040918290209490945580518681529051919333909316927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef92918290030190a35060015b929150505600a165627a7a72305820bfa5ddd3fecf3f43aed25385ec7ec3ef79638c2e58d99f85d9a3cc494183bf160029a165627a7a723058200e78a5f7e0f91739035d0fbf5eca02f79377210b722f63431f29a22e2880b3bd0029\",\n        \"nonce\": \"789\",\n        \"storage\": {\n          \"0xfe9ec0542a1c009be8b1f3acf43af97100ffff42eb736850fb038fa1151ad4d9\": \"0x000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8\"\n        }\n      },\n      \"0x5cb4a6b902fcb21588c86c3517e797b07cdaadb9\": {\n      
  \"balance\": \"0x0\",\n        \"code\": \"0x\",\n        \"nonce\": \"0\",\n        \"storage\": {}\n      },\n      \"0xe4a13bc304682a903e9472f469c33801dd18d9e8\": {\n        \"balance\": \"0x33c763c929f62c4f\",\n        \"code\": \"0x\",\n        \"nonce\": \"14\",\n        \"storage\": {}\n      }\n    },\n    \"config\": {\n      \"byzantiumBlock\": 1700000,\n      \"chainId\": 3,\n      \"daoForkSupport\": true,\n      \"eip150Block\": 0,\n      \"eip150Hash\": \"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d\",\n      \"eip155Block\": 10,\n      \"eip158Block\": 10,\n      \"ethash\": {},\n      \"homesteadBlock\": 0\n    },\n    \"difficulty\": \"3451177886\",\n    \"extraData\": \"0x4554482e45544846414e532e4f52472d4641313738394444\",\n    \"gasLimit\": \"4713874\",\n    \"hash\": \"0x5d52a672417cd1269bf4f7095e25dcbf837747bba908cd5ef809dc1bd06144b5\",\n    \"miner\": \"0xbbf5029fd710d227630c8b7d338051b8e76d50b3\",\n    \"mixHash\": \"0x01a12845ed546b94a038a7a03e8df8d7952024ed41ccb3db7a7ade4abc290ce1\",\n    \"nonce\": \"0x28c446f1cb9748c1\",\n    \"number\": \"2290743\",\n    \"stateRoot\": \"0x4898aceede76739daef76448a367d10015a2c022c9e7909b99a10fbf6fb16708\",\n    \"timestamp\": \"1513616414\",\n    \"totalDifficulty\": \"7146523769022564\"\n  },\n  \"input\": \"0xf8aa0e8509502f9000830493e0941d3ddf7caf024f253487e18bc4a15b1a360c170a80b8443b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e829a0524564944fa419f5c189b5074044f89210c6d6b2d77ee8f7f12a927d59b636dfa0015b28986807a424b18b186ee6642d76739df36cad802d20e8c00e79a61d7281\",\n  \"result\": {\n    \"calls\": [\n      {\n        \"error\": \"internal failure\",\n        \"from\": \"0x1d3ddf7caf024f253487e18bc4a15b1a360c170a\",\n        \"gas\": \"0x39ff0\",\n        \"gasUsed\": \"0x39ff0\",\n        \"input\": 
\"0x606060405234620000005760405160208062001fd283398101604052515b805b600a8054600160a060020a031916600160a060020a0383161790555b506001600d819055600e81905560408051808201909152600c8082527f566f74696e672053746f636b00000000000000000000000000000000000000006020928301908152600b805460008290528251601860ff1990911617825590947f0175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01db9600291831615610100026000190190921604601f0193909304830192906200010c565b828001600101855582156200010c579182015b828111156200010c578251825591602001919060010190620000ef565b5b50620001309291505b808211156200012c576000815560010162000116565b5090565b50506040805180820190915260038082527f43565300000000000000000000000000000000000000000000000000000000006020928301908152600c805460008290528251600660ff1990911617825590937fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c760026001841615610100026000190190931692909204601f010481019291620001f7565b82800160010185558215620001f7579182015b82811115620001f7578251825591602001919060010190620001da565b5b506200021b9291505b808211156200012c576000815560010162000116565b5090565b50505b505b611da280620002306000396000f3006060604052361561019a5763ffffffff60e060020a600035041662e1986d811461019f57806302a72a4c146101d657806306eb4e421461020157806306fdde0314610220578063095ea7b3146102ad578063158ccb99146102dd57806318160ddd146102f85780631cf65a781461031757806323b872dd146103365780632c71e60a1461036c57806333148fd6146103ca578063435ebc2c146103f55780635eeb6e451461041e578063600e85b71461043c5780636103d70b146104a157806362c1e46a146104b05780636c182e99146104ba578063706dc87c146104f057806370a082311461052557806377174f851461055057806395d89b411461056f578063a7771ee3146105fc578063a9059cbb14610629578063ab377daa14610659578063b25dbb5e14610685578063b89a73cb14610699578063ca5eb5e1146106c6578063cbcf2e5a146106e1578063d21f05ba1461070e578063d347c2051461072d578063d96831e114610765578063dd62ed3e14610777578063df3c211b146107a8578063e2982c21146107d6578063eb944e4c14610801575b610000565b34610000576101d4600160a060020a036004351660243567ffffffffffffffff6044358116906064358116906084351661081f565b005b34610000576101ef600160a060020a0360043516610a30565b60408051918252519081900360200190f35b34610000576101ef610a4f565b60408051918252519081900360200190f35b346100005761022d610a55565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516602435610ae3565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516610b4e565b005b34610000576101ef610b89565b60408051918252519081900360200190f35b34610000576101ef610b8f565b60408051918252519081900360200190f35b34610000576102c9600160a060020a0360043581169060243516604435610b95565b604080519115158252519081900360200190f35b3461000057610388600160a060020a0360043516602435610bb7565b60408051600160a060020a039096168652602086019490945267ffffffffffffffff928316858501529082166060850152166080830152519081900360a00190f35b34610000576101ef600160a060020a0360043516610c21565b60408051918252519081900360200190f35b3461000057610402610c40565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d4600160a060020a0360043516602435610c4f565b005b3461000057610458600160a060020a0360043516602435610cc9565b60408051600160a060020a03909716875260208701959095528585019390935267ffffffffffffffff9182166060860152811660808501521660a0830152519081900360c00190f35b34610000576101d4610d9e565b005b6101d4610e1e565b005b346100005
76104d3600160a060020a0360043516610e21565b6040805167ffffffffffffffff9092168252519081900360200190f35b3461000057610402600160a060020a0360043516610ead565b60408051600160a060020a039092168252519081900360200190f35b34610000576101ef600160a060020a0360043516610ef9565b60408051918252519081900360200190f35b34610000576101ef610f18565b60408051918252519081900360200190f35b346100005761022d610f1e565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516610fac565b604080519115158252519081900360200190f35b34610000576102c9600160a060020a0360043516602435610fc2565b604080519115158252519081900360200190f35b3461000057610402600435610fe2565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d46004351515610ffd565b005b34610000576102c9600160a060020a036004351661104c565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516611062565b005b34610000576102c9600160a060020a0360043516611070565b604080519115158252519081900360200190f35b34610000576101ef6110f4565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351667ffffffffffffffff602435166110fa565b60408051918252519081900360200190f35b34610000576101d4600435611121565b005b34610000576101ef600160a060020a03600435811690602435166111c6565b60408051918252519081900360200190f35b34610000576101ef6004356024356044356064356084356111f3565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351661128c565b60408051918252519081900360200190f35b34610000576101d4600160a060020a036004351660243561129e565b005b6040805160a08101825260008082526020820181905291810182905260608101829052608081019190915267ffffffffffffffff848116908416101561086457610000565b8367ffffffffffffffff168267ffffffffffffffff16101561088557610000565b8267ffffffffffffffff168267ffffffffffffffff1610156108a657610000565b506040805160a081018252600160a060020a033381168252602080830188905267ffffffffffffffff80871684860152858116606085015287166080840152908816600090815260039091529190912080546001810180835582818380158290116109615760030281600302836000526020600020918201910161096191905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050916000526020600020906003020160005b5082518154600160a060020a031916600160a060020a03909116178155602083015160018201556040830151600290910180546060850151608086015167ffffffffffffffff1990921667ffffffffffffffff948516176fffffffffffffffff00000000000000001916604060020a918516919091021777ffffffffffffffff000000000000000000000000000000001916608060020a939091169290920291909117905550610a268686610fc2565b505b505050505050565b600160a060020a0381166000908152600360205260409020545b919050565b60055481565b600b805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b600160a060020a03338116600081815260026020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b600a5433600160a060020a03908116911614610b6957610000565b600a8054600160a060020a031916600160a060020a0383161790555b5b50565b60005481565b60005b90565b6000610ba2848484611600565b610bad8484846116e2565b90505b
9392505050565b600360205281600052604060002081815481101561000057906000526020600020906003020160005b5080546001820154600290920154600160a060020a03909116935090915067ffffffffffffffff80821691604060020a8104821691608060020a9091041685565b600160a060020a0381166000908152600860205260409020545b919050565b600a54600160a060020a031681565b600a5433600160a060020a03908116911614610c6a57610000565b610c7660005482611714565b6000908155600160a060020a038316815260016020526040902054610c9b9082611714565b600160a060020a038316600090815260016020526040812091909155610cc390839083611600565b5b5b5050565b6000600060006000600060006000600360008a600160a060020a0316600160a060020a0316815260200190815260200160002088815481101561000057906000526020600020906003020160005b508054600182015460028301546040805160a081018252600160a060020a039094168085526020850184905267ffffffffffffffff808416928601839052604060020a8404811660608701819052608060020a9094041660808601819052909c50929a509197509095509350909150610d90904261172d565b94505b509295509295509295565b33600160a060020a038116600090815260066020526040902054801515610dc457610000565b8030600160a060020a0316311015610ddb57610000565b600160a060020a0382166000818152600660205260408082208290555183156108fc0291849190818181858888f193505050501515610cc357610000565b5b5050565b5b565b600160a060020a03811660009081526003602052604081205442915b81811015610ea557600160a060020a03841660009081526003602052604090208054610e9a9190839081101561000057906000526020600020906003020160005b5060020154604060020a900467ffffffffffffffff168461177d565b92505b600101610e3d565b5b5050919050565b600160a060020a0380821660009081526007602052604081205490911615610eef57600160a060020a0380831660009081526007602052604090205416610ef1565b815b90505b919050565b600160a060020a0381166000908152600160205260409020545b919050565b600d5481565b600c805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b60006000610fb983610c21565b1190505b919050565b6000610fcf338484611600565b610fd983836117ac565b90505b92915050565b600460205260009081526040902054600160a060020a031681565b8015801561101a575061100f33610ef9565b61101833610c21565b115b1561102457610000565b33600160a060020a03166000908152600960205260409020805460ff19168215151790555b50565b60006000610fb983610ef9565b1190505b919050565b610b8533826117dc565b5b50565b600a54604080516000602091820181905282517fcbcf2e5a000000000000000000000000000000000000000000000000000000008152600160a060020a03868116600483015293519194939093169263cbcf2e5a92602480830193919282900301818787803b156100005760325a03f115610000575050604051519150505b919050565b600e5481565b6000610fd961110984846118b2565b61111385856119b6565b611a05565b90505b92915050565b600a5433600160a060020a0390811691161461113c57610000565b61114860005482611a1f565b600055600554600190101561116c57600a5461116c90600160a060020a0316611a47565b5b600a54600160a060020a03166000908152600160205260409020546111929082611a1f565b600a8054600160a060020a039081166000908152600160205260408120939093559054610b8592911683611600565b5b5b50565b600160a060020a038083166000908152600260209081526040808320938516835292905220545b92915050565b6000600060008487101561120a5760009250611281565b8387111561121a57879250611281565b61123f6112308961122b888a611714565b611a90565b61123a8689611714565b611abc565b915081925061124e8883611714565b905061127e8361127961126a8461122b8c8b611714565b611a90565b61123a888b611714565b611abc565b611a1f565b92505b505095945050505050565b60066020526000908152604090205481565b600160a
060020a03821660009081526003602052604081208054829190849081101561000057906000526020600020906003020160005b50805490925033600160a060020a039081169116146112f357610000565b6040805160a0810182528354600160a060020a0316815260018401546020820152600284015467ffffffffffffffff80821693830193909352604060020a810483166060830152608060020a900490911660808201526113539042611af9565b600160a060020a0385166000908152600360205260409020805491925090849081101561000057906000526020600020906003020160005b508054600160a060020a031916815560006001820181905560029091018054600160c060020a0319169055600160a060020a0385168152600360205260409020805460001981019081101561000057906000526020600020906003020160005b50600160a060020a03851660009081526003602052604090208054859081101561000057906000526020600020906003020160005b5081548154600160a060020a031916600160a060020a03918216178255600180840154908301556002928301805493909201805467ffffffffffffffff191667ffffffffffffffff948516178082558354604060020a908190048616026fffffffffffffffff000000000000000019909116178082559254608060020a9081900490941690930277ffffffffffffffff00000000000000000000000000000000199092169190911790915584166000908152600360205260409020805460001981018083559190829080158290116115485760030281600302836000526020600020918201910161154891905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050600160a060020a033316600090815260016020526040902054611570915082611a1f565b600160a060020a03338116600090815260016020526040808220939093559086168152205461159f9082611714565b600160a060020a038086166000818152600160209081526040918290209490945580518581529051339093169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35b50505050565b600160a060020a0383161561166e576116466008600061161f86610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611714565b6008600061165386610ead565b600160a060020a031681526020810191909152604001600020555b600160a060020a038216156116dc576116b46008600061168d85610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611a1f565b600860006116c185610ead565b600160a060020a031681526020810191909152604001600020555b5b505050565b600083826116f082426110fa565b8111156116fc57610000565b611707868686611b1b565b92505b5b50509392505050565b600061172283831115611b4d565b508082035b92915050565b6000610fd983602001518367ffffffffffffffff16856080015167ffffffffffffffff16866040015167ffffffffffffffff16876060015167ffffffffffffffff166111f3565b90505b92915050565b60008167ffffffffffffffff168367ffffffffffffffff1610156117a15781610fd9565b825b90505b92915050565b600033826117ba82426110fa565b8111156117c657610000565b6117d08585611b5d565b92505b5b505092915050565b6117e582610ef9565b6117ee83610c21565b11156117f957610000565b600160a060020a03811660009081526009602052604090205460ff16158015611834575081600160a060020a031681600160a060020a031614155b1561183e57610000565b61184782611070565b1561185157610000565b611864828261185f85610ef9565b611600565b600160a060020a0382811660009081526007602052604090208054600160a060020a031916918316918217905561189a82610ead565b600160a060020a031614610cc357610000565b5b5050565b600160a060020a038216600090815260036020526040812054815b818110156119885761197d836112796003600089600160a060020a0316600160a060020a0316815260200190815260200160002084815481101561000057906000526020600020906003020160005b506040805160a0810182528254600160a060020a031681526001830154602082015260029092015467ffffffffffffffff80821692840192909252604060020a810482166060840152608060020a900416608082015287611af9565b611a1f565b92505b6001016118cd565b600160a060020a03851660
00908152600160205260409020546117d09084611714565b92505b505092915050565b600060006119c384611070565b80156119d157506000600d54115b90506119fb816119e9576119e485610ef9565b6119ec565b60005b6111138686611b7b565b611a05565b91505b5092915050565b60008183106117a15781610fd9565b825b90505b92915050565b6000828201611a3c848210801590611a375750838210155b611b4d565b8091505b5092915050565b611a508161104c565b15611a5a57610b85565b6005805460009081526004602052604090208054600160a060020a031916600160a060020a038416179055805460010190555b50565b6000828202611a3c841580611a37575083858381156100005704145b611b4d565b8091505b5092915050565b60006000611acc60008411611b4d565b8284811561000057049050611a3c838581156100005706828502018514611b4d565b8091505b5092915050565b6000610fd98360200151611b0d858561172d565b611714565b90505b92915050565b60008382611b2982426110fa565b811115611b3557610000565b611707868686611b8f565b92505b5b50509392505050565b801515610b8557610000565b5b50565b6000611b6883611a47565b610fd98383611c92565b90505b92915050565b6000610fd983610ef9565b90505b92915050565b600160a060020a038084166000908152600260209081526040808320338516845282528083205493861683526001909152812054909190611bd09084611a1f565b600160a060020a038086166000908152600160205260408082209390935590871681522054611bff9084611714565b600160a060020a038616600090815260016020526040902055611c228184611714565b600160a060020a038087166000818152600260209081526040808320338616845282529182902094909455805187815290519288169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a3600191505b509392505050565b60003382611ca082426110fa565b811115611cac57610000565b6117d08585611cc2565b92505b5b505092915050565b600160a060020a033316600090815260016020526040812054611ce59083611714565b600160a060020a033381166000908152600160205260408082209390935590851681522054611d149083611a1f565b600160a060020a038085166000818152600160209081526040918290209490945580518681529051919333909316927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef92918290030190a35060015b929150505600a165627a7a72305820bfa5ddd3fecf3f43aed25385ec7ec3ef79638c2e58d99f85d9a3cc494183bf160029000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182\",\n        \"type\": \"CREATE\",\n        \"value\": \"0x0\"\n      }\n    ],\n    \"error\": \"invalid jump destination\",\n    \"from\": \"0xe4a13bc304682a903e9472f469c33801dd18d9e8\",\n    \"gas\": \"0x435c8\",\n    \"gasUsed\": \"0x435c8\",\n    \"input\": \"0x3b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8\",\n    \"to\": \"0x1d3ddf7caf024f253487e18bc4a15b1a360c170a\",\n    \"type\": \"CALL\",\n    \"value\": \"0x0\"\n  }\n}\n"
  },
  {
    "path": "eth/tracers/testdata/call_tracer_inner_instafail.json",
    "content": "{\n  \"genesis\": {\n    \"difficulty\": \"117067574\",\n    \"extraData\": \"0xd783010502846765746887676f312e372e33856c696e7578\",\n    \"gasLimit\": \"4712380\",\n    \"hash\": \"0xe05db05eeb3f288041ecb10a787df121c0ed69499355716e17c307de313a4486\",\n    \"miner\": \"0x0c062b329265c965deef1eede55183b3acb8f611\",\n    \"mixHash\": \"0xb669ae39118a53d2c65fd3b1e1d3850dd3f8c6842030698ed846a2762d68b61d\",\n    \"nonce\": \"0x2b469722b8e28c45\",\n    \"number\": \"24973\",\n    \"stateRoot\": \"0x532a5c3f75453a696428db078e32ae283c85cb97e4d8560dbdf022adac6df369\",\n    \"timestamp\": \"1479891145\",\n    \"totalDifficulty\": \"1892250259406\",\n    \"alloc\": {\n      \"0x6c06b16512b332e6cd8293a2974872674716ce18\": {\n        \"balance\": \"0x0\",\n        \"nonce\": \"1\",\n        \"code\": \"0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632e1a7d4d146036575b6000565b34600057604e60048080359060200190919050506050565b005b3373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051809050600060405180830381858888f19350505050505b5056\",\n        \"storage\": {}\n      },\n      \"0x66fdfd05e46126a07465ad24e40cc0597bc1ef31\": {\n        \"balance\": \"0x229ebbb36c3e0f20\",\n        \"nonce\": \"3\",\n        \"code\": \"0x\",\n        \"storage\": {}\n      }\n    },\n    \"config\": {\n      \"chainId\": 3,\n      \"homesteadBlock\": 0,\n      \"daoForkSupport\": true,\n      \"eip150Block\": 0,\n      \"eip150Hash\": \"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d\",\n      \"eip155Block\": 10,\n      \"eip158Block\": 10,\n      \"byzantiumBlock\": 1700000,\n      \"constantinopleBlock\": 4230000,\n      \"petersburgBlock\": 4939394,\n      \"istanbulBlock\": 6485846,\n      \"muirGlacierBlock\": 7117117,\n      \"ethash\": {}\n    }\n  },\n  \"context\": {\n    \"number\": \"24974\",\n    \"difficulty\": \"117067574\",\n    \"timestamp\": \"1479891162\",\n    \"gasLimit\": \"4712388\",\n    \"miner\": \"0xc822ef32e6d26e170b70cf761e204c1806265914\"\n  },\n  \"input\": \"0xf889038504a81557008301f97e946c06b16512b332e6cd8293a2974872674716ce1880a42e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b1600002aa0e2a6558040c5d72bc59f2fb62a38993a314c849cd22fb393018d2c5af3112095a01bdb6d7ba32263ccc2ecc880d38c49d9f0c5a72d8b7908e3122b31356d349745\",\n  \"result\": {\n    \"type\": \"CALL\",\n    \"from\": \"0x66fdfd05e46126a07465ad24e40cc0597bc1ef31\",\n    \"to\": \"0x6c06b16512b332e6cd8293a2974872674716ce18\",\n    \"value\": \"0x0\",\n    \"gas\": \"0x1a466\",\n    \"gasUsed\": \"0x1dc6\",\n    \"input\": \"0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000\",\n    \"output\": \"0x\",\n    \"calls\": [\n      {\n        \"type\": \"CALL\",\n        \"from\": \"0x6c06b16512b332e6cd8293a2974872674716ce18\",\n        \"to\": \"0x66fdfd05e46126a07465ad24e40cc0597bc1ef31\",\n        \"value\": \"0x14d1120d7b160000\",\n        \"error\":\"internal failure\",\n        \"input\": \"0x\"\n      }\n    ]\n  }\n}\n"
  },
  {
    "path": "eth/tracers/testdata/call_tracer_inner_throw_outer_revert.json",
    "content": "{\n  \"context\": {\n    \"difficulty\": \"3956606365\",\n    \"gasLimit\": \"5413248\",\n    \"miner\": \"0x00d8ae40d9a06d0e7a2877b62e32eb959afbe16d\",\n    \"number\": \"2295104\",\n    \"timestamp\": \"1513681256\"\n  },\n  \"genesis\": {\n    \"alloc\": {\n      \"0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x60606040526004361061015e576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680625b4487146101a257806311df9995146101cb578063278ecde11461022057806330adce0e146102435780633197cbb61461026c5780634bb278f3146102955780636103d70b146102aa57806363a599a4146102bf5780636a2d1cb8146102d457806375f12b21146102fd57806378e979251461032a578063801db9cc1461035357806386d1a69f1461037c5780638da5cb5b146103915780638ef26a71146103e65780639890220b1461040f5780639b39caef14610424578063b85dfb801461044d578063be9a6555146104a1578063ccb07cef146104b6578063d06c91e4146104e3578063d669e1d414610538578063df40503c14610561578063e2982c2114610576578063f02e030d146105c3578063f2fde38b146105d8578063f3283fba14610611575b600060149054906101000a900460ff1615151561017a57600080fd5b60075442108061018b575060085442115b15151561019757600080fd5b6101a03361064a565b005b34156101ad57600080fd5b6101b5610925565b6040518082815260200191505060405180910390f35b34156101d657600080fd5b6101de61092b565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561022b57600080fd5b6102416004808035906020019091905050610951565b005b341561024e57600080fd5b610256610c48565b6040518082815260200191505060405180910390f35b341561027757600080fd5b61027f610c4e565b6040518082815260200191505060405180910390f35b34156102a057600080fd5b6102a8610c54565b005b34156102b557600080fd5b6102bd610f3e565b005b34156102ca57600080fd5b6102d261105d565b005b34156102df57600080fd5b6102e76110d5565b6040518082815260200191505060405180910390f35b341561030857600080fd5b6103106110e1565b604051808215151515815260200191505060405180910390f35b341561033557600080fd5b61033d6110f4565b6040518082815260200191505060405180910390f35b341561035e57600080fd5b6103666110fa565b6040518082815260200191505060405180910390f35b341561038757600080fd5b61038f611104565b005b341561039c57600080fd5b6103a4611196565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156103f157600080fd5b6103f96111bb565b6040518082815260200191505060405180910390f35b341561041a57600080fd5b6104226111c1565b005b341561042f57600080fd5b610437611296565b6040518082815260200191505060405180910390f35b341561045857600080fd5b610484600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061129c565b604051808381526020018281526020019250505060405180910390f35b34156104ac57600080fd5b6104b46112c0565b005b34156104c157600080fd5b6104c9611341565b604051808215151515815260200191505060405180910390f35b34156104ee57600080fd5b6104f6611354565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561054357600080fd5b61054b61137a565b6040518082815260200191505060405180910390f35b341561056c57600080fd5b610574611385565b005b341561058157600080fd5b6105ad600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506116c3565b6040518082815260200191505060405180910390f35b34156105ce57600080fd5b6105d66116db565b005b34156105e357600080fd5b61060f600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611829565b005b341561061c57600080fd5b610648600480803573ffffffffffffffffffff
ffffffffffffffffffff169060200190919050506118fe565b005b600080670de0b6b3a7640000341015151561066457600080fd5b61069b610696670de0b6b3a7640000610688610258346119d990919063ffffffff16565b611a0c90919063ffffffff16565b611a27565b9150660221b262dd80006106ba60065484611a7e90919063ffffffff16565b111515156106c757600080fd5b600a60008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000209050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a9059cbb84846000604051602001526040518363ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b15156107d557600080fd5b6102c65a03f115156107e657600080fd5b5050506040518051905050610808828260010154611a7e90919063ffffffff16565b8160010181905550610827348260000154611a7e90919063ffffffff16565b816000018190555061084434600554611a7e90919063ffffffff16565b60058190555061085f82600654611a7e90919063ffffffff16565b6006819055503373ffffffffffffffffffffffffffffffffffffffff167ff3c1c7c0eb1328ddc834c4c9e579c06d35f443bf1102b034653624a239c7a40c836040518082815260200191505060405180910390a27fd1dc370699ae69fb860ed754789a4327413ec1cd379b93f2cbedf449a26b0e8583600554604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1505050565b60025481565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600060085442108061096b5750651b48eb57e00060065410155b15151561097757600080fd5b600a60003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010154821415156109c757600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166323b872dd3330856000604051602001526040518463ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019350505050602060405180830381600087803b1515610ac857600080fd5b6102c65a03f11515610ad957600080fd5b5050506040518051905050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166342966c68836000604051602001526040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180828152602001915050602060405180830381600087803b1515610b7d57600080fd5b6102c65a03f11515610b8e57600080fd5b505050604051805190501515610ba357600080fd5b600a60003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000015490506000600a60003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600001819055506000811115610c4457610c433382611a9c565b5b5050565b60055481565b60085481565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515610cb157600080fd5b600854421015610cd357660221b262dd8000600654141515610cd257600080fd5b5b651b48eb57e000600654108015610cf057506213c6806008540142105b151515610cfc57600080fd5b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673f
fffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051600060405180830381858888f193505050501515610d7557600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166370a08231306000604051602001526040518263ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001915050602060405180830381600087803b1515610e3a57600080fd5b6102c65a03f11515610e4b57600080fd5b5050506040518051905090506000811115610f2057600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166342966c68826000604051602001526040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180828152602001915050602060405180830381600087803b1515610ef957600080fd5b6102c65a03f11515610f0a57600080fd5b505050604051805190501515610f1f57600080fd5b5b6001600960006101000a81548160ff02191690831515021790555050565b600080339150600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905060008114151515610f9657600080fd5b803073ffffffffffffffffffffffffffffffffffffffff163110151515610fbc57600080fd5b610fd181600254611b5090919063ffffffff16565b6002819055506000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561105957fe5b5050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156110b857600080fd5b6001600060146101000a81548160ff021916908315150217905550565b670de0b6b3a764000081565b600060149054906101000a900460ff1681565b60075481565b651b48eb57e00081565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561115f57600080fd5b600060149054906101000a900460ff16151561117a57600080fd5b60008060146101000a81548160ff021916908315150217905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60065481565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561121c57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051600060405180830381858888f19350505050151561129457600080fd5b565b61025881565b600a6020528060005260406000206000915090508060000154908060010154905082565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561131b57600080fd5b600060075414151561132c57600080fd5b4260078190555062278d004201600881905550565b600960009054906101000a900460ff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b660221b262dd800081565b60008060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156113e557600080fd5b600654660221b262dd800003925061142b670de0b6b3a764000061141c610258670de0b6b3a76400006119d990
919063ffffffff16565b81151561142557fe5b04611a27565b915081831115151561143c57600080fd5b600a60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000209050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a9059cbb6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff16856000604051602001526040518363ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b151561158c57600080fd5b6102c65a03f1151561159d57600080fd5b50505060405180519050506115bf838260010154611a7e90919063ffffffff16565b81600101819055506115dc83600654611a7e90919063ffffffff16565b6006819055503073ffffffffffffffffffffffffffffffffffffffff167ff3c1c7c0eb1328ddc834c4c9e579c06d35f443bf1102b034653624a239c7a40c846040518082815260200191505060405180910390a27fd1dc370699ae69fb860ed754789a4327413ec1cd379b93f2cbedf449a26b0e856000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600554604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1505050565b60016020528060005260406000206000915090505481565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561173657600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663f2fde38b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff166040518263ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001915050600060405180830381600087803b151561181357600080fd5b6102c65a03f1151561182457600080fd5b505050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561188457600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415156118fb57806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b50565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561195957600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415151561199557600080fd5b80600460006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b600080828402905060008414806119fa57508284828115156119f757fe5b04145b1515611a0257fe5b8091505092915050565b6000808284811515611a1a57fe5b0490508091505092915050565b6000611a416202a300600754611a7e90919063ffffffff16565b421015611a7557611a6e611a5f600584611a0c90919063ffffffff16565b83611a7e90919063ffffffff16565b9050611a79565b8190505b919050565b6000808284019050838110151515611a9257fe5b8091505092915050565b611aee81600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054611a7e90919063ffffffff16565b600160008473fffffffffffffffffffffffffffffff
fffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550611b4681600254611a7e90919063ffffffff16565b6002819055505050565b6000828211151515611b5e57fe5b8183039050929150505600a165627a7a72305820ec0d82a406896ccf20989b3d6e650abe4dc104e400837f1f58e67ef499493ae90029\",\n        \"nonce\": \"1\",\n        \"storage\": {\n          \"0x0000000000000000000000000000000000000000000000000000000000000000\": \"0x0000000000000000000000008d69d00910d0b2afb2a99ed6c16c8129fa8e1751\",\n          \"0x0000000000000000000000000000000000000000000000000000000000000003\": \"0x000000000000000000000000e819f024b41358d2c08e3a868a5c5dd0566078d4\",\n          \"0x0000000000000000000000000000000000000000000000000000000000000007\": \"0x000000000000000000000000000000000000000000000000000000005a388981\",\n          \"0x0000000000000000000000000000000000000000000000000000000000000008\": \"0x000000000000000000000000000000000000000000000000000000005a3b38e6\"\n        }\n      },\n      \"0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826\": {\n        \"balance\": \"0x2a2dd979a35cf000\",\n        \"code\": \"0x\",\n        \"nonce\": \"0\",\n        \"storage\": {}\n      },\n      \"0xe819f024b41358d2c08e3a868a5c5dd0566078d4\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x6060604052600436106100ba576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100bf578063095ea7b31461014d57806318160ddd146101a757806323b872dd146101d0578063313ce5671461024957806342966c681461027257806370a08231146102ad5780638da5cb5b146102fa57806395d89b411461034f578063a9059cbb146103dd578063dd62ed3e14610437578063f2fde38b146104a3575b600080fd5b34156100ca57600080fd5b6100d26104dc565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101125780820151818401526020810190506100f7565b50505050905090810190601f16801561013f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015857600080fd5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610515565b604051808215151515815260200191505060405180910390f35b34156101b257600080fd5b6101ba61069c565b6040518082815260200191505060405180910390f35b34156101db57600080fd5b61022f600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff169060200190919080359060200190919050506106a2565b604051808215151515815260200191505060405180910390f35b341561025457600080fd5b61025c610952565b6040518082815260200191505060405180910390f35b341561027d57600080fd5b6102936004808035906020019091905050610957565b604051808215151515815260200191505060405180910390f35b34156102b857600080fd5b6102e4600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610abe565b6040518082815260200191505060405180910390f35b341561030557600080fd5b61030d610b07565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561035a57600080fd5b610362610b2d565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156103a2578082015181840152602081019050610387565b50505050905090810190601f1680156103cf5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34156103e857600080fd5b61041d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610b66565b604051808215151515815260200191505060405180910390f35b341561044257600080fd5b61048d600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffff
ffffffffffffffffffffffffffffffffff16906020019091905050610d01565b6040518082815260200191505060405180910390f35b34156104ae57600080fd5b6104da600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610d88565b005b6040805190810160405280600b81526020017f416c6c436f6465436f696e00000000000000000000000000000000000000000081525081565b6000808214806105a157506000600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054145b15156105ac57600080fd5b81600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a36001905092915050565b60005481565b600080600260008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905061077683600160008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e5f90919063ffffffff16565b600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000208190555061080b83600160008873ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506108618382610e7d90919063ffffffff16565b600260008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef856040518082815260200191505060405180910390a360019150509392505050565b600681565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156109b557600080fd5b610a0782600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550610a5f82600054610e7d90919063ffffffff16565b60008190555060003373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a360019050919050565b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050919050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6040805190810160405280600481526020017f414c4c4300000000000000000000000000000000000000000000000000000
00081525081565b6000610bba82600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550610c4f82600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e5f90919063ffffffff16565b600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a36001905092915050565b6000600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905092915050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515610de457600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141515610e5c5780600360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b50565b6000808284019050838110151515610e7357fe5b8091505092915050565b6000828211151515610e8b57fe5b8183039050929150505600a165627a7a7230582059f3ea3df0b054e9ab711f37969684ba83fe38f255ffe2c8d850d951121c51100029\",\n        \"nonce\": \"1\",\n        \"storage\": {}\n      }\n    },\n    \"config\": {\n      \"byzantiumBlock\": 1700000,\n      \"chainId\": 3,\n      \"daoForkSupport\": true,\n      \"eip150Block\": 0,\n      \"eip150Hash\": \"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d\",\n      \"eip155Block\": 10,\n      \"eip158Block\": 10,\n      \"ethash\": {},\n      \"homesteadBlock\": 0\n    },\n    \"difficulty\": \"3956606365\",\n    \"extraData\": \"0x566961425443\",\n    \"gasLimit\": \"5418523\",\n    \"hash\": \"0x6f37eb930a25da673ea1bb80fd9e32ddac19cdf7cd4bb2eac62cc13598624077\",\n    \"miner\": \"0xd049bfd667cb46aa3ef5df0da3e57db3be39e511\",\n    \"mixHash\": \"0x10971cde68c587c750c23b8589ae868ce82c2c646636b97e7d9856470c5297c7\",\n    \"nonce\": \"0x810f923ff4b450a1\",\n    \"number\": \"2295103\",\n    \"stateRoot\": \"0xff403612573d76dfdaf4fea2429b77dbe9764021ae0e38dc8ac79a3cf551179e\",\n    \"timestamp\": \"1513681246\",\n    \"totalDifficulty\": \"7162347056825919\"\n  },\n  \"input\": \"0xf86d808504e3b292008307dfa69433056b5dcac09a9b4becad0e1dcf92c19bd0af76880e92596fd62900008029a0e5f27bb66431f7081bb7f1f242003056d7f3f35414c352cd3d1848b52716dac2a07d0be78980edb0bd2a0678fc53aa90ea9558ce346b0d947967216918ac74ccea\",\n  \"result\": {\n    \"calls\": [\n      {\n        \"error\": \"invalid opcode: opcode 0xfe not defined\",\n        \"from\": \"0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76\",\n        \"gas\": \"0x75fe3\",\n        \"gasUsed\": \"0x75fe3\",\n        \"input\": \"0xa9059cbb000000000000000000000000d4fcab9f0a6dc0493af47c864f6f17a8a5e2e82600000000000000000000000000000000000000000000000000000000000002f4\",\n        \"to\": \"0xe819f024b41358d2c08e3a868a5c5dd0566078d4\",\n        \"type\": \"CALL\",\n        \"value\": \"0x0\"\n      }\n    ],\n    
\"error\": \"execution reverted\",\n    \"from\": \"0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826\",\n    \"gas\": \"0x78d9e\",\n    \"gasUsed\": \"0x76fc0\",\n    \"input\": \"0x\",\n    \"to\": \"0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76\",\n    \"type\": \"CALL\",\n    \"value\": \"0xe92596fd6290000\"\n  }\n}\n"
  },
  {
    "path": "eth/tracers/testdata/call_tracer_oog.json",
    "content": "{\n  \"context\": {\n    \"difficulty\": \"3699098917\",\n    \"gasLimit\": \"5258985\",\n    \"miner\": \"0xd049bfd667cb46aa3ef5df0da3e57db3be39e511\",\n    \"number\": \"2294631\",\n    \"timestamp\": \"1513675366\"\n  },\n  \"genesis\": {\n    \"alloc\": {\n      \"0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x6060604052600436106100ba576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100bf578063095ea7b31461014d57806318160ddd146101a757806323b872dd146101d0578063313ce5671461024957806342966c68146102785780635a3b7e42146102b357806370a082311461034157806379cc67901461038e57806395d89b41146103e8578063a9059cbb14610476578063dd62ed3e146104b8575b600080fd5b34156100ca57600080fd5b6100d2610524565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101125780820151818401526020810190506100f7565b50505050905090810190601f16801561013f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015857600080fd5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061055d565b604051808215151515815260200191505060405180910390f35b34156101b257600080fd5b6101ba6105ea565b6040518082815260200191505060405180910390f35b34156101db57600080fd5b61022f600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff169060200190919080359060200190919050506105f0565b604051808215151515815260200191505060405180910390f35b341561025457600080fd5b61025c610910565b604051808260ff1660ff16815260200191505060405180910390f35b341561028357600080fd5b6102996004808035906020019091905050610915565b604051808215151515815260200191505060405180910390f35b34156102be57600080fd5b6102c6610a18565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156103065780820151818401526020810190506102eb565b50505050905090810190601f1680156103335780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561034c57600080fd5b610378600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610a51565b6040518082815260200191505060405180910390f35b341561039957600080fd5b6103ce600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610a69565b604051808215151515815260200191505060405180910390f35b34156103f357600080fd5b6103fb610bf8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561043b578082015181840152602081019050610420565b50505050905090810190601f1680156104685780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561048157600080fd5b6104b6600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610c31565b005b34156104c357600080fd5b61050e600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610e34565b6040518082815260200191505060405180910390f35b6040805190810160405280600881526020017f446f70616d696e6500000000000000000000000000000000000000000000000081525081565b600081600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506001905092915050565b60005481565b6000808373ffffffffffffffffffffffffffffffffffffffff161415151561061757600080fd5b81600160008673ffffffffffffffffffffffffffffffffff
ffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561066557600080fd5b600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205482600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205401101515156106f157fe5b600260008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054821115151561077c57600080fd5b81600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254019250508190555081600260008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b601281565b600081600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561096557600080fd5b81600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508160008082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5836040518082815260200191505060405180910390a260019050919050565b6040805190810160405280600981526020017f446f706d6e20302e32000000000000000000000000000000000000000000000081525081565b60016020528060005260406000206000915090505481565b600081600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610ab957600080fd5b600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020548211151515610b4457600080fd5b81600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508160008082825403925050819055508273ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5836040518082815260200191505060405180910390a26001905092915050565b6040805190810160405280600581526020017f444f504d4e00000000000000000000000000000000000000000000000000000081525081565b60008273ffffffffffffffffffffffffffffffffffffffff1614151515610c5757600080fd5b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610ca557600080fd5b600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205481600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602
001600020540110151515610d3157fe5b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555080600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508173ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a35050565b60026020528160005260406000206020528060005260406000206000915091505054815600a165627a7a723058206d93424f4e7b11929b8276a269038402c10c0ddf21800e999916ddd9dff4a7630029\",\n        \"nonce\": \"1\",\n        \"storage\": {\n          \"0x296b66049cc4f9c8bf3d4f14752add261d1a980b39bdd194a7897baf39ac7579\": \"0x0000000000000000000000000000000000000000033b2e3c9fc9653f9e72b1e0\"\n        }\n      },\n      \"0x94194bc2aaf494501d7880b61274a169f6502a54\": {\n        \"balance\": \"0xea8c39a876d19888d\",\n        \"code\": \"0x\",\n        \"nonce\": \"265\",\n        \"storage\": {}\n      }\n    },\n    \"config\": {\n      \"byzantiumBlock\": 1700000,\n      \"chainId\": 3,\n      \"daoForkSupport\": true,\n      \"eip150Block\": 0,\n      \"eip150Hash\": \"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d\",\n      \"eip155Block\": 10,\n      \"eip158Block\": 10,\n      \"ethash\": {},\n      \"homesteadBlock\": 0\n    },\n    \"difficulty\": \"3699098917\",\n    \"extraData\": \"0x4554482e45544846414e532e4f52472d4641313738394444\",\n    \"gasLimit\": \"5263953\",\n    \"hash\": \"0x03a0f62a8106793dafcfae7b75fd2654322062d585a19cea568314d7205790dc\",\n    \"miner\": \"0xbbf5029fd710d227630c8b7d338051b8e76d50b3\",\n    \"mixHash\": \"0x15482cc64b7c00a947f5bf015dfc010db1a6a668c74df61974d6a7848c174408\",\n    \"nonce\": \"0xd1bdb150f6fd170e\",\n    \"number\": \"2294630\",\n    \"stateRoot\": \"0x1ab1a534e84cc787cda1db21e0d5920ab06017948075b759166cfea7274657a1\",\n    \"timestamp\": \"1513675347\",\n    \"totalDifficulty\": \"7160543502214733\"\n  },\n  \"input\": \"0xf8ab820109855d21dba00082ca1d9443064693d3d38ad6a7cb579e0d6d9718c8aa6b6280b844a9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f90001ba0ce3ad83f5530136467b7c2bb225f406bd170f4ad59c254e5103c34eeabb5bd69a0455154527224a42ab405cacf0fe92918a75641ce4152f8db292019a5527aa956\",\n  \"result\": {\n    \"error\": \"out of gas\",\n    \"from\": \"0x94194bc2aaf494501d7880b61274a169f6502a54\",\n    \"gas\": \"0x7045\",\n    \"gasUsed\": \"0x7045\",\n    \"input\": \"0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000\",\n    \"to\": \"0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62\",\n    \"type\": \"CALL\",\n    \"value\": \"0x0\"\n  }\n}\n"
  },
  {
    "path": "eth/tracers/testdata/call_tracer_revert.json",
    "content": "{\n  \"context\": {\n    \"difficulty\": \"3665057456\",\n    \"gasLimit\": \"5232723\",\n    \"miner\": \"0xf4d8e706cfb25c0decbbdd4d2e2cc10c66376a3f\",\n    \"number\": \"2294501\",\n    \"timestamp\": \"1513673601\"\n  },\n  \"genesis\": {\n    \"alloc\": {\n      \"0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9\": {\n        \"balance\": \"0x2a3fc32bcc019283\",\n        \"code\": \"0x\",\n        \"nonce\": \"10\",\n        \"storage\": {}\n      },\n      \"0xabbcd5b340c80b5f1c0545c04c987b87310296ae\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x606060405236156100755763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416632d0335ab811461007a578063548db174146100ab5780637f649783146100fc578063b092145e1461014d578063c3f44c0a14610186578063c47cf5de14610203575b600080fd5b341561008557600080fd5b610099600160a060020a0360043516610270565b60405190815260200160405180910390f35b34156100b657600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061028f95505050505050565b005b341561010757600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061029e95505050505050565b005b341561015857600080fd5b610172600160a060020a03600435811690602435166102ad565b604051901515815260200160405180910390f35b341561019157600080fd5b6100fa6004803560ff1690602480359160443591606435600160a060020a0316919060a49060843590810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965050509235600160a060020a031692506102cd915050565b005b341561020e57600080fd5b61025460046024813581810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965061056a95505050505050565b604051600160a060020a03909116815260200160405180910390f35b600160a060020a0381166000908152602081905260409020545b919050565b61029a816000610594565b5b50565b61029a816001610594565b5b50565b600160209081526000928352604080842090915290825290205460ff1681565b60008080600160a060020a038416158061030d5750600160a060020a038085166000908152600160209081526040808320339094168352929052205460ff165b151561031857600080fd5b6103218561056a565b600160a060020a038116600090815260208190526040808220549295507f19000000000000000000000000000000000000000000000000000000000000009230918891908b908b90517fff000000000000000000000000000000000000000000000000000000000000008089168252871660018201526c01000000000000000000000000600160a060020a038088168202600284015286811682026016840152602a8301869052841602604a820152605e810182805190602001908083835b6020831061040057805182525b601f1990920191602091820191016103e0565b6001836020036101000a0380198251168184511617909252505050919091019850604097505050505050505051809103902091506001828a8a8a6040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f1151561049957600080fd5b5050602060405103519050600160a060020a03838116908216146104bc57600080fd5b600160a060020a0380841660009081526020819052604090819020805460010190559087169086905180828051906020019080838360005b8381101561050d5780820151818401525b6020016104f4565b50505050905090810190601f16801561053a5780820380516001836020036101000a031916815260200191505b5091505060006040518083038160008661646e5a03f1915050151561055e57600080fd5b5b505050505050505050565b600060248251101561057e5750600061028a565b600160a060020a0360248301511690505b919050565b60005b825181101561060157600160a060020a033316600090815260016020526040812083918584815181106105c657fe5b90602001906020020
151600160a060020a031681526020810191909152604001600020805460ff19169115159190911790555b600101610597565b5b5050505600a165627a7a723058200027e8b695e9d2dea9f3629519022a69f3a1d23055ce86406e686ea54f31ee9c0029\",\n        \"nonce\": \"1\",\n        \"storage\": {}\n      }\n    },\n    \"config\": {\n      \"byzantiumBlock\": 1700000,\n      \"chainId\": 3,\n      \"daoForkSupport\": true,\n      \"eip150Block\": 0,\n      \"eip150Hash\": \"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d\",\n      \"eip155Block\": 10,\n      \"eip158Block\": 10,\n      \"ethash\": {},\n      \"homesteadBlock\": 0\n    },\n    \"difficulty\": \"3672229776\",\n    \"extraData\": \"0x4554482e45544846414e532e4f52472d4641313738394444\",\n    \"gasLimit\": \"5227619\",\n    \"hash\": \"0xa07b3d6c6bf63f5f981016db9f2d1d93033833f2c17e8bf7209e85f1faf08076\",\n    \"miner\": \"0xbbf5029fd710d227630c8b7d338051b8e76d50b3\",\n    \"mixHash\": \"0x806e151ce2817be922e93e8d5921fa0f0d0fd213d6b2b9a3fa17458e74a163d0\",\n    \"nonce\": \"0xbc5d43adc2c30c7d\",\n    \"number\": \"2294500\",\n    \"stateRoot\": \"0xca645b335888352ef9d8b1ef083e9019648180b259026572e3139717270de97d\",\n    \"timestamp\": \"1513673552\",\n    \"totalDifficulty\": \"7160066586979149\"\n  },\n  \"input\": \"0xf9018b0a8505d21dba00832dc6c094abbcd5b340c80b5f1c0545c04c987b87310296ae80b9012473b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988000000000000000000000000000000000000000000000000000000000000000000000000000000001ba0fd659d76a4edbd2a823e324c93f78ad6803b30ff4a9c8bce71ba82798975c70ca06571eecc0b765688ec6c78942c5ee8b585e00988c0141b518287e9be919bc48a\",\n  \"result\": {\n    \"error\": \"execution reverted\",\n    \"from\": \"0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9\",\n    \"gas\": \"0x2d55e8\",\n    \"gasUsed\": \"0xc3\",\n    \"input\": \"0x73b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a98800000000000000000000000000000000000000000000000000000000000000000000000000000000\",\n    \"to\": \"0xabbcd5b340c80b5f1c0545c04c987b87310296ae\",\n    \"type\": \"CALL\",\n    \"value\": \"0x0\"\n  }\n}\n"
  },
  {
    "path": "eth/tracers/testdata/call_tracer_revert_reason.json",
    "content": "{\n  \"context\": {\n    \"difficulty\": \"2\",\n    \"gasLimit\": \"8000000\",\n    \"miner\": \"0x0000000000000000000000000000000000000000\",\n    \"number\": \"3212651\",\n    \"timestamp\": \"1597246515\"\n  },\n  \"genesis\": {\n    \"alloc\": {\n      \"0xf58833cf0c791881b494eb79d461e08a1f043f52\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x608060405234801561001057600080fd5b50600436106100a5576000357c010000000000000000000000000000000000000000000000000000000090048063609ff1bd11610078578063609ff1bd146101af5780639e7b8d61146101cd578063a3ec138d14610211578063e2ba53f0146102ae576100a5565b80630121b93f146100aa578063013cf08b146100d85780632e4176cf146101215780635c19a95c1461016b575b600080fd5b6100d6600480360360208110156100c057600080fd5b81019080803590602001909291905050506102cc565b005b610104600480360360208110156100ee57600080fd5b8101908080359060200190929190505050610469565b604051808381526020018281526020019250505060405180910390f35b61012961049a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6101ad6004803603602081101561018157600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506104bf565b005b6101b76108db565b6040518082815260200191505060405180910390f35b61020f600480360360208110156101e357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610952565b005b6102536004803603602081101561022757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610b53565b60405180858152602001841515151581526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200194505050505060405180910390f35b6102b6610bb0565b6040518082815260200191505060405180910390f35b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020905060008160000154141561038a576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f486173206e6f20726967687420746f20766f746500000000000000000000000081525060200191505060405180910390fd5b8060010160009054906101000a900460ff161561040f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600e8152602001807f416c726561647920766f7465642e00000000000000000000000000000000000081525060200191505060405180910390fd5b60018160010160006101000a81548160ff02191690831515021790555081816002018190555080600001546002838154811061044757fe5b9060005260206000209060020201600101600082825401925050819055505050565b6002818154811061047657fe5b90600052602060002090600202016000915090508060000154908060010154905082565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff1615610587576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f596f7520616c726561647920766f7465642e000000000000000000000000000081525060200191505060405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415610629576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601e8152602001807f53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e000081525060200191505060405180910390fd5b5b600073fffffffffffff
fffffffffffffffffffffffffff16600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146107cc57600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1691503373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156107c7576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260198152602001807f466f756e64206c6f6f7020696e2064656c65676174696f6e2e0000000000000081525060200191505060405180910390fd5b61062a565b60018160010160006101000a81548160ff021916908315150217905550818160010160016101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff16156108bf578160000154600282600201548154811061089c57fe5b9060005260206000209060020201600101600082825401925050819055506108d6565b816000015481600001600082825401925050819055505b505050565b6000806000905060008090505b60028054905081101561094d57816002828154811061090357fe5b9060005260206000209060020201600101541115610940576002818154811061092857fe5b90600052602060002090600202016001015491508092505b80806001019150506108e8565b505090565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146109f7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526028815260200180610bde6028913960400191505060405180910390fd5b600160008273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160009054906101000a900460ff1615610aba576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260188152602001807f54686520766f74657220616c726561647920766f7465642e000000000000000081525060200191505060405180910390fd5b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000015414610b0957600080fd5b60018060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000018190555050565b60016020528060005260406000206000915090508060000154908060010160009054906101000a900460ff16908060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060020154905084565b60006002610bbc6108db565b81548110610bc657fe5b90600052602060002090600202016000015490509056fe4f6e6c79206368616972706572736f6e2063616e206769766520726967687420746f20766f74652ea26469706673582212201d282819f8f06fed792100d60a8b08809b081a34a1ecd225e83a4b41122165ed64736f6c63430006060033\",\n        \"nonce\": \"1\",\n        \"storage\": {\n          \"0x6200beec95762de01ce05f2a0e58ce3299dbb53c68c9f3254a242121223cdf58\": \"0x0000000000000000000000000000000000000000000000000000000000000000\"\n        }\n      },\n      \"0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1\": {\n        \"balance\": \"0x57af9d6b3df812900\",\n        \"code\": \"0x\",\n        \"nonce\": \"6\",\n        \"storage\": {}\n      }\n    },\n    \"config\": {\n      
\"byzantiumBlock\": 0,\n      \"constantinopleBlock\": 0,\n      \"petersburgBlock\": 0,\n      \"IstanbulBlock\":1561651,\n      \"chainId\": 5,\n      \"daoForkSupport\": true,\n      \"eip150Block\": 0,\n      \"eip150Hash\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",\n      \"eip155Block\": 10,\n      \"eip158Block\": 10,\n      \"ethash\": {},\n      \"homesteadBlock\": 0\n    },\n    \"difficulty\": \"3509749784\",\n    \"extraData\": \"0x4554482e45544846414e532e4f52472d4641313738394444\",\n    \"gasLimit\": \"4727564\",\n    \"hash\": \"0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440\",\n    \"miner\": \"0xbbf5029fd710d227630c8b7d338051b8e76d50b3\",\n    \"mixHash\": \"0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada\",\n    \"nonce\": \"0x4eb12e19c16d43da\",\n    \"number\": \"2289805\",\n    \"stateRoot\": \"0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f\",\n    \"timestamp\": \"1513601261\",\n    \"totalDifficulty\": \"7143276353481064\"\n  },\n  \"input\": \"0xf888068449504f80832dc6c094f58833cf0c791881b494eb79d461e08a1f043f5280a45c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf12da0264664db3e71fae1dbdaf2f53954be149ad3b7ba8a5054b4d89c70febfacc8b1a0212e8398757963f419681839ae8c5a54b411e252473c82d93dda68405ca63294\",\n  \"result\": {\n    \"error\": \"execution reverted\",\n    \"from\": \"0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1\",\n    \"gas\": \"0x2d7308\",\n    \"gasUsed\": \"0x588\",\n    \"input\": \"0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1\",\n    \"to\": \"0xf58833cf0c791881b494eb79d461e08a1f043f52\",\n    \"type\": \"CALL\",\n    \"value\": \"0x0\",\n    \"output\": \"0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e0000\"\n  }\n}\n"
  },
  {
    "path": "eth/tracers/testdata/call_tracer_simple.json",
    "content": "{\n  \"context\": {\n    \"difficulty\": \"3502894804\",\n    \"gasLimit\": \"4722976\",\n    \"miner\": \"0x1585936b53834b021f68cc13eeefdec2efc8e724\",\n    \"number\": \"2289806\",\n    \"timestamp\": \"1513601314\"\n  },\n  \"genesis\": {\n    \"alloc\": {\n      \"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x\",\n        \"nonce\": \"22\",\n        \"storage\": {}\n      },\n      \"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe\": {\n        \"balance\": \"0x4d87094125a369d9bd5\",\n        \"code\": \"0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a03161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b60
4051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029\",\n        \"nonce\": \"1\",\n        \"storage\": {\n          \"0x0000000000000000000000000000000000000000000000000000000000000000\": \"0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb\",\n          \"0x0000000000000000000000000000000000000000000000000000000000000001\": \"0x00000000000000000000000000000000000000000000000006f05b59d3b20000\",\n          \"0x0000000000000000000000000000000000000000000000000000000000000002\": \"0x000000000000000000000000000000000000000000000000000000000000003c\",\n          \"0x0000000000000000000000000000000000000000000000000000000000000003\": \"0x000000000000000000000000000000000000000000000000000000005a37b834\"\n        }\n      },\n      \"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb\": {\n        \"balance\": \"0x1780d77678137ac1b775\",\n        \"code\": \"0x\",\n        \"nonce\": \"29072\",\n        \"storage\": {}\n      }\n    },\n    \"config\": {\n      \"byzantiumBlock\": 1700000,\n      \"chainId\": 3,\n      \"daoForkSupport\": true,\n      \"eip150Block\": 0,\n      \"eip150Hash\": \"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d\",\n      \"eip155Block\": 10,\n      \"eip158Block\": 10,\n      \"ethash\": {},\n      \"homesteadBlock\": 0\n    },\n    \"difficulty\": \"3509749784\",\n    \"extraData\": \"0x4554482e45544846414e532e4f52472d4641313738394444\",\n    \"gasLimit\": \"4727564\",\n    \"hash\": \"0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440\",\n    \"miner\": \"0xbbf5029fd710d227630c8b7d338051b8e76d50b3\",\n    \"mixHash\": \"0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada\",\n    \"nonce\": \"0x4eb12e19c16d43da\",\n    \"number\": \"2289805\",\n    \"stateRoot\": \"0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f\",\n    \"timestamp\": \"1513601261\",\n    \"totalDifficulty\": \"7143276353481064\"\n  },\n  \"input\": \"0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4\",\n  \"result\": {\n    \"calls\": [\n      {\n        \"from\": \"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe\",\n        \"input\": \"0x\",\n        \"to\": \"0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5\",\n        \"type\": \"CALL\",\n        \"value\": \"0x6f05b59d3b20000\"\n      }\n    ],\n    \"from\": \"0xb436ba50d378d4bbc8660d312a13df6af6e89dfb\",\n    \"gas\": \"0x10738\",\n    \"gasUsed\": \"0x3ef9\",\n    \"input\": \"0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5\",\n    \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",\n    \"to\": \"0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe\",\n    \"type\": \"CALL\",\n    \"value\": \"0x0\"\n  }\n}\n"
  },
  {
    "path": "eth/tracers/testdata/call_tracer_throw.json",
    "content": "{\n  \"context\": {\n    \"difficulty\": \"117009631\",\n    \"gasLimit\": \"4712388\",\n    \"miner\": \"0x294e5d6c39a36ce38af1dca70c1060f78dee8070\",\n    \"number\": \"25009\",\n    \"timestamp\": \"1479891666\"\n  },\n  \"genesis\": {\n    \"alloc\": {\n      \"0x70c9217d814985faef62b124420f8dfbddd96433\": {\n        \"balance\": \"0x4ecd70668f5d854a\",\n        \"code\": \"0x\",\n        \"nonce\": \"1638\",\n        \"storage\": {}\n      },\n      \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\": {\n        \"balance\": \"0x0\",\n        \"code\": \"0x606060405236156101745760e060020a600035046302d05d3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b41461021257806314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b610665600554600160a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d9891602481810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681
565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff82166006811015610002576020020151601560ff831660068110156100025701556001016106ac565b61071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919450600160a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179
055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a87810291909117918290556008544301600955900460ff1615610df757600a54610e039061029e565b600a54610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a03918216600485015260248401529251909450918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080
600160a060020a0316632b68bb2d6040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256\",\n        \"nonce\": \"1\",\n        \"storage\": {\n          \"0x0000000000000000000000000000000000000000000000000000000000000001\": \"0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396\",\n          \"0x0000000000000000000000000000000000000000000000000000000000000002\": \"0x00000000000000000000000000000000000000000000000000000000000061a9\",\n          \"0x0000000000000000000000000000000000000000000000000000000000000005\": \"0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433\"\n        }\n      }\n    },\n    \"config\": {\n      \"byzantiumBlock\": 1700000,\n      \"chainId\": 3,\n      \"daoForkSupport\": true,\n      \"eip150Block\": 0,\n      \"eip150Hash\": \"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d\",\n      \"eip155Block\": 10,\n      \"eip158Block\": 10,\n      \"ethash\": {},\n      \"homesteadBlock\": 0\n    },\n    \"difficulty\": \"117066792\",\n    \"extraData\": \"0xd783010502846765746887676f312e372e33856c696e7578\",\n    \"gasLimit\": \"4712388\",\n    \"hash\": \"0xe23e8d4562a1045b70cbc99fefb20c101a8f0fc8559a80d65fea8896e2f1d46e\",\n    \"miner\": \"0x71842f946b98800fe6feb49f0ae4e253259031c9\",\n    \"mixHash\": \"0x0aada9d6e93dd4db0d09c0488dc0a048fca2ccdc1f3fc7b83ba2a8d393a3a4ff\",\n    \"nonce\": \"0x70849d5838dee2e9\",\n    \"number\": \"25008\",\n    \"stateRoot\": \"0x1e01d2161794768c5b917069e73d86e8dca80cd7f3168c0597de420ab93a3b7b\",\n    \"timestamp\": \"1479891641\",\n    \"totalDifficulty\": \"1896347038589\"\n  },\n  \"input\": \"0xf88b8206668504a817c8008303d09094c212e03b9e060e36facad5fd8f4435412ca22e6b80a451a34eb8000000000000000000000000000000000000000000000027fad02094277c000029a0692a3b4e7b2842f8dd7832e712c21e09f451f416c8976d5b8d02e8c0c2b4bea9a07645e90fc421b63dd755767fd93d3c03b4ec0c4d8fafa059558d08cf11d59750\",\n  \"result\": {\n    \"error\": \"invalid jump destination\",\n    \"from\": \"0x70c9217d814985faef62b124420f8dfbddd96433\",\n    \"gas\": \"0x37b38\",\n    \"gasUsed\": \"0x37b38\",\n    \"input\": \"0x51a34eb8000000000000000000000000000000000000000000000027fad02094277c0000\",\n    \"to\": \"0xc212e03b9e060e36facad5fd8f4435412ca22e6b\",\n    \"type\": \"CALL\",\n    \"value\": \"0x0\"\n  }\n}\n"
  },
  {
    "path": "eth/tracers/tracer.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage tracers\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"sync/atomic\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"gopkg.in/olebedev/go-duktape.v3\"\n)\n\n// bigIntegerJS is the minified version of https://github.com/peterolson/BigInteger.js.\nconst bigIntegerJS = `var bigInt=function(undefined){\"use strict\";var BASE=1e7,LOG_BASE=7,MAX_INT=9007199254740992,MAX_INT_ARR=smallToArray(MAX_INT),LOG_MAX_INT=Math.log(MAX_INT);function Integer(v,radix){if(typeof v===\"undefined\")return Integer[0];if(typeof radix!==\"undefined\")return+radix===10?parseValue(v):parseBase(v,radix);return parseValue(v)}function BigInteger(value,sign){this.value=value;this.sign=sign;this.isSmall=false}BigInteger.prototype=Object.create(Integer.prototype);function SmallInteger(value){this.value=value;this.sign=value<0;this.isSmall=true}SmallInteger.prototype=Object.create(Integer.prototype);function isPrecise(n){return-MAX_INT<n&&n<MAX_INT}function smallToArray(n){if(n<1e7)return[n];if(n<1e14)return[n%1e7,Math.floor(n/1e7)];return[n%1e7,Math.floor(n/1e7)%1e7,Math.floor(n/1e14)]}function arrayToSmall(arr){trim(arr);var length=arr.length;if(length<4&&compareAbs(arr,MAX_INT_ARR)<0){switch(length){case 0:return 0;case 1:return arr[0];case 2:return arr[0]+arr[1]*BASE;default:return arr[0]+(arr[1]+arr[2]*BASE)*BASE}}return arr}function trim(v){var i=v.length;while(v[--i]===0);v.length=i+1}function createArray(length){var x=new Array(length);var i=-1;while(++i<length){x[i]=0}return x}function truncate(n){if(n>0)return Math.floor(n);return Math.ceil(n)}function add(a,b){var l_a=a.length,l_b=b.length,r=new Array(l_a),carry=0,base=BASE,sum,i;for(i=0;i<l_b;i++){sum=a[i]+b[i]+carry;carry=sum>=base?1:0;r[i]=sum-carry*base}while(i<l_a){sum=a[i]+carry;carry=sum===base?1:0;r[i++]=sum-carry*base}if(carry>0)r.push(carry);return r}function addAny(a,b){if(a.length>=b.length)return add(a,b);return add(b,a)}function addSmall(a,carry){var l=a.length,r=new Array(l),base=BASE,sum,i;for(i=0;i<l;i++){sum=a[i]-base+carry;carry=Math.floor(sum/base);r[i]=sum-carry*base;carry+=1}while(carry>0){r[i++]=carry%base;carry=Math.floor(carry/base)}return r}BigInteger.prototype.add=function(v){var n=parseValue(v);if(this.sign!==n.sign){return this.subtract(n.negate())}var a=this.value,b=n.value;if(n.isSmall){return new BigInteger(addSmall(a,Math.abs(b)),this.sign)}return new 
BigInteger(addAny(a,b),this.sign)};BigInteger.prototype.plus=BigInteger.prototype.add;SmallInteger.prototype.add=function(v){var n=parseValue(v);var a=this.value;if(a<0!==n.sign){return this.subtract(n.negate())}var b=n.value;if(n.isSmall){if(isPrecise(a+b))return new SmallInteger(a+b);b=smallToArray(Math.abs(b))}return new BigInteger(addSmall(b,Math.abs(a)),a<0)};SmallInteger.prototype.plus=SmallInteger.prototype.add;function subtract(a,b){var a_l=a.length,b_l=b.length,r=new Array(a_l),borrow=0,base=BASE,i,difference;for(i=0;i<b_l;i++){difference=a[i]-borrow-b[i];if(difference<0){difference+=base;borrow=1}else borrow=0;r[i]=difference}for(i=b_l;i<a_l;i++){difference=a[i]-borrow;if(difference<0)difference+=base;else{r[i++]=difference;break}r[i]=difference}for(;i<a_l;i++){r[i]=a[i]}trim(r);return r}function subtractAny(a,b,sign){var value;if(compareAbs(a,b)>=0){value=subtract(a,b)}else{value=subtract(b,a);sign=!sign}value=arrayToSmall(value);if(typeof value===\"number\"){if(sign)value=-value;return new SmallInteger(value)}return new BigInteger(value,sign)}function subtractSmall(a,b,sign){var l=a.length,r=new Array(l),carry=-b,base=BASE,i,difference;for(i=0;i<l;i++){difference=a[i]+carry;carry=Math.floor(difference/base);difference%=base;r[i]=difference<0?difference+base:difference}r=arrayToSmall(r);if(typeof r===\"number\"){if(sign)r=-r;return new SmallInteger(r)}return new BigInteger(r,sign)}BigInteger.prototype.subtract=function(v){var n=parseValue(v);if(this.sign!==n.sign){return this.add(n.negate())}var a=this.value,b=n.value;if(n.isSmall)return subtractSmall(a,Math.abs(b),this.sign);return subtractAny(a,b,this.sign)};BigInteger.prototype.minus=BigInteger.prototype.subtract;SmallInteger.prototype.subtract=function(v){var n=parseValue(v);var a=this.value;if(a<0!==n.sign){return this.add(n.negate())}var b=n.value;if(n.isSmall){return new SmallInteger(a-b)}return subtractSmall(b,Math.abs(a),a>=0)};SmallInteger.prototype.minus=SmallInteger.prototype.subtract;BigInteger.prototype.negate=function(){return new BigInteger(this.value,!this.sign)};SmallInteger.prototype.negate=function(){var sign=this.sign;var small=new SmallInteger(-this.value);small.sign=!sign;return small};BigInteger.prototype.abs=function(){return new BigInteger(this.value,false)};SmallInteger.prototype.abs=function(){return new SmallInteger(Math.abs(this.value))};function multiplyLong(a,b){var a_l=a.length,b_l=b.length,l=a_l+b_l,r=createArray(l),base=BASE,product,carry,i,a_i,b_j;for(i=0;i<a_l;++i){a_i=a[i];for(var j=0;j<b_l;++j){b_j=b[j];product=a_i*b_j+r[i+j];carry=Math.floor(product/base);r[i+j]=product-carry*base;r[i+j+1]+=carry}}trim(r);return r}function multiplySmall(a,b){var l=a.length,r=new Array(l),base=BASE,carry=0,product,i;for(i=0;i<l;i++){product=a[i]*b+carry;carry=Math.floor(product/base);r[i]=product-carry*base}while(carry>0){r[i++]=carry%base;carry=Math.floor(carry/base)}return r}function shiftLeft(x,n){var r=[];while(n-- >0)r.push(0);return r.concat(x)}function multiplyKaratsuba(x,y){var n=Math.max(x.length,y.length);if(n<=30)return multiplyLong(x,y);n=Math.ceil(n/2);var b=x.slice(n),a=x.slice(0,n),d=y.slice(n),c=y.slice(0,n);var ac=multiplyKaratsuba(a,c),bd=multiplyKaratsuba(b,d),abcd=multiplyKaratsuba(addAny(a,b),addAny(c,d));var product=addAny(addAny(ac,shiftLeft(subtract(subtract(abcd,ac),bd),n)),shiftLeft(bd,2*n));trim(product);return product}function useKaratsuba(l1,l2){return-.012*l1-.012*l2+15e-6*l1*l2>0}BigInteger.prototype.multiply=function(v){var 
n=parseValue(v),a=this.value,b=n.value,sign=this.sign!==n.sign,abs;if(n.isSmall){if(b===0)return Integer[0];if(b===1)return this;if(b===-1)return this.negate();abs=Math.abs(b);if(abs<BASE){return new BigInteger(multiplySmall(a,abs),sign)}b=smallToArray(abs)}if(useKaratsuba(a.length,b.length))return new BigInteger(multiplyKaratsuba(a,b),sign);return new BigInteger(multiplyLong(a,b),sign)};BigInteger.prototype.times=BigInteger.prototype.multiply;function multiplySmallAndArray(a,b,sign){if(a<BASE){return new BigInteger(multiplySmall(b,a),sign)}return new BigInteger(multiplyLong(b,smallToArray(a)),sign)}SmallInteger.prototype._multiplyBySmall=function(a){if(isPrecise(a.value*this.value)){return new SmallInteger(a.value*this.value)}return multiplySmallAndArray(Math.abs(a.value),smallToArray(Math.abs(this.value)),this.sign!==a.sign)};BigInteger.prototype._multiplyBySmall=function(a){if(a.value===0)return Integer[0];if(a.value===1)return this;if(a.value===-1)return this.negate();return multiplySmallAndArray(Math.abs(a.value),this.value,this.sign!==a.sign)};SmallInteger.prototype.multiply=function(v){return parseValue(v)._multiplyBySmall(this)};SmallInteger.prototype.times=SmallInteger.prototype.multiply;function square(a){var l=a.length,r=createArray(l+l),base=BASE,product,carry,i,a_i,a_j;for(i=0;i<l;i++){a_i=a[i];for(var j=0;j<l;j++){a_j=a[j];product=a_i*a_j+r[i+j];carry=Math.floor(product/base);r[i+j]=product-carry*base;r[i+j+1]+=carry}}trim(r);return r}BigInteger.prototype.square=function(){return new BigInteger(square(this.value),false)};SmallInteger.prototype.square=function(){var value=this.value*this.value;if(isPrecise(value))return new SmallInteger(value);return new BigInteger(square(smallToArray(Math.abs(this.value))),false)};function divMod1(a,b){var a_l=a.length,b_l=b.length,base=BASE,result=createArray(b.length),divisorMostSignificantDigit=b[b_l-1],lambda=Math.ceil(base/(2*divisorMostSignificantDigit)),remainder=multiplySmall(a,lambda),divisor=multiplySmall(b,lambda),quotientDigit,shift,carry,borrow,i,l,q;if(remainder.length<=a_l)remainder.push(0);divisor.push(0);divisorMostSignificantDigit=divisor[b_l-1];for(shift=a_l-b_l;shift>=0;shift--){quotientDigit=base-1;if(remainder[shift+b_l]!==divisorMostSignificantDigit){quotientDigit=Math.floor((remainder[shift+b_l]*base+remainder[shift+b_l-1])/divisorMostSignificantDigit)}carry=0;borrow=0;l=divisor.length;for(i=0;i<l;i++){carry+=quotientDigit*divisor[i];q=Math.floor(carry/base);borrow+=remainder[shift+i]-(carry-q*base);carry=q;if(borrow<0){remainder[shift+i]=borrow+base;borrow=-1}else{remainder[shift+i]=borrow;borrow=0}}while(borrow!==0){quotientDigit-=1;carry=0;for(i=0;i<l;i++){carry+=remainder[shift+i]-base+divisor[i];if(carry<0){remainder[shift+i]=carry+base;carry=0}else{remainder[shift+i]=carry;carry=1}}borrow+=carry}result[shift]=quotientDigit}remainder=divModSmall(remainder,lambda)[0];return[arrayToSmall(result),arrayToSmall(remainder)]}function divMod2(a,b){var a_l=a.length,b_l=b.length,result=[],part=[],base=BASE,guess,xlen,highx,highy,check;while(a_l){part.unshift(a[--a_l]);trim(part);if(compareAbs(part,b)<0){result.push(0);continue}xlen=part.length;highx=part[xlen-1]*base+part[xlen-2];highy=b[b_l-1]*base+b[b_l-2];if(xlen>b_l){highx=(highx+1)*base}guess=Math.ceil(highx/highy);do{check=multiplySmall(b,guess);if(compareAbs(check,part)<=0)break;guess--}while(guess);result.push(guess);part=subtract(part,check)}result.reverse();return[arrayToSmall(result),arrayToSmall(part)]}function divModSmall(value,lambda){var 
length=value.length,quotient=createArray(length),base=BASE,i,q,remainder,divisor;remainder=0;for(i=length-1;i>=0;--i){divisor=remainder*base+value[i];q=truncate(divisor/lambda);remainder=divisor-q*lambda;quotient[i]=q|0}return[quotient,remainder|0]}function divModAny(self,v){var value,n=parseValue(v);var a=self.value,b=n.value;var quotient;if(b===0)throw new Error(\"Cannot divide by zero\");if(self.isSmall){if(n.isSmall){return[new SmallInteger(truncate(a/b)),new SmallInteger(a%b)]}return[Integer[0],self]}if(n.isSmall){if(b===1)return[self,Integer[0]];if(b==-1)return[self.negate(),Integer[0]];var abs=Math.abs(b);if(abs<BASE){value=divModSmall(a,abs);quotient=arrayToSmall(value[0]);var remainder=value[1];if(self.sign)remainder=-remainder;if(typeof quotient===\"number\"){if(self.sign!==n.sign)quotient=-quotient;return[new SmallInteger(quotient),new SmallInteger(remainder)]}return[new BigInteger(quotient,self.sign!==n.sign),new SmallInteger(remainder)]}b=smallToArray(abs)}var comparison=compareAbs(a,b);if(comparison===-1)return[Integer[0],self];if(comparison===0)return[Integer[self.sign===n.sign?1:-1],Integer[0]];if(a.length+b.length<=200)value=divMod1(a,b);else value=divMod2(a,b);quotient=value[0];var qSign=self.sign!==n.sign,mod=value[1],mSign=self.sign;if(typeof quotient===\"number\"){if(qSign)quotient=-quotient;quotient=new SmallInteger(quotient)}else quotient=new BigInteger(quotient,qSign);if(typeof mod===\"number\"){if(mSign)mod=-mod;mod=new SmallInteger(mod)}else mod=new BigInteger(mod,mSign);return[quotient,mod]}BigInteger.prototype.divmod=function(v){var result=divModAny(this,v);return{quotient:result[0],remainder:result[1]}};SmallInteger.prototype.divmod=BigInteger.prototype.divmod;BigInteger.prototype.divide=function(v){return divModAny(this,v)[0]};SmallInteger.prototype.over=SmallInteger.prototype.divide=BigInteger.prototype.over=BigInteger.prototype.divide;BigInteger.prototype.mod=function(v){return divModAny(this,v)[1]};SmallInteger.prototype.remainder=SmallInteger.prototype.mod=BigInteger.prototype.remainder=BigInteger.prototype.mod;BigInteger.prototype.pow=function(v){var n=parseValue(v),a=this.value,b=n.value,value,x,y;if(b===0)return Integer[1];if(a===0)return Integer[0];if(a===1)return Integer[1];if(a===-1)return n.isEven()?Integer[1]:Integer[-1];if(n.sign){return Integer[0]}if(!n.isSmall)throw new Error(\"The exponent \"+n.toString()+\" is too large.\");if(this.isSmall){if(isPrecise(value=Math.pow(a,b)))return new SmallInteger(truncate(value))}x=this;y=Integer[1];while(true){if(b&1===1){y=y.times(x);--b}if(b===0)break;b/=2;x=x.square()}return y};SmallInteger.prototype.pow=BigInteger.prototype.pow;BigInteger.prototype.modPow=function(exp,mod){exp=parseValue(exp);mod=parseValue(mod);if(mod.isZero())throw new Error(\"Cannot take modPow with modulus 0\");var r=Integer[1],base=this.mod(mod);while(exp.isPositive()){if(base.isZero())return Integer[0];if(exp.isOdd())r=r.multiply(base).mod(mod);exp=exp.divide(2);base=base.square().mod(mod)}return r};SmallInteger.prototype.modPow=BigInteger.prototype.modPow;function compareAbs(a,b){if(a.length!==b.length){return a.length>b.length?1:-1}for(var i=a.length-1;i>=0;i--){if(a[i]!==b[i])return a[i]>b[i]?1:-1}return 0}BigInteger.prototype.compareAbs=function(v){var n=parseValue(v),a=this.value,b=n.value;if(n.isSmall)return 1;return compareAbs(a,b)};SmallInteger.prototype.compareAbs=function(v){var n=parseValue(v),a=Math.abs(this.value),b=n.value;if(n.isSmall){b=Math.abs(b);return 
a===b?0:a>b?1:-1}return-1};BigInteger.prototype.compare=function(v){if(v===Infinity){return-1}if(v===-Infinity){return 1}var n=parseValue(v),a=this.value,b=n.value;if(this.sign!==n.sign){return n.sign?1:-1}if(n.isSmall){return this.sign?-1:1}return compareAbs(a,b)*(this.sign?-1:1)};BigInteger.prototype.compareTo=BigInteger.prototype.compare;SmallInteger.prototype.compare=function(v){if(v===Infinity){return-1}if(v===-Infinity){return 1}var n=parseValue(v),a=this.value,b=n.value;if(n.isSmall){return a==b?0:a>b?1:-1}if(a<0!==n.sign){return a<0?-1:1}return a<0?1:-1};SmallInteger.prototype.compareTo=SmallInteger.prototype.compare;BigInteger.prototype.equals=function(v){return this.compare(v)===0};SmallInteger.prototype.eq=SmallInteger.prototype.equals=BigInteger.prototype.eq=BigInteger.prototype.equals;BigInteger.prototype.notEquals=function(v){return this.compare(v)!==0};SmallInteger.prototype.neq=SmallInteger.prototype.notEquals=BigInteger.prototype.neq=BigInteger.prototype.notEquals;BigInteger.prototype.greater=function(v){return this.compare(v)>0};SmallInteger.prototype.gt=SmallInteger.prototype.greater=BigInteger.prototype.gt=BigInteger.prototype.greater;BigInteger.prototype.lesser=function(v){return this.compare(v)<0};SmallInteger.prototype.lt=SmallInteger.prototype.lesser=BigInteger.prototype.lt=BigInteger.prototype.lesser;BigInteger.prototype.greaterOrEquals=function(v){return this.compare(v)>=0};SmallInteger.prototype.geq=SmallInteger.prototype.greaterOrEquals=BigInteger.prototype.geq=BigInteger.prototype.greaterOrEquals;BigInteger.prototype.lesserOrEquals=function(v){return this.compare(v)<=0};SmallInteger.prototype.leq=SmallInteger.prototype.lesserOrEquals=BigInteger.prototype.leq=BigInteger.prototype.lesserOrEquals;BigInteger.prototype.isEven=function(){return(this.value[0]&1)===0};SmallInteger.prototype.isEven=function(){return(this.value&1)===0};BigInteger.prototype.isOdd=function(){return(this.value[0]&1)===1};SmallInteger.prototype.isOdd=function(){return(this.value&1)===1};BigInteger.prototype.isPositive=function(){return!this.sign};SmallInteger.prototype.isPositive=function(){return this.value>0};BigInteger.prototype.isNegative=function(){return this.sign};SmallInteger.prototype.isNegative=function(){return this.value<0};BigInteger.prototype.isUnit=function(){return false};SmallInteger.prototype.isUnit=function(){return Math.abs(this.value)===1};BigInteger.prototype.isZero=function(){return false};SmallInteger.prototype.isZero=function(){return this.value===0};BigInteger.prototype.isDivisibleBy=function(v){var n=parseValue(v);var value=n.value;if(value===0)return false;if(value===1)return true;if(value===2)return this.isEven();return this.mod(n).equals(Integer[0])};SmallInteger.prototype.isDivisibleBy=BigInteger.prototype.isDivisibleBy;function isBasicPrime(v){var n=v.abs();if(n.isUnit())return false;if(n.equals(2)||n.equals(3)||n.equals(5))return true;if(n.isEven()||n.isDivisibleBy(3)||n.isDivisibleBy(5))return false;if(n.lesser(25))return true}BigInteger.prototype.isPrime=function(){var isPrime=isBasicPrime(this);if(isPrime!==undefined)return isPrime;var n=this.abs(),nPrev=n.prev();var a=[2,3,5,7,11,13,17,19],b=nPrev,d,t,i,x;while(b.isEven())b=b.divide(2);for(i=0;i<a.length;i++){x=bigInt(a[i]).modPow(b,n);if(x.equals(Integer[1])||x.equals(nPrev))continue;for(t=true,d=b;t&&d.lesser(nPrev);d=d.multiply(2)){x=x.square().mod(n);if(x.equals(nPrev))t=false}if(t)return false}return 
true};SmallInteger.prototype.isPrime=BigInteger.prototype.isPrime;BigInteger.prototype.isProbablePrime=function(iterations){var isPrime=isBasicPrime(this);if(isPrime!==undefined)return isPrime;var n=this.abs();var t=iterations===undefined?5:iterations;for(var i=0;i<t;i++){var a=bigInt.randBetween(2,n.minus(2));if(!a.modPow(n.prev(),n).isUnit())return false}return true};SmallInteger.prototype.isProbablePrime=BigInteger.prototype.isProbablePrime;BigInteger.prototype.modInv=function(n){var t=bigInt.zero,newT=bigInt.one,r=parseValue(n),newR=this.abs(),q,lastT,lastR;while(!newR.equals(bigInt.zero)){q=r.divide(newR);lastT=t;lastR=r;t=newT;r=newR;newT=lastT.subtract(q.multiply(newT));newR=lastR.subtract(q.multiply(newR))}if(!r.equals(1))throw new Error(this.toString()+\" and \"+n.toString()+\" are not co-prime\");if(t.compare(0)===-1){t=t.add(n)}if(this.isNegative()){return t.negate()}return t};SmallInteger.prototype.modInv=BigInteger.prototype.modInv;BigInteger.prototype.next=function(){var value=this.value;if(this.sign){return subtractSmall(value,1,this.sign)}return new BigInteger(addSmall(value,1),this.sign)};SmallInteger.prototype.next=function(){var value=this.value;if(value+1<MAX_INT)return new SmallInteger(value+1);return new BigInteger(MAX_INT_ARR,false)};BigInteger.prototype.prev=function(){var value=this.value;if(this.sign){return new BigInteger(addSmall(value,1),true)}return subtractSmall(value,1,this.sign)};SmallInteger.prototype.prev=function(){var value=this.value;if(value-1>-MAX_INT)return new SmallInteger(value-1);return new BigInteger(MAX_INT_ARR,true)};var powersOfTwo=[1];while(2*powersOfTwo[powersOfTwo.length-1]<=BASE)powersOfTwo.push(2*powersOfTwo[powersOfTwo.length-1]);var powers2Length=powersOfTwo.length,highestPower2=powersOfTwo[powers2Length-1];function shift_isSmall(n){return(typeof n===\"number\"||typeof n===\"string\")&&+Math.abs(n)<=BASE||n instanceof BigInteger&&n.value.length<=1}BigInteger.prototype.shiftLeft=function(n){if(!shift_isSmall(n)){throw new Error(String(n)+\" is too large for shifting.\")}n=+n;if(n<0)return this.shiftRight(-n);var result=this;while(n>=powers2Length){result=result.multiply(highestPower2);n-=powers2Length-1}return result.multiply(powersOfTwo[n])};SmallInteger.prototype.shiftLeft=BigInteger.prototype.shiftLeft;BigInteger.prototype.shiftRight=function(n){var remQuo;if(!shift_isSmall(n)){throw new Error(String(n)+\" is too large for shifting.\")}n=+n;if(n<0)return this.shiftLeft(-n);var result=this;while(n>=powers2Length){if(result.isZero())return result;remQuo=divModAny(result,highestPower2);result=remQuo[1].isNegative()?remQuo[0].prev():remQuo[0];n-=powers2Length-1}remQuo=divModAny(result,powersOfTwo[n]);return remQuo[1].isNegative()?remQuo[0].prev():remQuo[0]};SmallInteger.prototype.shiftRight=BigInteger.prototype.shiftRight;function bitwise(x,y,fn){y=parseValue(y);var xSign=x.isNegative(),ySign=y.isNegative();var xRem=xSign?x.not():x,yRem=ySign?y.not():y;var xDigit=0,yDigit=0;var xDivMod=null,yDivMod=null;var result=[];while(!xRem.isZero()||!yRem.isZero()){xDivMod=divModAny(xRem,highestPower2);xDigit=xDivMod[1].toJSNumber();if(xSign){xDigit=highestPower2-1-xDigit}yDivMod=divModAny(yRem,highestPower2);yDigit=yDivMod[1].toJSNumber();if(ySign){yDigit=highestPower2-1-yDigit}xRem=xDivMod[0];yRem=yDivMod[0];result.push(fn(xDigit,yDigit))}var sum=fn(xSign?1:0,ySign?1:0)!==0?bigInt(-1):bigInt(0);for(var i=result.length-1;i>=0;i-=1){sum=sum.multiply(highestPower2).add(bigInt(result[i]))}return sum}BigInteger.prototype.not=function(){return 
this.negate().prev()};SmallInteger.prototype.not=BigInteger.prototype.not;BigInteger.prototype.and=function(n){return bitwise(this,n,function(a,b){return a&b})};SmallInteger.prototype.and=BigInteger.prototype.and;BigInteger.prototype.or=function(n){return bitwise(this,n,function(a,b){return a|b})};SmallInteger.prototype.or=BigInteger.prototype.or;BigInteger.prototype.xor=function(n){return bitwise(this,n,function(a,b){return a^b})};SmallInteger.prototype.xor=BigInteger.prototype.xor;var LOBMASK_I=1<<30,LOBMASK_BI=(BASE&-BASE)*(BASE&-BASE)|LOBMASK_I;function roughLOB(n){var v=n.value,x=typeof v===\"number\"?v|LOBMASK_I:v[0]+v[1]*BASE|LOBMASK_BI;return x&-x}function max(a,b){a=parseValue(a);b=parseValue(b);return a.greater(b)?a:b}function min(a,b){a=parseValue(a);b=parseValue(b);return a.lesser(b)?a:b}function gcd(a,b){a=parseValue(a).abs();b=parseValue(b).abs();if(a.equals(b))return a;if(a.isZero())return b;if(b.isZero())return a;var c=Integer[1],d,t;while(a.isEven()&&b.isEven()){d=Math.min(roughLOB(a),roughLOB(b));a=a.divide(d);b=b.divide(d);c=c.multiply(d)}while(a.isEven()){a=a.divide(roughLOB(a))}do{while(b.isEven()){b=b.divide(roughLOB(b))}if(a.greater(b)){t=b;b=a;a=t}b=b.subtract(a)}while(!b.isZero());return c.isUnit()?a:a.multiply(c)}function lcm(a,b){a=parseValue(a).abs();b=parseValue(b).abs();return a.divide(gcd(a,b)).multiply(b)}function randBetween(a,b){a=parseValue(a);b=parseValue(b);var low=min(a,b),high=max(a,b);var range=high.subtract(low).add(1);if(range.isSmall)return low.add(Math.floor(Math.random()*range));var length=range.value.length-1;var result=[],restricted=true;for(var i=length;i>=0;i--){var top=restricted?range.value[i]:BASE;var digit=truncate(Math.random()*top);result.unshift(digit);if(digit<top)restricted=false}result=arrayToSmall(result);return low.add(typeof result===\"number\"?new SmallInteger(result):new BigInteger(result,false))}var parseBase=function(text,base){var length=text.length;var i;var absBase=Math.abs(base);for(var i=0;i<length;i++){var c=text[i].toLowerCase();if(c===\"-\")continue;if(/[a-z0-9]/.test(c)){if(/[0-9]/.test(c)&&+c>=absBase){if(c===\"1\"&&absBase===1)continue;throw new Error(c+\" is not a valid digit in base \"+base+\".\")}else if(c.charCodeAt(0)-87>=absBase){throw new Error(c+\" is not a valid digit in base \"+base+\".\")}}}if(2<=base&&base<=36){if(length<=LOG_MAX_INT/Math.log(base)){var result=parseInt(text,base);if(isNaN(result)){throw new Error(c+\" is not a valid digit in base \"+base+\".\")}return new SmallInteger(parseInt(text,base))}}base=parseValue(base);var digits=[];var isNegative=text[0]===\"-\";for(i=isNegative?1:0;i<text.length;i++){var c=text[i].toLowerCase(),charCode=c.charCodeAt(0);if(48<=charCode&&charCode<=57)digits.push(parseValue(c));else if(97<=charCode&&charCode<=122)digits.push(parseValue(c.charCodeAt(0)-87));else if(c===\"<\"){var start=i;do{i++}while(text[i]!==\">\");digits.push(parseValue(text.slice(start+1,i)))}else throw new Error(c+\" is not a valid character\")}return parseBaseFromArray(digits,base,isNegative)};function parseBaseFromArray(digits,base,isNegative){var val=Integer[0],pow=Integer[1],i;for(i=digits.length-1;i>=0;i--){val=val.add(digits[i].times(pow));pow=pow.times(base)}return isNegative?val.negate():val}function stringify(digit){var v=digit.value;if(typeof v===\"number\")v=[v];if(v.length===1&&v[0]<=35){return\"0123456789abcdefghijklmnopqrstuvwxyz\".charAt(v[0])}return\"<\"+v+\">\"}function toBase(n,base){base=bigInt(base);if(base.isZero()){if(n.isZero())return\"0\";throw new Error(\"Cannot 
convert nonzero numbers to base 0.\")}if(base.equals(-1)){if(n.isZero())return\"0\";if(n.isNegative())return new Array(1-n).join(\"10\");return\"1\"+new Array(+n).join(\"01\")}var minusSign=\"\";if(n.isNegative()&&base.isPositive()){minusSign=\"-\";n=n.abs()}if(base.equals(1)){if(n.isZero())return\"0\";return minusSign+new Array(+n+1).join(1)}var out=[];var left=n,divmod;while(left.isNegative()||left.compareAbs(base)>=0){divmod=left.divmod(base);left=divmod.quotient;var digit=divmod.remainder;if(digit.isNegative()){digit=base.minus(digit).abs();left=left.next()}out.push(stringify(digit))}out.push(stringify(left));return minusSign+out.reverse().join(\"\")}BigInteger.prototype.toString=function(radix){if(radix===undefined)radix=10;if(radix!==10)return toBase(this,radix);var v=this.value,l=v.length,str=String(v[--l]),zeros=\"0000000\",digit;while(--l>=0){digit=String(v[l]);str+=zeros.slice(digit.length)+digit}var sign=this.sign?\"-\":\"\";return sign+str};SmallInteger.prototype.toString=function(radix){if(radix===undefined)radix=10;if(radix!=10)return toBase(this,radix);return String(this.value)};BigInteger.prototype.toJSON=SmallInteger.prototype.toJSON=function(){return this.toString()};BigInteger.prototype.valueOf=function(){return+this.toString()};BigInteger.prototype.toJSNumber=BigInteger.prototype.valueOf;SmallInteger.prototype.valueOf=function(){return this.value};SmallInteger.prototype.toJSNumber=SmallInteger.prototype.valueOf;function parseStringValue(v){if(isPrecise(+v)){var x=+v;if(x===truncate(x))return new SmallInteger(x);throw\"Invalid integer: \"+v}var sign=v[0]===\"-\";if(sign)v=v.slice(1);var split=v.split(/e/i);if(split.length>2)throw new Error(\"Invalid integer: \"+split.join(\"e\"));if(split.length===2){var exp=split[1];if(exp[0]===\"+\")exp=exp.slice(1);exp=+exp;if(exp!==truncate(exp)||!isPrecise(exp))throw new Error(\"Invalid integer: \"+exp+\" is not a valid exponent.\");var text=split[0];var decimalPlace=text.indexOf(\".\");if(decimalPlace>=0){exp-=text.length-decimalPlace-1;text=text.slice(0,decimalPlace)+text.slice(decimalPlace+1)}if(exp<0)throw new Error(\"Cannot include negative exponent part for integers\");text+=new Array(exp+1).join(\"0\");v=text}var isValid=/^([0-9][0-9]*)$/.test(v);if(!isValid)throw new Error(\"Invalid integer: \"+v);var r=[],max=v.length,l=LOG_BASE,min=max-l;while(max>0){r.push(+v.slice(min,max));min-=l;if(min<0)min=0;max-=l}trim(r);return new BigInteger(r,sign)}function parseNumberValue(v){if(isPrecise(v)){if(v!==truncate(v))throw new Error(v+\" is not an integer.\");return new SmallInteger(v)}return parseStringValue(v.toString())}function parseValue(v){if(typeof v===\"number\"){return parseNumberValue(v)}if(typeof v===\"string\"){return parseStringValue(v)}return v}for(var i=0;i<1e3;i++){Integer[i]=new SmallInteger(i);if(i>0)Integer[-i]=new SmallInteger(-i)}Integer.one=Integer[1];Integer.zero=Integer[0];Integer.minusOne=Integer[-1];Integer.max=max;Integer.min=min;Integer.gcd=gcd;Integer.lcm=lcm;Integer.isInstance=function(x){return x instanceof BigInteger||x instanceof SmallInteger};Integer.randBetween=randBetween;Integer.fromArray=function(digits,base,isNegative){return parseBaseFromArray(digits.map(parseValue),parseValue(base||10),isNegative)};return Integer}();if(typeof module!==\"undefined\"&&module.hasOwnProperty(\"exports\")){module.exports=bigInt}if(typeof define===\"function\"&&define.amd){define(\"big-integer\",[],function(){return bigInt})}; bigInt`\n\n// makeSlice converts an unsafe memory pointer with the given type into a Go 
byte\n// slice.\n//\n// Note, the returned slice uses the same memory area as the input arguments.\n// If those are duktape stack items, popping them off **will** make the slice\n// contents change.\nfunc makeSlice(ptr unsafe.Pointer, size uint) []byte {\n\tvar sl = struct {\n\t\taddr uintptr\n\t\tlen  int\n\t\tcap  int\n\t}{uintptr(ptr), int(size), int(size)}\n\n\treturn *(*[]byte)(unsafe.Pointer(&sl))\n}\n\n// popSlice pops a buffer off the JavaScript stack and returns it as a slice.\nfunc popSlice(ctx *duktape.Context) []byte {\n\tblob := common.CopyBytes(makeSlice(ctx.GetBuffer(-1)))\n\tctx.Pop()\n\treturn blob\n}\n\n// pushBigInt creates a JavaScript BigInteger in the VM.\nfunc pushBigInt(n *big.Int, ctx *duktape.Context) {\n\tctx.GetGlobalString(\"bigInt\")\n\tctx.PushString(n.String())\n\tctx.Call(1)\n}\n\n// opWrapper provides a JavaScript wrapper around OpCode.\ntype opWrapper struct {\n\top vm.OpCode\n}\n\n// pushObject assembles a JSVM object wrapping a swappable opcode and pushes it\n// onto the VM stack.\nfunc (ow *opWrapper) pushObject(vm *duktape.Context) {\n\tobj := vm.PushObject()\n\n\tvm.PushGoFunction(func(ctx *duktape.Context) int { ctx.PushInt(int(ow.op)); return 1 })\n\tvm.PutPropString(obj, \"toNumber\")\n\n\tvm.PushGoFunction(func(ctx *duktape.Context) int { ctx.PushString(ow.op.String()); return 1 })\n\tvm.PutPropString(obj, \"toString\")\n\n\tvm.PushGoFunction(func(ctx *duktape.Context) int { ctx.PushBoolean(ow.op.IsPush()); return 1 })\n\tvm.PutPropString(obj, \"isPush\")\n}\n\n// memoryWrapper provides a JavaScript wrapper around vm.Memory.\ntype memoryWrapper struct {\n\tmemory *vm.Memory\n}\n\n// slice returns the requested range of memory as a byte slice.\nfunc (mw *memoryWrapper) slice(begin, end int64) []byte {\n\tif end == begin {\n\t\treturn []byte{}\n\t}\n\tif end < begin || begin < 0 {\n\t\t// TODO(karalabe): We can't js-throw from Go inside duktape inside Go. The Go\n\t\t// runtime goes belly up https://github.com/golang/go/issues/15639.\n\t\tlog.Warn(\"Tracer accessed out of bound memory\", \"offset\", begin, \"end\", end)\n\t\treturn nil\n\t}\n\tif mw.memory.Len() < int(end) {\n\t\t// TODO(karalabe): We can't js-throw from Go inside duktape inside Go. The Go\n\t\t// runtime goes belly up https://github.com/golang/go/issues/15639.\n\t\tlog.Warn(\"Tracer accessed out of bound memory\", \"available\", mw.memory.Len(), \"offset\", begin, \"size\", end-begin)\n\t\treturn nil\n\t}\n\treturn mw.memory.GetCopy(begin, end-begin)\n}\n\n// getUint returns the 32 bytes at the specified address interpreted as a uint.\nfunc (mw *memoryWrapper) getUint(addr int64) *big.Int {\n\tif mw.memory.Len() < int(addr)+32 || addr < 0 {\n\t\t// TODO(karalabe): We can't js-throw from Go inside duktape inside Go. 
The Go\n\t\t// runtime goes belly up https://github.com/golang/go/issues/15639.\n\t\tlog.Warn(\"Tracer accessed out of bound memory\", \"available\", mw.memory.Len(), \"offset\", addr, \"size\", 32)\n\t\treturn new(big.Int)\n\t}\n\treturn new(big.Int).SetBytes(mw.memory.GetPtr(addr, 32))\n}\n\n// pushObject assembles a JSVM object wrapping a swappable memory and pushes it\n// onto the VM stack.\nfunc (mw *memoryWrapper) pushObject(vm *duktape.Context) {\n\tobj := vm.PushObject()\n\n\t// Generate the `slice` method which takes two ints and returns a buffer\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\tblob := mw.slice(int64(ctx.GetInt(-2)), int64(ctx.GetInt(-1)))\n\t\tctx.Pop2()\n\n\t\tptr := ctx.PushFixedBuffer(len(blob))\n\t\tcopy(makeSlice(ptr, uint(len(blob))), blob)\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"slice\")\n\n\t// Generate the `getUint` method which takes an int and returns a bigint\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\toffset := int64(ctx.GetInt(-1))\n\t\tctx.Pop()\n\n\t\tpushBigInt(mw.getUint(offset), ctx)\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"getUint\")\n}\n\n// stackWrapper provides a JavaScript wrapper around vm.Stack.\ntype stackWrapper struct {\n\tstack *vm.Stack\n}\n\n// peek returns the nth-from-the-top element of the stack.\nfunc (sw *stackWrapper) peek(idx int) *big.Int {\n\tif len(sw.stack.Data()) <= idx || idx < 0 {\n\t\t// TODO(karalabe): We can't js-throw from Go inside duktape inside Go. The Go\n\t\t// runtime goes belly up https://github.com/golang/go/issues/15639.\n\t\tlog.Warn(\"Tracer accessed out of bound stack\", \"size\", len(sw.stack.Data()), \"index\", idx)\n\t\treturn new(big.Int)\n\t}\n\treturn sw.stack.Back(idx).ToBig()\n}\n\n// pushObject assembles a JSVM object wrapping a swappable stack and pushes it\n// onto the VM stack.\nfunc (sw *stackWrapper) pushObject(vm *duktape.Context) {\n\tobj := vm.PushObject()\n\n\tvm.PushGoFunction(func(ctx *duktape.Context) int { ctx.PushInt(len(sw.stack.Data())); return 1 })\n\tvm.PutPropString(obj, \"length\")\n\n\t// Generate the `peek` method which takes an int and returns a bigint\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\toffset := ctx.GetInt(-1)\n\t\tctx.Pop()\n\n\t\tpushBigInt(sw.peek(offset), ctx)\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"peek\")\n}\n\n// dbWrapper provides a JavaScript wrapper around vm.Database.\ntype dbWrapper struct {\n\tdb vm.StateDB\n}\n\n// pushObject assembles a JSVM object wrapping a swappable database and pushes it\n// onto the VM stack.\nfunc (dw *dbWrapper) pushObject(vm *duktape.Context) {\n\tobj := vm.PushObject()\n\n\t// Push the wrapper for statedb.GetBalance\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\tpushBigInt(dw.db.GetBalance(common.BytesToAddress(popSlice(ctx))), ctx)\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"getBalance\")\n\n\t// Push the wrapper for statedb.GetNonce\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\tctx.PushInt(int(dw.db.GetNonce(common.BytesToAddress(popSlice(ctx)))))\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"getNonce\")\n\n\t// Push the wrapper for statedb.GetCode\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\tcode := dw.db.GetCode(common.BytesToAddress(popSlice(ctx)))\n\n\t\tptr := ctx.PushFixedBuffer(len(code))\n\t\tcopy(makeSlice(ptr, uint(len(code))), code)\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"getCode\")\n\n\t// Push the wrapper for statedb.GetState\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\thash := 
popSlice(ctx)\n\t\taddr := popSlice(ctx)\n\n\t\tstate := dw.db.GetState(common.BytesToAddress(addr), common.BytesToHash(hash))\n\n\t\tptr := ctx.PushFixedBuffer(len(state))\n\t\tcopy(makeSlice(ptr, uint(len(state))), state[:])\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"getState\")\n\n\t// Push the wrapper for statedb.Exists\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\tctx.PushBoolean(dw.db.Exist(common.BytesToAddress(popSlice(ctx))))\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"exists\")\n}\n\n// contractWrapper provides a JavaScript wrapper around vm.Contract\ntype contractWrapper struct {\n\tcontract *vm.Contract\n}\n\n// pushObject assembles a JSVM object wrapping a swappable contract and pushes it\n// onto the VM stack.\nfunc (cw *contractWrapper) pushObject(vm *duktape.Context) {\n\tobj := vm.PushObject()\n\n\t// Push the wrapper for contract.Caller\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\tptr := ctx.PushFixedBuffer(20)\n\t\tcopy(makeSlice(ptr, 20), cw.contract.Caller().Bytes())\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"getCaller\")\n\n\t// Push the wrapper for contract.Address\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\tptr := ctx.PushFixedBuffer(20)\n\t\tcopy(makeSlice(ptr, 20), cw.contract.Address().Bytes())\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"getAddress\")\n\n\t// Push the wrapper for contract.Value\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\tpushBigInt(cw.contract.Value(), ctx)\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"getValue\")\n\n\t// Push the wrapper for contract.Input\n\tvm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\tblob := cw.contract.Input\n\n\t\tptr := ctx.PushFixedBuffer(len(blob))\n\t\tcopy(makeSlice(ptr, uint(len(blob))), blob)\n\t\treturn 1\n\t})\n\tvm.PutPropString(obj, \"getInput\")\n}\n\n// Tracer provides an implementation of Tracer that evaluates a Javascript\n// function for each VM execution step.\ntype Tracer struct {\n\tvm *duktape.Context // Javascript VM instance\n\n\ttracerObject int // Stack index of the tracer JavaScript object\n\tstateObject  int // Stack index of the global state to pull arguments from\n\n\topWrapper       *opWrapper       // Wrapper around the VM opcode\n\tstackWrapper    *stackWrapper    // Wrapper around the VM stack\n\tmemoryWrapper   *memoryWrapper   // Wrapper around the VM memory\n\tcontractWrapper *contractWrapper // Wrapper around the contract object\n\tdbWrapper       *dbWrapper       // Wrapper around the VM environment\n\n\tpcValue     *uint   // Swappable pc value wrapped by a log accessor\n\tgasValue    *uint   // Swappable gas value wrapped by a log accessor\n\tcostValue   *uint   // Swappable cost value wrapped by a log accessor\n\tdepthValue  *uint   // Swappable depth value wrapped by a log accessor\n\terrorValue  *string // Swappable error value wrapped by a log accessor\n\trefundValue *uint   // Swappable refund value wrapped by a log accessor\n\n\tctx map[string]interface{} // Transaction context gathered throughout execution\n\terr error                  // Error, if one has occurred\n\n\tinterrupt uint32 // Atomic flag to signal execution interruption\n\treason    error  // Textual reason for the interruption\n}\n\n// New instantiates a new tracer instance. 
code specifies a Javascript snippet,\n// which must evaluate to an expression returning an object with 'step', 'fault'\n// and 'result' functions.\nfunc New(code string, txCtx vm.TxContext) (*Tracer, error) {\n\t// Resolve any tracers by name and assemble the tracer object\n\tif tracer, ok := tracer(code); ok {\n\t\tcode = tracer\n\t}\n\ttracer := &Tracer{\n\t\tvm:              duktape.New(),\n\t\tctx:             make(map[string]interface{}),\n\t\topWrapper:       new(opWrapper),\n\t\tstackWrapper:    new(stackWrapper),\n\t\tmemoryWrapper:   new(memoryWrapper),\n\t\tcontractWrapper: new(contractWrapper),\n\t\tdbWrapper:       new(dbWrapper),\n\t\tpcValue:         new(uint),\n\t\tgasValue:        new(uint),\n\t\tcostValue:       new(uint),\n\t\tdepthValue:      new(uint),\n\t\trefundValue:     new(uint),\n\t}\n\ttracer.ctx[\"gasPrice\"] = txCtx.GasPrice\n\n\t// Set up builtins for this environment\n\ttracer.vm.PushGlobalGoFunction(\"toHex\", func(ctx *duktape.Context) int {\n\t\tctx.PushString(hexutil.Encode(popSlice(ctx)))\n\t\treturn 1\n\t})\n\ttracer.vm.PushGlobalGoFunction(\"toWord\", func(ctx *duktape.Context) int {\n\t\tvar word common.Hash\n\t\tif ptr, size := ctx.GetBuffer(-1); ptr != nil {\n\t\t\tword = common.BytesToHash(makeSlice(ptr, size))\n\t\t} else {\n\t\t\tword = common.HexToHash(ctx.GetString(-1))\n\t\t}\n\t\tctx.Pop()\n\t\tcopy(makeSlice(ctx.PushFixedBuffer(32), 32), word[:])\n\t\treturn 1\n\t})\n\ttracer.vm.PushGlobalGoFunction(\"toAddress\", func(ctx *duktape.Context) int {\n\t\tvar addr common.Address\n\t\tif ptr, size := ctx.GetBuffer(-1); ptr != nil {\n\t\t\taddr = common.BytesToAddress(makeSlice(ptr, size))\n\t\t} else {\n\t\t\taddr = common.HexToAddress(ctx.GetString(-1))\n\t\t}\n\t\tctx.Pop()\n\t\tcopy(makeSlice(ctx.PushFixedBuffer(20), 20), addr[:])\n\t\treturn 1\n\t})\n\ttracer.vm.PushGlobalGoFunction(\"toContract\", func(ctx *duktape.Context) int {\n\t\tvar from common.Address\n\t\tif ptr, size := ctx.GetBuffer(-2); ptr != nil {\n\t\t\tfrom = common.BytesToAddress(makeSlice(ptr, size))\n\t\t} else {\n\t\t\tfrom = common.HexToAddress(ctx.GetString(-2))\n\t\t}\n\t\tnonce := uint64(ctx.GetInt(-1))\n\t\tctx.Pop2()\n\n\t\tcontract := crypto.CreateAddress(from, nonce)\n\t\tcopy(makeSlice(ctx.PushFixedBuffer(20), 20), contract[:])\n\t\treturn 1\n\t})\n\ttracer.vm.PushGlobalGoFunction(\"toContract2\", func(ctx *duktape.Context) int {\n\t\tvar from common.Address\n\t\tif ptr, size := ctx.GetBuffer(-3); ptr != nil {\n\t\t\tfrom = common.BytesToAddress(makeSlice(ptr, size))\n\t\t} else {\n\t\t\tfrom = common.HexToAddress(ctx.GetString(-3))\n\t\t}\n\t\t// Retrieve salt hex string from js stack\n\t\tsalt := common.HexToHash(ctx.GetString(-2))\n\t\t// Retrieve code slice from js stack\n\t\tvar code []byte\n\t\tif ptr, size := ctx.GetBuffer(-1); ptr != nil {\n\t\t\tcode = common.CopyBytes(makeSlice(ptr, size))\n\t\t} else {\n\t\t\tcode = common.FromHex(ctx.GetString(-1))\n\t\t}\n\t\tcodeHash := crypto.Keccak256(code)\n\t\tctx.Pop3()\n\t\tcontract := crypto.CreateAddress2(from, salt, codeHash)\n\t\tcopy(makeSlice(ctx.PushFixedBuffer(20), 20), contract[:])\n\t\treturn 1\n\t})\n\ttracer.vm.PushGlobalGoFunction(\"isPrecompiled\", func(ctx *duktape.Context) int {\n\t\t_, ok := vm.PrecompiledContractsIstanbul[common.BytesToAddress(popSlice(ctx))]\n\t\tctx.PushBoolean(ok)\n\t\treturn 1\n\t})\n\ttracer.vm.PushGlobalGoFunction(\"slice\", func(ctx *duktape.Context) int {\n\t\tstart, end := ctx.GetInt(-2), ctx.GetInt(-1)\n\t\tctx.Pop2()\n\n\t\tblob := popSlice(ctx)\n\t\tsize := end - 
start\n\n\t\tif start < 0 || start > end || end > len(blob) {\n\t\t\t// TODO(karalabe): We can't js-throw from Go inside duktape inside Go. The Go\n\t\t\t// runtime goes belly up https://github.com/golang/go/issues/15639.\n\t\t\tlog.Warn(\"Tracer accessed out of bound memory\", \"available\", len(blob), \"offset\", start, \"size\", size)\n\t\t\tctx.PushFixedBuffer(0)\n\t\t\treturn 1\n\t\t}\n\t\tcopy(makeSlice(ctx.PushFixedBuffer(size), uint(size)), blob[start:end])\n\t\treturn 1\n\t})\n\t// Push the JavaScript tracer as object #0 onto the JSVM stack and validate it\n\tif err := tracer.vm.PevalString(\"(\" + code + \")\"); err != nil {\n\t\tlog.Warn(\"Failed to compile tracer\", \"err\", err)\n\t\treturn nil, err\n\t}\n\ttracer.tracerObject = 0 // yeah, nice, eval can't return the index itself\n\n\tif !tracer.vm.GetPropString(tracer.tracerObject, \"step\") {\n\t\treturn nil, fmt.Errorf(\"trace object must expose a function step()\")\n\t}\n\ttracer.vm.Pop()\n\n\tif !tracer.vm.GetPropString(tracer.tracerObject, \"fault\") {\n\t\treturn nil, fmt.Errorf(\"trace object must expose a function fault()\")\n\t}\n\ttracer.vm.Pop()\n\n\tif !tracer.vm.GetPropString(tracer.tracerObject, \"result\") {\n\t\treturn nil, fmt.Errorf(\"trace object must expose a function result()\")\n\t}\n\ttracer.vm.Pop()\n\n\t// Tracer is valid, inject the big int library to access large numbers\n\ttracer.vm.EvalString(bigIntegerJS)\n\ttracer.vm.PutGlobalString(\"bigInt\")\n\n\t// Push the global environment state as object #1 into the JSVM stack\n\ttracer.stateObject = tracer.vm.PushObject()\n\n\tlogObject := tracer.vm.PushObject()\n\n\ttracer.opWrapper.pushObject(tracer.vm)\n\ttracer.vm.PutPropString(logObject, \"op\")\n\n\ttracer.stackWrapper.pushObject(tracer.vm)\n\ttracer.vm.PutPropString(logObject, \"stack\")\n\n\ttracer.memoryWrapper.pushObject(tracer.vm)\n\ttracer.vm.PutPropString(logObject, \"memory\")\n\n\ttracer.contractWrapper.pushObject(tracer.vm)\n\ttracer.vm.PutPropString(logObject, \"contract\")\n\n\ttracer.vm.PushGoFunction(func(ctx *duktape.Context) int { ctx.PushUint(*tracer.pcValue); return 1 })\n\ttracer.vm.PutPropString(logObject, \"getPC\")\n\n\ttracer.vm.PushGoFunction(func(ctx *duktape.Context) int { ctx.PushUint(*tracer.gasValue); return 1 })\n\ttracer.vm.PutPropString(logObject, \"getGas\")\n\n\ttracer.vm.PushGoFunction(func(ctx *duktape.Context) int { ctx.PushUint(*tracer.costValue); return 1 })\n\ttracer.vm.PutPropString(logObject, \"getCost\")\n\n\ttracer.vm.PushGoFunction(func(ctx *duktape.Context) int { ctx.PushUint(*tracer.depthValue); return 1 })\n\ttracer.vm.PutPropString(logObject, \"getDepth\")\n\n\ttracer.vm.PushGoFunction(func(ctx *duktape.Context) int { ctx.PushUint(*tracer.refundValue); return 1 })\n\ttracer.vm.PutPropString(logObject, \"getRefund\")\n\n\ttracer.vm.PushGoFunction(func(ctx *duktape.Context) int {\n\t\tif tracer.errorValue != nil {\n\t\t\tctx.PushString(*tracer.errorValue)\n\t\t} else {\n\t\t\tctx.PushUndefined()\n\t\t}\n\t\treturn 1\n\t})\n\ttracer.vm.PutPropString(logObject, \"getError\")\n\n\ttracer.vm.PutPropString(tracer.stateObject, \"log\")\n\n\ttracer.dbWrapper.pushObject(tracer.vm)\n\ttracer.vm.PutPropString(tracer.stateObject, \"db\")\n\n\treturn tracer, nil\n}\n\n// Stop terminates execution of the tracer at the first opportune moment.\nfunc (jst *Tracer) Stop(err error) {\n\tjst.reason = err\n\tatomic.StoreUint32(&jst.interrupt, 1)\n}\n\n// call executes a method on a JS object, catching any errors, formatting and\n// returning them as error objects.\nfunc 
(jst *Tracer) call(method string, args ...string) (json.RawMessage, error) {\n\t// Execute the JavaScript call and return any error\n\tjst.vm.PushString(method)\n\tfor _, arg := range args {\n\t\tjst.vm.GetPropString(jst.stateObject, arg)\n\t}\n\tcode := jst.vm.PcallProp(jst.tracerObject, len(args))\n\tdefer jst.vm.Pop()\n\n\tif code != 0 {\n\t\terr := jst.vm.SafeToString(-1)\n\t\treturn nil, errors.New(err)\n\t}\n\t// No error occurred, extract return value and return\n\treturn json.RawMessage(jst.vm.JsonEncode(-1)), nil\n}\n\nfunc wrapError(context string, err error) error {\n\treturn fmt.Errorf(\"%v    in server-side tracer function '%v'\", err, context)\n}\n\n// CaptureStart implements the Tracer interface to initialize the tracing operation.\nfunc (jst *Tracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) {\n\tjst.ctx[\"type\"] = \"CALL\"\n\tif create {\n\t\tjst.ctx[\"type\"] = \"CREATE\"\n\t}\n\tjst.ctx[\"from\"] = from\n\tjst.ctx[\"to\"] = to\n\tjst.ctx[\"input\"] = input\n\tjst.ctx[\"gas\"] = gas\n\tjst.ctx[\"value\"] = value\n\n\t// Initialize the context\n\tjst.ctx[\"block\"] = env.Context.BlockNumber.Uint64()\n\tjst.dbWrapper.db = env.StateDB\n\t// Compute intrinsic gas\n\tisHomestead := env.ChainConfig().IsHomestead(env.Context.BlockNumber)\n\tisIstanbul := env.ChainConfig().IsIstanbul(env.Context.BlockNumber)\n\tintrinsicGas, err := core.IntrinsicGas(input, nil, jst.ctx[\"type\"] == \"CREATE\", isHomestead, isIstanbul)\n\tif err != nil {\n\t\treturn\n\t}\n\tjst.ctx[\"intrinsicGas\"] = intrinsicGas\n}\n\n// CaptureState implements the Tracer interface to trace a single step of VM execution.\nfunc (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) {\n\tif jst.err != nil {\n\t\treturn\n\t}\n\t// If tracing was interrupted, set the error and stop\n\tif atomic.LoadUint32(&jst.interrupt) > 0 {\n\t\tjst.err = jst.reason\n\t\treturn\n\t}\n\tjst.opWrapper.op = op\n\tjst.stackWrapper.stack = scope.Stack\n\tjst.memoryWrapper.memory = scope.Memory\n\tjst.contractWrapper.contract = scope.Contract\n\n\t*jst.pcValue = uint(pc)\n\t*jst.gasValue = uint(gas)\n\t*jst.costValue = uint(cost)\n\t*jst.depthValue = uint(depth)\n\t*jst.refundValue = uint(env.StateDB.GetRefund())\n\n\tjst.errorValue = nil\n\tif err != nil {\n\t\tjst.errorValue = new(string)\n\t\t*jst.errorValue = err.Error()\n\t}\n\n\tif _, err := jst.call(\"step\", \"log\", \"db\"); err != nil {\n\t\tjst.err = wrapError(\"step\", err)\n\t}\n}\n\n// CaptureFault implements the Tracer interface to trace an execution fault\nfunc (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) {\n\tif jst.err != nil {\n\t\treturn\n\t}\n\t// Apart from the error, everything matches the previous invocation\n\tjst.errorValue = new(string)\n\t*jst.errorValue = err.Error()\n\n\tif _, err := jst.call(\"fault\", \"log\", \"db\"); err != nil {\n\t\tjst.err = wrapError(\"fault\", err)\n\t}\n}\n\n// CaptureEnd is called after the call finishes to finalize the tracing.\nfunc (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) {\n\tjst.ctx[\"output\"] = output\n\tjst.ctx[\"time\"] = t.String()\n\tjst.ctx[\"gasUsed\"] = gasUsed\n\n\tif err != nil {\n\t\tjst.ctx[\"error\"] = err.Error()\n\t}\n}\n\n// GetResult calls the Javascript 'result' function and returns its value, or any accumulated 
error\nfunc (jst *Tracer) GetResult() (json.RawMessage, error) {\n\t// Transform the context into a JavaScript object and inject into the state\n\tobj := jst.vm.PushObject()\n\n\tfor key, val := range jst.ctx {\n\t\tswitch val := val.(type) {\n\t\tcase uint64:\n\t\t\tjst.vm.PushUint(uint(val))\n\n\t\tcase string:\n\t\t\tjst.vm.PushString(val)\n\n\t\tcase []byte:\n\t\t\tptr := jst.vm.PushFixedBuffer(len(val))\n\t\t\tcopy(makeSlice(ptr, uint(len(val))), val)\n\n\t\tcase common.Address:\n\t\t\tptr := jst.vm.PushFixedBuffer(20)\n\t\t\tcopy(makeSlice(ptr, 20), val[:])\n\n\t\tcase *big.Int:\n\t\t\tpushBigInt(val, jst.vm)\n\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"unsupported type: %T\", val))\n\t\t}\n\t\tjst.vm.PutPropString(obj, key)\n\t}\n\tjst.vm.PutPropString(jst.stateObject, \"ctx\")\n\n\t// Finalize the trace and return the results\n\tresult, err := jst.call(\"result\", \"ctx\", \"db\")\n\tif err != nil {\n\t\tjst.err = wrapError(\"result\", err)\n\t}\n\t// Clean up the JavaScript environment\n\tjst.vm.DestroyHeap()\n\tjst.vm.Destroy()\n\n\treturn result, jst.err\n}\n"
  },
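Editor's note: a minimal sketch of how the file above is driven from the outside. Only tracers.New, vm.TxContext and the step/fault/result contract come from the code above; the standalone main package, the counting snippet and the gas price are illustrative assumptions, not part of the repository.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers"
)

func main() {
	// The snippet must evaluate to an object exposing step, fault and result;
	// New rejects anything else (see the GetPropString checks above).
	code := "{count: 0," +
		" step: function(log, db) { this.count += 1 }," +
		" fault: function(log, db) {}," +
		" result: function(ctx, db) { return this.count }}"

	// GasPrice is surfaced to the JS side as ctx.gasPrice.
	tracer, err := tracers.New(code, vm.TxContext{GasPrice: big.NewInt(1)})
	if err != nil {
		panic(err) // compilation or step/fault/result validation failure
	}
	// In real use the tracer is attached to an EVM via
	// vm.Config{Debug: true, Tracer: tracer}; once execution finishes,
	// tracer.GetResult() invokes the JS result function.
	fmt.Printf("tracer ready: %T\n", tracer)
}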
  {
    "path": "eth/tracers/tracer_test.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage tracers\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"math/big\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\ntype account struct{}\n\nfunc (account) SubBalance(amount *big.Int)                          {}\nfunc (account) AddBalance(amount *big.Int)                          {}\nfunc (account) SetAddress(common.Address)                           {}\nfunc (account) Value() *big.Int                                     { return nil }\nfunc (account) SetBalance(*big.Int)                                 {}\nfunc (account) SetNonce(uint64)                                     {}\nfunc (account) Balance() *big.Int                                   { return nil }\nfunc (account) Address() common.Address                             { return common.Address{} }\nfunc (account) ReturnGas(*big.Int)                                  {}\nfunc (account) SetCode(common.Hash, []byte)                         {}\nfunc (account) ForEachStorage(cb func(key, value common.Hash) bool) {}\n\ntype dummyStatedb struct {\n\tstate.StateDB\n}\n\nfunc (*dummyStatedb) GetRefund() uint64                       { return 1337 }\nfunc (*dummyStatedb) GetBalance(addr common.Address) *big.Int { return new(big.Int) }\n\ntype vmContext struct {\n\tblockCtx vm.BlockContext\n\ttxCtx    vm.TxContext\n}\n\nfunc testCtx() *vmContext {\n\treturn &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}\n}\n\nfunc runTrace(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) {\n\tenv := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})\n\tvar (\n\t\tstartGas uint64 = 10000\n\t\tvalue           = big.NewInt(0)\n\t)\n\tcontract := vm.NewContract(account{}, account{}, value, startGas)\n\tcontract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0}\n\n\ttracer.CaptureStart(env, contract.Caller(), contract.Address(), false, []byte{}, startGas, value)\n\tret, err := env.Interpreter().Run(contract, []byte{}, false)\n\ttracer.CaptureEnd(ret, startGas-contract.Gas, 1, err)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn tracer.GetResult()\n}\n\nfunc TestTracer(t *testing.T) {\n\texecTracer := func(code string) []byte {\n\t\tt.Helper()\n\t\tctx := &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}\n\t\ttracer, err := New(code, ctx.txCtx)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tret, err := runTrace(tracer, ctx)\n\t\tif err != 
nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\treturn ret\n\t}\n\tfor i, tt := range []struct {\n\t\tcode string\n\t\twant string\n\t}{\n\t\t{ // tests that we don't panic on bad arguments to memory access\n\t\t\tcode: \"{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}\",\n\t\t\twant: `[{},{},{}]`,\n\t\t}, { // tests that we don't panic on bad arguments to stack peeks\n\t\t\tcode: \"{depths: [], step: function(log) { this.depths.push(log.stack.peek(-1)); }, fault: function() {}, result: function() { return this.depths; }}\",\n\t\t\twant: `[\"0\",\"0\",\"0\"]`,\n\t\t}, { //  tests that we don't panic on bad arguments to memory getUint\n\t\t\tcode: \"{ depths: [], step: function(log, db) { this.depths.push(log.memory.getUint(-64));}, fault: function() {}, result: function() { return this.depths; }}\",\n\t\t\twant: `[\"0\",\"0\",\"0\"]`,\n\t\t}, { // tests some general counting\n\t\t\tcode: \"{count: 0, step: function() { this.count += 1; }, fault: function() {}, result: function() { return this.count; }}\",\n\t\t\twant: `3`,\n\t\t}, { // tests that depth is reported correctly\n\t\t\tcode: \"{depths: [], step: function(log) { this.depths.push(log.stack.length()); }, fault: function() {}, result: function() { return this.depths; }}\",\n\t\t\twant: `[0,1,2]`,\n\t\t}, { // tests to-string of opcodes\n\t\t\tcode: \"{opcodes: [], step: function(log) { this.opcodes.push(log.op.toString()); }, fault: function() {}, result: function() { return this.opcodes; }}\",\n\t\t\twant: `[\"PUSH1\",\"PUSH1\",\"STOP\"]`,\n\t\t}, { // tests intrinsic gas\n\t\t\tcode: \"{depths: [], step: function() {}, fault: function() {}, result: function(ctx) { return ctx.gasPrice+'.'+ctx.gasUsed+'.'+ctx.intrinsicGas; }}\",\n\t\t\twant: `\"100000.6.21000\"`,\n\t\t},\n\t} {\n\t\tif have := execTracer(tt.code); tt.want != string(have) {\n\t\t\tt.Errorf(\"testcase %d: expected return value to be %s got %s\\n\\tcode: %v\", i, tt.want, string(have), tt.code)\n\t\t}\n\t}\n}\n\nfunc TestHalt(t *testing.T) {\n\tt.Skip(\"duktape doesn't support abortion\")\n\n\ttimeout := errors.New(\"stahp\")\n\tvmctx := testCtx()\n\ttracer, err := New(\"{step: function() { while(1); }, result: function() { return null; }}\", vmctx.txCtx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgo func() {\n\t\ttime.Sleep(1 * time.Second)\n\t\ttracer.Stop(timeout)\n\t}()\n\n\tif _, err = runTrace(tracer, vmctx); err.Error() != \"stahp    in server-side tracer function 'step'\" {\n\t\tt.Errorf(\"Expected timeout error, got %v\", err)\n\t}\n}\n\nfunc TestHaltBetweenSteps(t *testing.T) {\n\tvmctx := testCtx()\n\ttracer, err := New(\"{step: function() {}, fault: function() {}, result: function() { return null; }}\", vmctx.txCtx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tenv := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})\n\tscope := &vm.ScopeContext{\n\t\tContract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0),\n\t}\n\n\ttracer.CaptureState(env, 0, 0, 0, 0, scope, nil, 0, nil)\n\ttimeout := errors.New(\"stahp\")\n\ttracer.Stop(timeout)\n\ttracer.CaptureState(env, 0, 0, 0, 0, scope, nil, 0, nil)\n\n\tif _, err := tracer.GetResult(); err.Error() != timeout.Error() {\n\t\tt.Errorf(\"Expected timeout error, got %v\", err)\n\t}\n}\n\n// TestNoStepExec tests a regular value transfer (no exec), and accessing the statedb\n// in 'result'\nfunc TestNoStepExec(t 
*testing.T) {\n\trunEmptyTrace := func(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) {\n\t\tenv := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer})\n\t\tstartGas := uint64(10000)\n\t\tcontract := vm.NewContract(account{}, account{}, big.NewInt(0), startGas)\n\t\ttracer.CaptureStart(env, contract.Caller(), contract.Address(), false, []byte{}, startGas, big.NewInt(0))\n\t\ttracer.CaptureEnd(nil, startGas-contract.Gas, 1, nil)\n\t\treturn tracer.GetResult()\n\t}\n\texecTracer := func(code string) []byte {\n\t\tt.Helper()\n\t\tctx := &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}\n\t\ttracer, err := New(code, ctx.txCtx)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tret, err := runEmptyTrace(tracer, ctx)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\treturn ret\n\t}\n\tfor i, tt := range []struct {\n\t\tcode string\n\t\twant string\n\t}{\n\t\t{ // tests that we don't panic on accessing the db methods\n\t\t\tcode: \"{depths: [], step: function() {}, fault: function() {},  result: function(ctx, db){ return db.getBalance(ctx.to)} }\",\n\t\t\twant: `\"0\"`,\n\t\t},\n\t} {\n\t\tif have := execTracer(tt.code); tt.want != string(have) {\n\t\t\tt.Errorf(\"testcase %d: expected return value to be %s got %s\\n\\tcode: %v\", i, tt.want, string(have), tt.code)\n\t\t}\n\t}\n}\n"
  },
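Editor's note: the harness above (New, runTrace, vmContext) makes extra cases cheap to add. A hypothetical test, not in the original file, that records the program counter at each step: runTrace executes the 5-byte program {PUSH1 0x1, PUSH1 0x1, STOP}, whose instructions sit at bytes 0, 2 and 4, so the step hook should observe exactly those PCs.

package tracers

import (
	"math/big"
	"testing"

	"github.com/ethereum/go-ethereum/core/vm"
)

// TestTracerPCs is a sketch in the spirit of TestTracer above: PUSH1 opcodes
// consume one operand byte each, so the visited PCs are 0, 2 and 4.
func TestTracerPCs(t *testing.T) {
	ctx := &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}}
	tracer, err := New("{pcs: [], step: function(log) { this.pcs.push(log.getPC()); }, fault: function() {}, result: function() { return this.pcs; }}", ctx.txCtx)
	if err != nil {
		t.Fatal(err)
	}
	ret, err := runTrace(tracer, ctx)
	if err != nil {
		t.Fatal(err)
	}
	if have, want := string(ret), `[0,2,4]`; have != want {
		t.Errorf("have %s, want %s", have, want)
	}
}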
  {
    "path": "eth/tracers/tracers.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package tracers is a collection of JavaScript transaction tracers.\npackage tracers\n\nimport (\n\t\"strings\"\n\t\"unicode\"\n\n\t\"github.com/ethereum/go-ethereum/eth/tracers/internal/tracers\"\n)\n\n// all contains all the built in JavaScript tracers by name.\nvar all = make(map[string]string)\n\n// camel converts a snake cased input string into a camel cased output.\nfunc camel(str string) string {\n\tpieces := strings.Split(str, \"_\")\n\tfor i := 1; i < len(pieces); i++ {\n\t\tpieces[i] = string(unicode.ToUpper(rune(pieces[i][0]))) + pieces[i][1:]\n\t}\n\treturn strings.Join(pieces, \"\")\n}\n\n// init retrieves the JavaScript transaction tracers included in go-ethereum.\nfunc init() {\n\tfor _, file := range tracers.AssetNames() {\n\t\tname := camel(strings.TrimSuffix(file, \".js\"))\n\t\tall[name] = string(tracers.MustAsset(file))\n\t}\n}\n\n// tracer retrieves a specific JavaScript tracer by name.\nfunc tracer(name string) (string, bool) {\n\tif tracer, ok := all[name]; ok {\n\t\treturn tracer, true\n\t}\n\treturn \"\", false\n}\n"
  },
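Editor's note: camel carries the only real logic in this file, so a quick table test pinning its behaviour down may help. The test itself is an assumption (it is not part of the original package); the two asset names are the ones the test files above actually resolve ("callTracer", "prestateTracer").

package tracers

import "testing"

// TestCamel checks the snake_case-to-camelCase mapping used to name the
// bundled tracer assets, e.g. call_tracer.js being exposed as "callTracer".
func TestCamel(t *testing.T) {
	for _, tt := range []struct{ in, want string }{
		{"call_tracer", "callTracer"},
		{"prestate_tracer", "prestateTracer"},
	} {
		if got := camel(tt.in); got != tt.want {
			t.Errorf("camel(%q) = %q, want %q", tt.in, got, tt.want)
		}
	}
}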
  {
    "path": "eth/tracers/tracers_test.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage tracers\n\nimport (\n\t\"crypto/ecdsa\"\n\t\"crypto/rand\"\n\t\"encoding/json\"\n\t\"io/ioutil\"\n\t\"math/big\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/common/math\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/tests\"\n)\n\n// To generate a new callTracer test, copy paste the makeTest method below into\n// a Geth console and call it with a transaction hash you which to export.\n\n/*\n// makeTest generates a callTracer test by running a prestate reassembled and a\n// call trace run, assembling all the gathered information into a test case.\nvar makeTest = function(tx, rewind) {\n  // Generate the genesis block from the block, transaction and prestate data\n  var block   = eth.getBlock(eth.getTransaction(tx).blockHash);\n  var genesis = eth.getBlock(block.parentHash);\n\n  delete genesis.gasUsed;\n  delete genesis.logsBloom;\n  delete genesis.parentHash;\n  delete genesis.receiptsRoot;\n  delete genesis.sha3Uncles;\n  delete genesis.size;\n  delete genesis.transactions;\n  delete genesis.transactionsRoot;\n  delete genesis.uncles;\n\n  genesis.gasLimit  = genesis.gasLimit.toString();\n  genesis.number    = genesis.number.toString();\n  genesis.timestamp = genesis.timestamp.toString();\n\n  genesis.alloc = debug.traceTransaction(tx, {tracer: \"prestateTracer\", rewind: rewind});\n  for (var key in genesis.alloc) {\n    genesis.alloc[key].nonce = genesis.alloc[key].nonce.toString();\n  }\n  genesis.config = admin.nodeInfo.protocols.eth.config;\n\n  // Generate the call trace and produce the test input\n  var result = debug.traceTransaction(tx, {tracer: \"callTracer\", rewind: rewind});\n  delete result.time;\n\n  console.log(JSON.stringify({\n    genesis: genesis,\n    context: {\n      number:     block.number.toString(),\n      difficulty: block.difficulty,\n      timestamp:  block.timestamp.toString(),\n      gasLimit:   block.gasLimit.toString(),\n      miner:      block.miner,\n    },\n    input:  eth.getRawTransaction(tx),\n    result: result,\n  }, null, 2));\n}\n*/\n\n// callTrace is the result of a callTracer run.\ntype callTrace struct {\n\tType    string          `json:\"type\"`\n\tFrom    common.Address  `json:\"from\"`\n\tTo      common.Address  `json:\"to\"`\n\tInput   hexutil.Bytes   
`json:\"input\"`\n\tOutput  hexutil.Bytes   `json:\"output\"`\n\tGas     *hexutil.Uint64 `json:\"gas,omitempty\"`\n\tGasUsed *hexutil.Uint64 `json:\"gasUsed,omitempty\"`\n\tValue   *hexutil.Big    `json:\"value,omitempty\"`\n\tError   string          `json:\"error,omitempty\"`\n\tCalls   []callTrace     `json:\"calls,omitempty\"`\n}\n\ntype callContext struct {\n\tNumber     math.HexOrDecimal64   `json:\"number\"`\n\tDifficulty *math.HexOrDecimal256 `json:\"difficulty\"`\n\tTime       math.HexOrDecimal64   `json:\"timestamp\"`\n\tGasLimit   math.HexOrDecimal64   `json:\"gasLimit\"`\n\tMiner      common.Address        `json:\"miner\"`\n}\n\n// callTracerTest defines a single test to check the call tracer against.\ntype callTracerTest struct {\n\tGenesis *core.Genesis `json:\"genesis\"`\n\tContext *callContext  `json:\"context\"`\n\tInput   string        `json:\"input\"`\n\tResult  *callTrace    `json:\"result\"`\n}\n\nfunc TestPrestateTracerCreate2(t *testing.T) {\n\tunsignedTx := types.NewTransaction(1, common.HexToAddress(\"0x00000000000000000000000000000000deadbeef\"),\n\t\tnew(big.Int), 5000000, big.NewInt(1), []byte{})\n\n\tprivateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand.Reader)\n\tif err != nil {\n\t\tt.Fatalf(\"err %v\", err)\n\t}\n\tsigner := types.NewEIP155Signer(big.NewInt(1))\n\ttx, err := types.SignTx(unsignedTx, signer, privateKeyECDSA)\n\tif err != nil {\n\t\tt.Fatalf(\"err %v\", err)\n\t}\n\t/**\n\t\tThis comes from one of the test vectors in the Skinny CREATE2 EIP (EIP-1014)\n\n\t    address 0x00000000000000000000000000000000deadbeef\n\t    salt 0x00000000000000000000000000000000000000000000000000000000cafebabe\n\t    init_code 0xdeadbeef\n\t    gas (assuming no mem expansion): 32006\n\t    result: 0x60f3f640a8508fC6a86d45DF051962668E1e8AC7\n\t*/\n\torigin, _ := signer.Sender(tx)\n\ttxContext := vm.TxContext{\n\t\tOrigin:   origin,\n\t\tGasPrice: big.NewInt(1),\n\t}\n\tcontext := vm.BlockContext{\n\t\tCanTransfer: core.CanTransfer,\n\t\tTransfer:    core.Transfer,\n\t\tCoinbase:    common.Address{},\n\t\tBlockNumber: new(big.Int).SetUint64(8000000),\n\t\tTime:        new(big.Int).SetUint64(5),\n\t\tDifficulty:  big.NewInt(0x30000),\n\t\tGasLimit:    uint64(6000000),\n\t}\n\talloc := core.GenesisAlloc{}\n\n\t// The code pushes 'deadbeef' into memory, then the other params, and calls CREATE2, then returns\n\t// the address\n\talloc[common.HexToAddress(\"0x00000000000000000000000000000000deadbeef\")] = core.GenesisAccount{\n\t\tNonce:   1,\n\t\tCode:    hexutil.MustDecode(\"0x63deadbeef60005263cafebabe6004601c6000F560005260206000F3\"),\n\t\tBalance: big.NewInt(1),\n\t}\n\talloc[origin] = core.GenesisAccount{\n\t\tNonce:   1,\n\t\tCode:    []byte{},\n\t\tBalance: big.NewInt(500000000000000),\n\t}\n\t_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false)\n\n\t// Create the tracer, the EVM environment and run it\n\ttracer, err := New(\"prestateTracer\", txContext)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create call tracer: %v\", err)\n\t}\n\tevm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer})\n\n\tmsg, err := tx.AsMessage(signer)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to prepare transaction for tracing: %v\", err)\n\t}\n\tst := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))\n\tif _, err = st.TransitionDb(); err != nil {\n\t\tt.Fatalf(\"failed to execute transaction: %v\", err)\n\t}\n\t// Retrieve the trace result and compare against the etalon\n\tres, err := 
tracer.GetResult()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to retrieve trace result: %v\", err)\n\t}\n\tret := make(map[string]interface{})\n\tif err := json.Unmarshal(res, &ret); err != nil {\n\t\tt.Fatalf(\"failed to unmarshal trace result: %v\", err)\n\t}\n\tif _, has := ret[\"0x60f3f640a8508fc6a86d45df051962668e1e8ac7\"]; !has {\n\t\tt.Fatalf(\"Expected 0x60f3f640a8508fc6a86d45df051962668e1e8ac7 in result\")\n\t}\n}\n\n// Iterates over all the input-output datasets in the tracer test harness and\n// runs the JavaScript tracers against them.\nfunc TestCallTracer(t *testing.T) {\n\tfiles, err := ioutil.ReadDir(\"testdata\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to retrieve tracer test suite: %v\", err)\n\t}\n\tfor _, file := range files {\n\t\tif !strings.HasPrefix(file.Name(), \"call_tracer_\") {\n\t\t\tcontinue\n\t\t}\n\t\tfile := file // capture range variable\n\t\tt.Run(camel(strings.TrimSuffix(strings.TrimPrefix(file.Name(), \"call_tracer_\"), \".json\")), func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\t// Call tracer test found, read it from disk\n\t\t\tblob, err := ioutil.ReadFile(filepath.Join(\"testdata\", file.Name()))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"failed to read testcase: %v\", err)\n\t\t\t}\n\t\t\ttest := new(callTracerTest)\n\t\t\tif err := json.Unmarshal(blob, test); err != nil {\n\t\t\t\tt.Fatalf(\"failed to parse testcase: %v\", err)\n\t\t\t}\n\t\t\t// Configure a blockchain with the given prestate\n\t\t\ttx := new(types.Transaction)\n\t\t\tif err := rlp.DecodeBytes(common.FromHex(test.Input), tx); err != nil {\n\t\t\t\tt.Fatalf(\"failed to parse testcase input: %v\", err)\n\t\t\t}\n\t\t\tsigner := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)))\n\t\t\torigin, _ := signer.Sender(tx)\n\t\t\ttxContext := vm.TxContext{\n\t\t\t\tOrigin:   origin,\n\t\t\t\tGasPrice: tx.GasPrice(),\n\t\t\t}\n\t\t\tcontext := vm.BlockContext{\n\t\t\t\tCanTransfer: core.CanTransfer,\n\t\t\t\tTransfer:    core.Transfer,\n\t\t\t\tCoinbase:    test.Context.Miner,\n\t\t\t\tBlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)),\n\t\t\t\tTime:        new(big.Int).SetUint64(uint64(test.Context.Time)),\n\t\t\t\tDifficulty:  (*big.Int)(test.Context.Difficulty),\n\t\t\t\tGasLimit:    uint64(test.Context.GasLimit),\n\t\t\t}\n\t\t\t_, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false)\n\n\t\t\t// Create the tracer, the EVM environment and run it\n\t\t\ttracer, err := New(\"callTracer\", txContext)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"failed to create call tracer: %v\", err)\n\t\t\t}\n\t\t\tevm := vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer})\n\n\t\t\tmsg, err := tx.AsMessage(signer)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"failed to prepare transaction for tracing: %v\", err)\n\t\t\t}\n\t\t\tst := core.NewStateTransition(evm, msg, new(core.GasPool).AddGas(tx.Gas()))\n\t\t\tif _, err = st.TransitionDb(); err != nil {\n\t\t\t\tt.Fatalf(\"failed to execute transaction: %v\", err)\n\t\t\t}\n\t\t\t// Retrieve the trace result and compare against the etalon\n\t\t\tres, err := 
debugging\n\t\t\t\t//have, _ := json.MarshalIndent(ret, \"\", \" \")\n\t\t\t\t//want, _ := json.MarshalIndent(test.Result, \"\", \" \")\n\t\t\t\t//t.Fatalf(\"trace mismatch: \\nhave %+v\\nwant %+v\", string(have), string(want))\n\t\t\t\tt.Fatalf(\"trace mismatch: \\nhave %+v\\nwant %+v\", ret, test.Result)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to\n// comparison\nfunc jsonEqual(x, y interface{}) bool {\n\txTrace := new(callTrace)\n\tyTrace := new(callTrace)\n\tif xj, err := json.Marshal(x); err == nil {\n\t\tjson.Unmarshal(xj, xTrace)\n\t} else {\n\t\treturn false\n\t}\n\tif yj, err := json.Marshal(y); err == nil {\n\t\tjson.Unmarshal(yj, yTrace)\n\t} else {\n\t\treturn false\n\t}\n\treturn reflect.DeepEqual(xTrace, yTrace)\n}\n"
  },
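Editor's note: TestPrestateTracerCreate2 above leans on the EIP-1014 (Skinny CREATE2) address formula keccak256(0xff ++ sender ++ salt ++ keccak256(init_code))[12:]. A small standalone sketch reproducing the quoted test vector with the same helpers used above (crypto.CreateAddress2, hexutil.MustDecode); the standalone main package is an assumption for illustration.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Inputs quoted in the test's comment block (an EIP-1014 test vector).
	from := common.HexToAddress("0x00000000000000000000000000000000deadbeef")
	salt := common.HexToHash("0x00000000000000000000000000000000000000000000000000000000cafebabe")
	initCode := hexutil.MustDecode("0xdeadbeef")

	// CreateAddress2 computes keccak256(0xff ++ from ++ salt ++ keccak256(initCode))[12:].
	addr := crypto.CreateAddress2(from, salt, crypto.Keccak256(initCode))
	fmt.Println(addr.Hex()) // 0x60f3f640a8508fC6a86d45DF051962668E1e8AC7
}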
  {
    "path": "ethclient/ethclient.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package ethclient provides a client for the Ethereum RPC API.\npackage ethclient\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\n\t\"github.com/ethereum/go-ethereum\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\n// Client defines typed wrappers for the Ethereum RPC API.\ntype Client struct {\n\tc *rpc.Client\n}\n\n// Dial connects a client to the given URL.\nfunc Dial(rawurl string) (*Client, error) {\n\treturn DialContext(context.Background(), rawurl)\n}\n\nfunc DialContext(ctx context.Context, rawurl string) (*Client, error) {\n\tc, err := rpc.DialContext(ctx, rawurl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewClient(c), nil\n}\n\n// NewClient creates a client that uses the given RPC client.\nfunc NewClient(c *rpc.Client) *Client {\n\treturn &Client{c}\n}\n\nfunc (ec *Client) Close() {\n\tec.c.Close()\n}\n\n// Blockchain Access\n\n// ChainId retrieves the current chain ID for transaction replay protection.\nfunc (ec *Client) ChainID(ctx context.Context) (*big.Int, error) {\n\tvar result hexutil.Big\n\terr := ec.c.CallContext(ctx, &result, \"eth_chainId\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn (*big.Int)(&result), err\n}\n\n// BlockByHash returns the given full block.\n//\n// Note that loading full blocks requires two requests. Use HeaderByHash\n// if you don't need all transactions or uncle headers.\nfunc (ec *Client) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {\n\treturn ec.getBlock(ctx, \"eth_getBlockByHash\", hash, true)\n}\n\n// BlockByNumber returns a block from the current canonical chain. If number is nil, the\n// latest known block is returned.\n//\n// Note that loading full blocks requires two requests. 
Use HeaderByNumber\n// if you don't need all transactions or uncle headers.\nfunc (ec *Client) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {\n\treturn ec.getBlock(ctx, \"eth_getBlockByNumber\", toBlockNumArg(number), true)\n}\n\n// BlockNumber returns the most recent block number\nfunc (ec *Client) BlockNumber(ctx context.Context) (uint64, error) {\n\tvar result hexutil.Uint64\n\terr := ec.c.CallContext(ctx, &result, \"eth_blockNumber\")\n\treturn uint64(result), err\n}\n\ntype rpcBlock struct {\n\tHash         common.Hash      `json:\"hash\"`\n\tTransactions []rpcTransaction `json:\"transactions\"`\n\tUncleHashes  []common.Hash    `json:\"uncles\"`\n}\n\nfunc (ec *Client) getBlock(ctx context.Context, method string, args ...interface{}) (*types.Block, error) {\n\tvar raw json.RawMessage\n\terr := ec.c.CallContext(ctx, &raw, method, args...)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if len(raw) == 0 {\n\t\treturn nil, ethereum.NotFound\n\t}\n\t// Decode header and transactions.\n\tvar head *types.Header\n\tvar body rpcBlock\n\tif err := json.Unmarshal(raw, &head); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := json.Unmarshal(raw, &body); err != nil {\n\t\treturn nil, err\n\t}\n\t// Quick-verify transaction and uncle lists. This mostly helps with debugging the server.\n\tif head.UncleHash == types.EmptyUncleHash && len(body.UncleHashes) > 0 {\n\t\treturn nil, fmt.Errorf(\"server returned non-empty uncle list but block header indicates no uncles\")\n\t}\n\tif head.UncleHash != types.EmptyUncleHash && len(body.UncleHashes) == 0 {\n\t\treturn nil, fmt.Errorf(\"server returned empty uncle list but block header indicates uncles\")\n\t}\n\tif head.TxHash == types.EmptyRootHash && len(body.Transactions) > 0 {\n\t\treturn nil, fmt.Errorf(\"server returned non-empty transaction list but block header indicates no transactions\")\n\t}\n\tif head.TxHash != types.EmptyRootHash && len(body.Transactions) == 0 {\n\t\treturn nil, fmt.Errorf(\"server returned empty transaction list but block header indicates transactions\")\n\t}\n\t// Load uncles because they are not included in the block response.\n\tvar uncles []*types.Header\n\tif len(body.UncleHashes) > 0 {\n\t\tuncles = make([]*types.Header, len(body.UncleHashes))\n\t\treqs := make([]rpc.BatchElem, len(body.UncleHashes))\n\t\tfor i := range reqs {\n\t\t\treqs[i] = rpc.BatchElem{\n\t\t\t\tMethod: \"eth_getUncleByBlockHashAndIndex\",\n\t\t\t\tArgs:   []interface{}{body.Hash, hexutil.EncodeUint64(uint64(i))},\n\t\t\t\tResult: &uncles[i],\n\t\t\t}\n\t\t}\n\t\tif err := ec.c.BatchCallContext(ctx, reqs); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor i := range reqs {\n\t\t\tif reqs[i].Error != nil {\n\t\t\t\treturn nil, reqs[i].Error\n\t\t\t}\n\t\t\tif uncles[i] == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"got null header for uncle %d of block %x\", i, body.Hash[:])\n\t\t\t}\n\t\t}\n\t}\n\t// Fill the sender cache of transactions in the block.\n\ttxs := make([]*types.Transaction, len(body.Transactions))\n\tfor i, tx := range body.Transactions {\n\t\tif tx.From != nil {\n\t\t\tsetSenderFromServer(tx.tx, *tx.From, body.Hash)\n\t\t}\n\t\ttxs[i] = tx.tx\n\t}\n\treturn types.NewBlockWithHeader(head).WithBody(txs, uncles), nil\n}\n\n// HeaderByHash returns the block header with the given hash.\nfunc (ec *Client) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {\n\tvar head *types.Header\n\terr := ec.c.CallContext(ctx, &head, \"eth_getBlockByHash\", hash, false)\n\tif err == nil && head == nil 
{\n\t\terr = ethereum.NotFound\n\t}\n\treturn head, err\n}\n\n// HeaderByNumber returns a block header from the current canonical chain. If number is\n// nil, the latest known header is returned.\nfunc (ec *Client) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) {\n\tvar head *types.Header\n\terr := ec.c.CallContext(ctx, &head, \"eth_getBlockByNumber\", toBlockNumArg(number), false)\n\tif err == nil && head == nil {\n\t\terr = ethereum.NotFound\n\t}\n\treturn head, err\n}\n\ntype rpcTransaction struct {\n\ttx *types.Transaction\n\ttxExtraInfo\n}\n\ntype txExtraInfo struct {\n\tBlockNumber *string         `json:\"blockNumber,omitempty\"`\n\tBlockHash   *common.Hash    `json:\"blockHash,omitempty\"`\n\tFrom        *common.Address `json:\"from,omitempty\"`\n}\n\nfunc (tx *rpcTransaction) UnmarshalJSON(msg []byte) error {\n\tif err := json.Unmarshal(msg, &tx.tx); err != nil {\n\t\treturn err\n\t}\n\treturn json.Unmarshal(msg, &tx.txExtraInfo)\n}\n\n// TransactionByHash returns the transaction with the given hash.\nfunc (ec *Client) TransactionByHash(ctx context.Context, hash common.Hash) (tx *types.Transaction, isPending bool, err error) {\n\tvar json *rpcTransaction\n\terr = ec.c.CallContext(ctx, &json, \"eth_getTransactionByHash\", hash)\n\tif err != nil {\n\t\treturn nil, false, err\n\t} else if json == nil {\n\t\treturn nil, false, ethereum.NotFound\n\t} else if _, r, _ := json.tx.RawSignatureValues(); r == nil {\n\t\treturn nil, false, fmt.Errorf(\"server returned transaction without signature\")\n\t}\n\tif json.From != nil && json.BlockHash != nil {\n\t\tsetSenderFromServer(json.tx, *json.From, *json.BlockHash)\n\t}\n\treturn json.tx, json.BlockNumber == nil, nil\n}\n\n// TransactionSender returns the sender address of the given transaction. The transaction\n// must be known to the remote node and included in the blockchain at the given block and\n// index. The sender is the one derived by the protocol at the time of inclusion.\n//\n// There is a fast-path for transactions retrieved by TransactionByHash and\n// TransactionInBlock. 
Getting their sender address can be done without an RPC interaction.\nfunc (ec *Client) TransactionSender(ctx context.Context, tx *types.Transaction, block common.Hash, index uint) (common.Address, error) {\n\t// Try to load the address from the cache.\n\tsender, err := types.Sender(&senderFromServer{blockhash: block}, tx)\n\tif err == nil {\n\t\treturn sender, nil\n\t}\n\tvar meta struct {\n\t\tHash common.Hash\n\t\tFrom common.Address\n\t}\n\tif err = ec.c.CallContext(ctx, &meta, \"eth_getTransactionByBlockHashAndIndex\", block, hexutil.Uint64(index)); err != nil {\n\t\treturn common.Address{}, err\n\t}\n\tif meta.Hash == (common.Hash{}) || meta.Hash != tx.Hash() {\n\t\treturn common.Address{}, errors.New(\"wrong inclusion block/index\")\n\t}\n\treturn meta.From, nil\n}\n\n// TransactionCount returns the total number of transactions in the given block.\nfunc (ec *Client) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) {\n\tvar num hexutil.Uint\n\terr := ec.c.CallContext(ctx, &num, \"eth_getBlockTransactionCountByHash\", blockHash)\n\treturn uint(num), err\n}\n\n// TransactionInBlock returns a single transaction at index in the given block.\nfunc (ec *Client) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) {\n\tvar json *rpcTransaction\n\terr := ec.c.CallContext(ctx, &json, \"eth_getTransactionByBlockHashAndIndex\", blockHash, hexutil.Uint64(index))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif json == nil {\n\t\treturn nil, ethereum.NotFound\n\t} else if _, r, _ := json.tx.RawSignatureValues(); r == nil {\n\t\treturn nil, fmt.Errorf(\"server returned transaction without signature\")\n\t}\n\tif json.From != nil && json.BlockHash != nil {\n\t\tsetSenderFromServer(json.tx, *json.From, *json.BlockHash)\n\t}\n\treturn json.tx, err\n}\n\n// TransactionReceipt returns the receipt of a transaction by transaction hash.\n// Note that the receipt is not available for pending transactions.\nfunc (ec *Client) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {\n\tvar r *types.Receipt\n\terr := ec.c.CallContext(ctx, &r, \"eth_getTransactionReceipt\", txHash)\n\tif err == nil {\n\t\tif r == nil {\n\t\t\treturn nil, ethereum.NotFound\n\t\t}\n\t}\n\treturn r, err\n}\n\nfunc toBlockNumArg(number *big.Int) string {\n\tif number == nil {\n\t\treturn \"latest\"\n\t}\n\tpending := big.NewInt(-1)\n\tif number.Cmp(pending) == 0 {\n\t\treturn \"pending\"\n\t}\n\treturn hexutil.EncodeBig(number)\n}\n\ntype rpcProgress struct {\n\tStartingBlock hexutil.Uint64\n\tCurrentBlock  hexutil.Uint64\n\tHighestBlock  hexutil.Uint64\n\tPulledStates  hexutil.Uint64\n\tKnownStates   hexutil.Uint64\n}\n\n// SyncProgress retrieves the current progress of the sync algorithm. 
If there's\n// no sync currently running, it returns nil.\nfunc (ec *Client) SyncProgress(ctx context.Context) (*ethereum.SyncProgress, error) {\n\tvar raw json.RawMessage\n\tif err := ec.c.CallContext(ctx, &raw, \"eth_syncing\"); err != nil {\n\t\treturn nil, err\n\t}\n\t// Handle the possible response types\n\tvar syncing bool\n\tif err := json.Unmarshal(raw, &syncing); err == nil {\n\t\treturn nil, nil // Not syncing (always false)\n\t}\n\tvar progress *rpcProgress\n\tif err := json.Unmarshal(raw, &progress); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ethereum.SyncProgress{\n\t\tStartingBlock: uint64(progress.StartingBlock),\n\t\tCurrentBlock:  uint64(progress.CurrentBlock),\n\t\tHighestBlock:  uint64(progress.HighestBlock),\n\t\tPulledStates:  uint64(progress.PulledStates),\n\t\tKnownStates:   uint64(progress.KnownStates),\n\t}, nil\n}\n\n// SubscribeNewHead subscribes to notifications about the current blockchain head\n// on the given channel.\nfunc (ec *Client) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {\n\treturn ec.c.EthSubscribe(ctx, ch, \"newHeads\")\n}\n\n// State Access\n\n// NetworkID returns the network ID (also known as the chain ID) for this chain.\nfunc (ec *Client) NetworkID(ctx context.Context) (*big.Int, error) {\n\tversion := new(big.Int)\n\tvar ver string\n\tif err := ec.c.CallContext(ctx, &ver, \"net_version\"); err != nil {\n\t\treturn nil, err\n\t}\n\tif _, ok := version.SetString(ver, 10); !ok {\n\t\treturn nil, fmt.Errorf(\"invalid net_version result %q\", ver)\n\t}\n\treturn version, nil\n}\n\n// BalanceAt returns the wei balance of the given account.\n// The block number can be nil, in which case the balance is taken from the latest known block.\nfunc (ec *Client) BalanceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (*big.Int, error) {\n\tvar result hexutil.Big\n\terr := ec.c.CallContext(ctx, &result, \"eth_getBalance\", account, toBlockNumArg(blockNumber))\n\treturn (*big.Int)(&result), err\n}\n\n// StorageAt returns the value of key in the contract storage of the given account.\n// The block number can be nil, in which case the value is taken from the latest known block.\nfunc (ec *Client) StorageAt(ctx context.Context, account common.Address, key common.Hash, blockNumber *big.Int) ([]byte, error) {\n\tvar result hexutil.Bytes\n\terr := ec.c.CallContext(ctx, &result, \"eth_getStorageAt\", account, key, toBlockNumArg(blockNumber))\n\treturn result, err\n}\n\n// CodeAt returns the contract code of the given account.\n// The block number can be nil, in which case the code is taken from the latest known block.\nfunc (ec *Client) CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error) {\n\tvar result hexutil.Bytes\n\terr := ec.c.CallContext(ctx, &result, \"eth_getCode\", account, toBlockNumArg(blockNumber))\n\treturn result, err\n}\n\n// NonceAt returns the account nonce of the given account.\n// The block number can be nil, in which case the nonce is taken from the latest known block.\nfunc (ec *Client) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) {\n\tvar result hexutil.Uint64\n\terr := ec.c.CallContext(ctx, &result, \"eth_getTransactionCount\", account, toBlockNumArg(blockNumber))\n\treturn uint64(result), err\n}\n\n// Filters\n\n// FilterLogs executes a filter query.\nfunc (ec *Client) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) {\n\tvar result []types.Log\n\targ, err 
:= toFilterArg(q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = ec.c.CallContext(ctx, &result, \"eth_getLogs\", arg)\n\treturn result, err\n}\n\n// SubscribeFilterLogs subscribes to the results of a streaming filter query.\nfunc (ec *Client) SubscribeFilterLogs(ctx context.Context, q ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) {\n\targ, err := toFilterArg(q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ec.c.EthSubscribe(ctx, ch, \"logs\", arg)\n}\n\nfunc toFilterArg(q ethereum.FilterQuery) (interface{}, error) {\n\targ := map[string]interface{}{\n\t\t\"address\": q.Addresses,\n\t\t\"topics\":  q.Topics,\n\t}\n\tif q.BlockHash != nil {\n\t\targ[\"blockHash\"] = *q.BlockHash\n\t\tif q.FromBlock != nil || q.ToBlock != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot specify both BlockHash and FromBlock/ToBlock\")\n\t\t}\n\t} else {\n\t\tif q.FromBlock == nil {\n\t\t\targ[\"fromBlock\"] = \"0x0\"\n\t\t} else {\n\t\t\targ[\"fromBlock\"] = toBlockNumArg(q.FromBlock)\n\t\t}\n\t\targ[\"toBlock\"] = toBlockNumArg(q.ToBlock)\n\t}\n\treturn arg, nil\n}\n\n// Pending State\n\n// PendingBalanceAt returns the wei balance of the given account in the pending state.\nfunc (ec *Client) PendingBalanceAt(ctx context.Context, account common.Address) (*big.Int, error) {\n\tvar result hexutil.Big\n\terr := ec.c.CallContext(ctx, &result, \"eth_getBalance\", account, \"pending\")\n\treturn (*big.Int)(&result), err\n}\n\n// PendingStorageAt returns the value of key in the contract storage of the given account in the pending state.\nfunc (ec *Client) PendingStorageAt(ctx context.Context, account common.Address, key common.Hash) ([]byte, error) {\n\tvar result hexutil.Bytes\n\terr := ec.c.CallContext(ctx, &result, \"eth_getStorageAt\", account, key, \"pending\")\n\treturn result, err\n}\n\n// PendingCodeAt returns the contract code of the given account in the pending state.\nfunc (ec *Client) PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error) {\n\tvar result hexutil.Bytes\n\terr := ec.c.CallContext(ctx, &result, \"eth_getCode\", account, \"pending\")\n\treturn result, err\n}\n\n// PendingNonceAt returns the account nonce of the given account in the pending state.\n// This is the nonce that should be used for the next transaction.\nfunc (ec *Client) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) {\n\tvar result hexutil.Uint64\n\terr := ec.c.CallContext(ctx, &result, \"eth_getTransactionCount\", account, \"pending\")\n\treturn uint64(result), err\n}\n\n// PendingTransactionCount returns the total number of transactions in the pending state.\nfunc (ec *Client) PendingTransactionCount(ctx context.Context) (uint, error) {\n\tvar num hexutil.Uint\n\terr := ec.c.CallContext(ctx, &num, \"eth_getBlockTransactionCountByNumber\", \"pending\")\n\treturn uint(num), err\n}\n\n// TODO: SubscribePendingTransactions (needs server side)\n\n// Contract Calling\n\n// CallContract executes a message call transaction, which is directly executed in the VM\n// of the node, but never mined into the blockchain.\n//\n// blockNumber selects the block height at which the call runs. It can be nil, in which\n// case the code is taken from the latest known block. 
Note that state from very old\n// blocks might not be available.\nfunc (ec *Client) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) {\n\tvar hex hexutil.Bytes\n\terr := ec.c.CallContext(ctx, &hex, \"eth_call\", toCallArg(msg), toBlockNumArg(blockNumber))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn hex, nil\n}\n\n// PendingCallContract executes a message call transaction using the EVM.\n// The state seen by the contract call is the pending state.\nfunc (ec *Client) PendingCallContract(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) {\n\tvar hex hexutil.Bytes\n\terr := ec.c.CallContext(ctx, &hex, \"eth_call\", toCallArg(msg), \"pending\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn hex, nil\n}\n\n// SuggestGasPrice retrieves the currently suggested gas price to allow a timely\n// execution of a transaction.\nfunc (ec *Client) SuggestGasPrice(ctx context.Context) (*big.Int, error) {\n\tvar hex hexutil.Big\n\tif err := ec.c.CallContext(ctx, &hex, \"eth_gasPrice\"); err != nil {\n\t\treturn nil, err\n\t}\n\treturn (*big.Int)(&hex), nil\n}\n\n// EstimateGas tries to estimate the gas needed to execute a specific transaction based on\n// the current pending state of the backend blockchain. There is no guarantee that this is\n// the true gas limit requirement as other transactions may be added or removed by miners,\n// but it should provide a basis for setting a reasonable default.\nfunc (ec *Client) EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64, error) {\n\tvar hex hexutil.Uint64\n\terr := ec.c.CallContext(ctx, &hex, \"eth_estimateGas\", toCallArg(msg))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn uint64(hex), nil\n}\n\n// SendTransaction injects a signed transaction into the pending pool for execution.\n//\n// If the transaction was a contract creation use the TransactionReceipt method to get the\n// contract address after the transaction has been mined.\nfunc (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) error {\n\tdata, err := tx.MarshalBinary()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ec.c.CallContext(ctx, nil, \"eth_sendRawTransaction\", hexutil.Encode(data))\n}\n\nfunc toCallArg(msg ethereum.CallMsg) interface{} {\n\targ := map[string]interface{}{\n\t\t\"from\": msg.From,\n\t\t\"to\":   msg.To,\n\t}\n\tif len(msg.Data) > 0 {\n\t\targ[\"data\"] = hexutil.Bytes(msg.Data)\n\t}\n\tif msg.Value != nil {\n\t\targ[\"value\"] = (*hexutil.Big)(msg.Value)\n\t}\n\tif msg.Gas != 0 {\n\t\targ[\"gas\"] = hexutil.Uint64(msg.Gas)\n\t}\n\tif msg.GasPrice != nil {\n\t\targ[\"gasPrice\"] = (*hexutil.Big)(msg.GasPrice)\n\t}\n\treturn arg\n}\n"
  },
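  {
    "path": "examples/ethclient_read_sketch.go",
    "content": "// NOTE (editor): an illustrative usage sketch, not part of the original source tree.\n// It exercises the read-only Client methods defined in ethclient/ethclient.go above:\n// HeaderByNumber, BalanceAt and SuggestGasPrice. The endpoint URL and the queried\n// address are assumptions; substitute your own node and account.\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/ethclient\"\n)\n\nfunc main() {\n\t// Dial accepts http(s)://, ws(s):// and IPC endpoints; this one is hypothetical.\n\tclient, err := ethclient.Dial(\"ws://127.0.0.1:8546\")\n\tif err != nil {\n\t\tlog.Fatalf(\"dial: %v\", err)\n\t}\n\tdefer client.Close()\n\n\tctx := context.Background()\n\n\t// A nil number selects the latest known header. Prefer HeaderByNumber over\n\t// BlockByNumber when transactions and uncle headers are not needed.\n\thead, err := client.HeaderByNumber(ctx, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"header: %v\", err)\n\t}\n\tfmt.Println(\"latest block:\", head.Number)\n\n\t// A nil block number reads the balance from the latest known block.\n\taddr := common.HexToAddress(\"0xD36722ADeC3EdCB29c8e7b5a47f352D701393462\") // placeholder account\n\tbalance, err := client.BalanceAt(ctx, addr, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"balance: %v\", err)\n\t}\n\tfmt.Println(\"balance (wei):\", balance)\n\n\t// SuggestGasPrice returns the node's current gas price suggestion.\n\tif price, err := client.SuggestGasPrice(ctx); err == nil {\n\t\tfmt.Println(\"suggested gas price:\", price)\n\t}\n}\n"
  },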
  {
    "path": "ethclient/ethclient_test.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage ethclient\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/eth\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/node\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\n// Verify that Client implements the ethereum interfaces.\nvar (\n\t_ = ethereum.ChainReader(&Client{})\n\t_ = ethereum.TransactionReader(&Client{})\n\t_ = ethereum.ChainStateReader(&Client{})\n\t_ = ethereum.ChainSyncReader(&Client{})\n\t_ = ethereum.ContractCaller(&Client{})\n\t_ = ethereum.GasEstimator(&Client{})\n\t_ = ethereum.GasPricer(&Client{})\n\t_ = ethereum.LogFilterer(&Client{})\n\t_ = ethereum.PendingStateReader(&Client{})\n\t// _ = ethereum.PendingStateEventer(&Client{})\n\t_ = ethereum.PendingContractCaller(&Client{})\n)\n\nfunc TestToFilterArg(t *testing.T) {\n\tblockHashErr := fmt.Errorf(\"cannot specify both BlockHash and FromBlock/ToBlock\")\n\taddresses := []common.Address{\n\t\tcommon.HexToAddress(\"0xD36722ADeC3EdCB29c8e7b5a47f352D701393462\"),\n\t}\n\tblockHash := common.HexToHash(\n\t\t\"0xeb94bb7d78b73657a9d7a99792413f50c0a45c51fc62bdcb08a53f18e9a2b4eb\",\n\t)\n\n\tfor _, testCase := range []struct {\n\t\tname   string\n\t\tinput  ethereum.FilterQuery\n\t\toutput interface{}\n\t\terr    error\n\t}{\n\t\t{\n\t\t\t\"without BlockHash\",\n\t\t\tethereum.FilterQuery{\n\t\t\t\tAddresses: addresses,\n\t\t\t\tFromBlock: big.NewInt(1),\n\t\t\t\tToBlock:   big.NewInt(2),\n\t\t\t\tTopics:    [][]common.Hash{},\n\t\t\t},\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"address\":   addresses,\n\t\t\t\t\"fromBlock\": \"0x1\",\n\t\t\t\t\"toBlock\":   \"0x2\",\n\t\t\t\t\"topics\":    [][]common.Hash{},\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"with nil fromBlock and nil toBlock\",\n\t\t\tethereum.FilterQuery{\n\t\t\t\tAddresses: addresses,\n\t\t\t\tTopics:    [][]common.Hash{},\n\t\t\t},\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"address\":   addresses,\n\t\t\t\t\"fromBlock\": \"0x0\",\n\t\t\t\t\"toBlock\":   \"latest\",\n\t\t\t\t\"topics\":    [][]common.Hash{},\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"with negative fromBlock and negative toBlock\",\n\t\t\tethereum.FilterQuery{\n\t\t\t\tAddresses: addresses,\n\t\t\t\tFromBlock: big.NewInt(-1),\n\t\t\t\tToBlock:   
big.NewInt(-1),\n\t\t\t\tTopics:    [][]common.Hash{},\n\t\t\t},\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"address\":   addresses,\n\t\t\t\t\"fromBlock\": \"pending\",\n\t\t\t\t\"toBlock\":   \"pending\",\n\t\t\t\t\"topics\":    [][]common.Hash{},\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"with blockhash\",\n\t\t\tethereum.FilterQuery{\n\t\t\t\tAddresses: addresses,\n\t\t\t\tBlockHash: &blockHash,\n\t\t\t\tTopics:    [][]common.Hash{},\n\t\t\t},\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"address\":   addresses,\n\t\t\t\t\"blockHash\": blockHash,\n\t\t\t\t\"topics\":    [][]common.Hash{},\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t\t{\n\t\t\t\"with blockhash and from block\",\n\t\t\tethereum.FilterQuery{\n\t\t\t\tAddresses: addresses,\n\t\t\t\tBlockHash: &blockHash,\n\t\t\t\tFromBlock: big.NewInt(1),\n\t\t\t\tTopics:    [][]common.Hash{},\n\t\t\t},\n\t\t\tnil,\n\t\t\tblockHashErr,\n\t\t},\n\t\t{\n\t\t\t\"with blockhash and to block\",\n\t\t\tethereum.FilterQuery{\n\t\t\t\tAddresses: addresses,\n\t\t\t\tBlockHash: &blockHash,\n\t\t\t\tToBlock:   big.NewInt(1),\n\t\t\t\tTopics:    [][]common.Hash{},\n\t\t\t},\n\t\t\tnil,\n\t\t\tblockHashErr,\n\t\t},\n\t\t{\n\t\t\t\"with blockhash and both from / to block\",\n\t\t\tethereum.FilterQuery{\n\t\t\t\tAddresses: addresses,\n\t\t\t\tBlockHash: &blockHash,\n\t\t\t\tFromBlock: big.NewInt(1),\n\t\t\t\tToBlock:   big.NewInt(2),\n\t\t\t\tTopics:    [][]common.Hash{},\n\t\t\t},\n\t\t\tnil,\n\t\t\tblockHashErr,\n\t\t},\n\t} {\n\t\tt.Run(testCase.name, func(t *testing.T) {\n\t\t\toutput, err := toFilterArg(testCase.input)\n\t\t\tif (testCase.err == nil) != (err == nil) {\n\t\t\t\tt.Fatalf(\"expected error %v but got %v\", testCase.err, err)\n\t\t\t}\n\t\t\tif testCase.err != nil {\n\t\t\t\tif testCase.err.Error() != err.Error() {\n\t\t\t\t\tt.Fatalf(\"expected error %v but got %v\", testCase.err, err)\n\t\t\t\t}\n\t\t\t} else if !reflect.DeepEqual(testCase.output, output) {\n\t\t\t\tt.Fatalf(\"expected filter arg %v but got %v\", testCase.output, output)\n\t\t\t}\n\t\t})\n\t}\n}\n\nvar (\n\ttestKey, _  = crypto.HexToECDSA(\"b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291\")\n\ttestAddr    = crypto.PubkeyToAddress(testKey.PublicKey)\n\ttestBalance = big.NewInt(2e10)\n)\n\nfunc newTestBackend(t *testing.T) (*node.Node, []*types.Block) {\n\t// Generate test chain.\n\tgenesis, blocks := generateTestChain()\n\t// Create node\n\tn, err := node.New(&node.Config{})\n\tif err != nil {\n\t\tt.Fatalf(\"can't create new node: %v\", err)\n\t}\n\t// Create Ethereum Service\n\tconfig := &ethconfig.Config{Genesis: genesis}\n\tconfig.Ethash.PowMode = ethash.ModeFake\n\tethservice, err := eth.New(n, config)\n\tif err != nil {\n\t\tt.Fatalf(\"can't create new ethereum service: %v\", err)\n\t}\n\t// Import the test chain.\n\tif err := n.Start(); err != nil {\n\t\tt.Fatalf(\"can't start test node: %v\", err)\n\t}\n\tif _, err := ethservice.BlockChain().InsertChain(blocks[1:]); err != nil {\n\t\tt.Fatalf(\"can't import test blocks: %v\", err)\n\t}\n\treturn n, blocks\n}\n\nfunc generateTestChain() (*core.Genesis, []*types.Block) {\n\tdb := rawdb.NewMemoryDatabase()\n\tconfig := params.AllEthashProtocolChanges\n\tgenesis := &core.Genesis{\n\t\tConfig:    config,\n\t\tAlloc:     core.GenesisAlloc{testAddr: {Balance: testBalance}},\n\t\tExtraData: []byte(\"test genesis\"),\n\t\tTimestamp: 9000,\n\t}\n\tgenerate := func(i int, g *core.BlockGen) {\n\t\tg.OffsetTime(5)\n\t\tg.SetExtra([]byte(\"test\"))\n\t}\n\tgblock := genesis.ToBlock(db)\n\tengine := 
ethash.NewFaker()\n\tblocks, _ := core.GenerateChain(config, gblock, engine, db, 1, generate)\n\tblocks = append([]*types.Block{gblock}, blocks...)\n\treturn genesis, blocks\n}\n\nfunc TestEthClient(t *testing.T) {\n\tbackend, chain := newTestBackend(t)\n\tclient, _ := backend.Attach()\n\tdefer backend.Close()\n\tdefer client.Close()\n\n\ttests := map[string]struct {\n\t\ttest func(t *testing.T)\n\t}{\n\t\t\"TestHeader\": {\n\t\t\tfunc(t *testing.T) { testHeader(t, chain, client) },\n\t\t},\n\t\t\"TestBalanceAt\": {\n\t\t\tfunc(t *testing.T) { testBalanceAt(t, client) },\n\t\t},\n\t\t\"TestTxInBlockInterrupted\": {\n\t\t\tfunc(t *testing.T) { testTransactionInBlockInterrupted(t, client) },\n\t\t},\n\t\t\"TestChainID\": {\n\t\t\tfunc(t *testing.T) { testChainID(t, client) },\n\t\t},\n\t\t\"TestGetBlock\": {\n\t\t\tfunc(t *testing.T) { testGetBlock(t, client) },\n\t\t},\n\t\t\"TestStatusFunctions\": {\n\t\t\tfunc(t *testing.T) { testStatusFunctions(t, client) },\n\t\t},\n\t\t\"TestCallContract\": {\n\t\t\tfunc(t *testing.T) { testCallContract(t, client) },\n\t\t},\n\t\t\"TestAtFunctions\": {\n\t\t\tfunc(t *testing.T) { testAtFunctions(t, client) },\n\t\t},\n\t}\n\n\tt.Parallel()\n\tfor name, tt := range tests {\n\t\tt.Run(name, tt.test)\n\t}\n}\n\nfunc testHeader(t *testing.T, chain []*types.Block, client *rpc.Client) {\n\ttests := map[string]struct {\n\t\tblock   *big.Int\n\t\twant    *types.Header\n\t\twantErr error\n\t}{\n\t\t\"genesis\": {\n\t\t\tblock: big.NewInt(0),\n\t\t\twant:  chain[0].Header(),\n\t\t},\n\t\t\"first_block\": {\n\t\t\tblock: big.NewInt(1),\n\t\t\twant:  chain[1].Header(),\n\t\t},\n\t\t\"future_block\": {\n\t\t\tblock:   big.NewInt(1000000000),\n\t\t\twant:    nil,\n\t\t\twantErr: ethereum.NotFound,\n\t\t},\n\t}\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tec := NewClient(client)\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\t\t\tdefer cancel()\n\n\t\t\tgot, err := ec.HeaderByNumber(ctx, tt.block)\n\t\t\tif !errors.Is(err, tt.wantErr) {\n\t\t\t\tt.Fatalf(\"HeaderByNumber(%v) error = %q, want %q\", tt.block, err, tt.wantErr)\n\t\t\t}\n\t\t\tif got != nil && got.Number != nil && got.Number.Sign() == 0 {\n\t\t\t\tgot.Number = big.NewInt(0) // hack to make DeepEqual work\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Fatalf(\"HeaderByNumber(%v)\\n   = %v\\nwant %v\", tt.block, got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testBalanceAt(t *testing.T, client *rpc.Client) {\n\ttests := map[string]struct {\n\t\taccount common.Address\n\t\tblock   *big.Int\n\t\twant    *big.Int\n\t\twantErr error\n\t}{\n\t\t\"valid_account\": {\n\t\t\taccount: testAddr,\n\t\t\tblock:   big.NewInt(1),\n\t\t\twant:    testBalance,\n\t\t},\n\t\t\"non_existent_account\": {\n\t\t\taccount: common.Address{1},\n\t\t\tblock:   big.NewInt(1),\n\t\t\twant:    big.NewInt(0),\n\t\t},\n\t\t\"future_block\": {\n\t\t\taccount: testAddr,\n\t\t\tblock:   big.NewInt(1000000000),\n\t\t\twant:    big.NewInt(0),\n\t\t\twantErr: errors.New(\"header not found\"),\n\t\t},\n\t}\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tec := NewClient(client)\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\t\t\tdefer cancel()\n\n\t\t\tgot, err := ec.BalanceAt(ctx, tt.account, tt.block)\n\t\t\tif tt.wantErr != nil && (err == nil || err.Error() != tt.wantErr.Error()) {\n\t\t\t\tt.Fatalf(\"BalanceAt(%x, %v) error = %q, want %q\", tt.account, tt.block, err, 
tt.wantErr)\n\t\t\t}\n\t\t\tif got.Cmp(tt.want) != 0 {\n\t\t\t\tt.Fatalf(\"BalanceAt(%x, %v) = %v, want %v\", tt.account, tt.block, got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testTransactionInBlockInterrupted(t *testing.T, client *rpc.Client) {\n\tec := NewClient(client)\n\n\t// Get current block by number\n\tblock, err := ec.BlockByNumber(context.Background(), nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\t// Test tx in block interrupted\n\tctx, cancel := context.WithCancel(context.Background())\n\tcancel()\n\ttx, err := ec.TransactionInBlock(ctx, block.Hash(), 1)\n\tif tx != nil {\n\t\tt.Fatal(\"transaction should be nil\")\n\t}\n\tif err == nil || err == ethereum.NotFound {\n\t\tt.Fatal(\"error should not be nil/notfound\")\n\t}\n\t// Test tx in block not found\n\tif _, err := ec.TransactionInBlock(context.Background(), block.Hash(), 1); err != ethereum.NotFound {\n\t\tt.Fatal(\"error should be ethereum.NotFound\")\n\t}\n}\n\nfunc testChainID(t *testing.T, client *rpc.Client) {\n\tec := NewClient(client)\n\tid, err := ec.ChainID(context.Background())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif id == nil || id.Cmp(params.AllEthashProtocolChanges.ChainID) != 0 {\n\t\tt.Fatalf(\"ChainID returned wrong number: %+v\", id)\n\t}\n}\n\nfunc testGetBlock(t *testing.T, client *rpc.Client) {\n\tec := NewClient(client)\n\t// Get current block number\n\tblockNumber, err := ec.BlockNumber(context.Background())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif blockNumber != 1 {\n\t\tt.Fatalf(\"BlockNumber returned wrong number: %d\", blockNumber)\n\t}\n\t// Get current block by number\n\tblock, err := ec.BlockByNumber(context.Background(), new(big.Int).SetUint64(blockNumber))\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif block.NumberU64() != blockNumber {\n\t\tt.Fatalf(\"BlockByNumber returned wrong block: want %d got %d\", blockNumber, block.NumberU64())\n\t}\n\t// Get current block by hash\n\tblockH, err := ec.BlockByHash(context.Background(), block.Hash())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif block.Hash() != blockH.Hash() {\n\t\tt.Fatalf(\"BlockByHash returned wrong block: want %v got %v\", block.Hash().Hex(), blockH.Hash().Hex())\n\t}\n\t// Get header by number\n\theader, err := ec.HeaderByNumber(context.Background(), new(big.Int).SetUint64(blockNumber))\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif block.Header().Hash() != header.Hash() {\n\t\tt.Fatalf(\"HeaderByNumber returned wrong header: want %v got %v\", block.Header().Hash().Hex(), header.Hash().Hex())\n\t}\n\t// Get header by hash\n\theaderH, err := ec.HeaderByHash(context.Background(), block.Hash())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif block.Header().Hash() != headerH.Hash() {\n\t\tt.Fatalf(\"HeaderByHash returned wrong header: want %v got %v\", block.Header().Hash().Hex(), headerH.Hash().Hex())\n\t}\n}\n\nfunc testStatusFunctions(t *testing.T, client *rpc.Client) {\n\tec := NewClient(client)\n\n\t// Sync progress\n\tprogress, err := ec.SyncProgress(context.Background())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif progress != nil {\n\t\tt.Fatalf(\"unexpected progress: %v\", progress)\n\t}\n\t// NetworkID\n\tnetworkID, err := ec.NetworkID(context.Background())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif networkID.Cmp(big.NewInt(0)) != 0 
{\n\t\tt.Fatalf(\"unexpected networkID: %v\", networkID)\n\t}\n\t// SuggestGasPrice (should suggest 1 Gwei)\n\tgasPrice, err := ec.SuggestGasPrice(context.Background())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif gasPrice.Cmp(big.NewInt(1000000000)) != 0 {\n\t\tt.Fatalf(\"unexpected gas price: %v\", gasPrice)\n\t}\n}\n\nfunc testCallContract(t *testing.T, client *rpc.Client) {\n\tec := NewClient(client)\n\n\t// EstimateGas\n\tmsg := ethereum.CallMsg{\n\t\tFrom:     testAddr,\n\t\tTo:       &common.Address{},\n\t\tGas:      21000,\n\t\tGasPrice: big.NewInt(1),\n\t\tValue:    big.NewInt(1),\n\t}\n\tgas, err := ec.EstimateGas(context.Background(), msg)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif gas != 21000 {\n\t\tt.Fatalf(\"unexpected gas: %v\", gas)\n\t}\n\t// CallContract\n\tif _, err := ec.CallContract(context.Background(), msg, big.NewInt(1)); err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\t// PendingCallContract\n\tif _, err := ec.PendingCallContract(context.Background(), msg); err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n}\n\nfunc testAtFunctions(t *testing.T, client *rpc.Client) {\n\tec := NewClient(client)\n\t// Send a transaction for some interesting pending status\n\tsendTransaction(ec)\n\ttime.Sleep(100 * time.Millisecond)\n\t// Check pending transaction count\n\tpending, err := ec.PendingTransactionCount(context.Background())\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif pending != 1 {\n\t\tt.Fatalf(\"unexpected pending, wanted 1 got: %v\", pending)\n\t}\n\t// Query balance\n\tbalance, err := ec.BalanceAt(context.Background(), testAddr, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tpenBalance, err := ec.PendingBalanceAt(context.Background(), testAddr)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif balance.Cmp(penBalance) == 0 {\n\t\tt.Fatalf(\"unexpected balance: %v %v\", balance, penBalance)\n\t}\n\t// NonceAt\n\tnonce, err := ec.NonceAt(context.Background(), testAddr, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tpenNonce, err := ec.PendingNonceAt(context.Background(), testAddr)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif penNonce != nonce+1 {\n\t\tt.Fatalf(\"unexpected nonce: %v %v\", nonce, penNonce)\n\t}\n\t// StorageAt\n\tstorage, err := ec.StorageAt(context.Background(), testAddr, common.Hash{}, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tpenStorage, err := ec.PendingStorageAt(context.Background(), testAddr, common.Hash{})\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif !bytes.Equal(storage, penStorage) {\n\t\tt.Fatalf(\"unexpected storage: %v %v\", storage, penStorage)\n\t}\n\t// CodeAt\n\tcode, err := ec.CodeAt(context.Background(), testAddr, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tpenCode, err := ec.PendingCodeAt(context.Background(), testAddr)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tif !bytes.Equal(code, penCode) {\n\t\tt.Fatalf(\"unexpected code: %v %v\", code, penCode)\n\t}\n}\n\nfunc sendTransaction(ec *Client) error {\n\t// Retrieve chainID\n\tchainID, err := ec.ChainID(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Create transaction\n\ttx := types.NewTransaction(0, common.Address{1}, big.NewInt(1), 22000, big.NewInt(1), nil)\n\tsigner := 
types.LatestSignerForChainID(chainID)\n\tsignature, err := crypto.Sign(signer.Hash(tx).Bytes(), testKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsignedTx, err := tx.WithSignature(signer, signature)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Send transaction\n\treturn ec.SendTransaction(context.Background(), signedTx)\n}\n"
  },
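  {
    "path": "examples/ethclient_filter_sketch.go",
    "content": "// NOTE (editor): an illustrative sketch, not part of the original source tree. It\n// shows the ethereum.FilterQuery rules that toFilterArg (tested in ethclient_test.go\n// above) enforces: BlockHash is mutually exclusive with FromBlock/ToBlock, a nil\n// FromBlock defaults to \"0x0\" and a nil ToBlock to \"latest\". The endpoint and the\n// filtered address are placeholders.\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"math/big\"\n\n\t\"github.com/ethereum/go-ethereum\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/ethclient\"\n)\n\nfunc main() {\n\t// Subscriptions need a ws:// or IPC endpoint; plain HTTP only supports FilterLogs.\n\tclient, err := ethclient.Dial(\"ws://127.0.0.1:8546\")\n\tif err != nil {\n\t\tlog.Fatalf(\"dial: %v\", err)\n\t}\n\tdefer client.Close()\n\n\tq := ethereum.FilterQuery{\n\t\tAddresses: []common.Address{common.HexToAddress(\"0xD36722ADeC3EdCB29c8e7b5a47f352D701393462\")},\n\t\tFromBlock: big.NewInt(1), // nil would be sent as \"0x0\"\n\t\tToBlock:   nil,           // nil is sent as \"latest\"\n\t\t// Setting BlockHash together with FromBlock/ToBlock would make toFilterArg fail.\n\t}\n\tlogs, err := client.FilterLogs(context.Background(), q)\n\tif err != nil {\n\t\tlog.Fatalf(\"filter: %v\", err)\n\t}\n\tfmt.Println(\"matching logs:\", len(logs))\n\n\t// The same query can be streamed: new matches are delivered on ch until the\n\t// subscription is unsubscribed or fails.\n\tch := make(chan types.Log)\n\tsub, err := client.SubscribeFilterLogs(context.Background(), q, ch)\n\tif err != nil {\n\t\tlog.Fatalf(\"subscribe: %v\", err)\n\t}\n\tdefer sub.Unsubscribe()\n\n\tselect {\n\tcase err := <-sub.Err():\n\t\tlog.Fatalf(\"subscription: %v\", err)\n\tcase l := <-ch:\n\t\tfmt.Println(\"first streamed log:\", l.TxHash)\n\t}\n}\n"
  },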
  {
    "path": "ethclient/signer.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage ethclient\n\nimport (\n\t\"errors\"\n\t\"math/big\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n)\n\n// senderFromServer is a types.Signer that remembers the sender address returned by the RPC\n// server. It is stored in the transaction's sender address cache to avoid an additional\n// request in TransactionSender.\ntype senderFromServer struct {\n\taddr      common.Address\n\tblockhash common.Hash\n}\n\nvar errNotCached = errors.New(\"sender not cached\")\n\nfunc setSenderFromServer(tx *types.Transaction, addr common.Address, block common.Hash) {\n\t// Use types.Sender for side-effect to store our signer into the cache.\n\ttypes.Sender(&senderFromServer{addr, block}, tx)\n}\n\nfunc (s *senderFromServer) Equal(other types.Signer) bool {\n\tos, ok := other.(*senderFromServer)\n\treturn ok && os.blockhash == s.blockhash\n}\n\nfunc (s *senderFromServer) Sender(tx *types.Transaction) (common.Address, error) {\n\tif s.blockhash == (common.Hash{}) {\n\t\treturn common.Address{}, errNotCached\n\t}\n\treturn s.addr, nil\n}\n\nfunc (s *senderFromServer) ChainID() *big.Int {\n\tpanic(\"can't sign with senderFromServer\")\n}\nfunc (s *senderFromServer) Hash(tx *types.Transaction) common.Hash {\n\tpanic(\"can't sign with senderFromServer\")\n}\nfunc (s *senderFromServer) SignatureValues(tx *types.Transaction, sig []byte) (R, S, V *big.Int, err error) {\n\tpanic(\"can't sign with senderFromServer\")\n}\n"
  },
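  {
    "path": "examples/ethclient_sender_sketch.go",
    "content": "// NOTE (editor): an illustrative sketch, not part of the original source tree. It\n// demonstrates the fast path documented on TransactionSender: TransactionByHash\n// caches the server-reported sender (see setSenderFromServer in signer.go above),\n// so a later TransactionSender call with the same block hash should resolve from\n// the cache without another RPC round trip. Endpoint and hash are placeholders.\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/ethclient\"\n)\n\nfunc main() {\n\tclient, err := ethclient.Dial(\"ws://127.0.0.1:8546\")\n\tif err != nil {\n\t\tlog.Fatalf(\"dial: %v\", err)\n\t}\n\tdefer client.Close()\n\tctx := context.Background()\n\n\t// Placeholder: the hash of a transaction already mined on the target chain.\n\ttxHash := common.HexToHash(\"0x1111111111111111111111111111111111111111111111111111111111111111\")\n\n\ttx, isPending, err := client.TransactionByHash(ctx, txHash)\n\tif err != nil || isPending {\n\t\tlog.Fatalf(\"need a mined transaction: err=%v pending=%v\", err, isPending)\n\t}\n\n\t// The receipt supplies the inclusion block hash and index that TransactionSender requires.\n\treceipt, err := client.TransactionReceipt(ctx, txHash)\n\tif err != nil {\n\t\tlog.Fatalf(\"receipt: %v\", err)\n\t}\n\n\t// Served from the sender cache populated by TransactionByHash above.\n\tfrom, err := client.TransactionSender(ctx, tx, receipt.BlockHash, receipt.TransactionIndex)\n\tif err != nil {\n\t\tlog.Fatalf(\"sender: %v\", err)\n\t}\n\tfmt.Println(\"sender:\", from.Hex())\n}\n"
  },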
  {
    "path": "ethstats/ethstats.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package ethstats implements the network stats reporting service.\npackage ethstats\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"net/http\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/consensus\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\tethproto \"github.com/ethereum/go-ethereum/eth/protocols/eth\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/les\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/miner\"\n\t\"github.com/ethereum/go-ethereum/node\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n\t\"github.com/gorilla/websocket\"\n)\n\nconst (\n\t// historyUpdateRange is the number of blocks a node should report upon login or\n\t// history request.\n\thistoryUpdateRange = 50\n\n\t// txChanSize is the size of channel listening to NewTxsEvent.\n\t// The number is referenced from the size of tx pool.\n\ttxChanSize = 4096\n\t// chainHeadChanSize is the size of channel listening to ChainHeadEvent.\n\tchainHeadChanSize = 10\n)\n\n// backend encompasses the bare-minimum functionality needed for ethstats reporting\ntype backend interface {\n\tSubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription\n\tSubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription\n\tCurrentHeader() *types.Header\n\tHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error)\n\tGetTd(ctx context.Context, hash common.Hash) *big.Int\n\tStats() (pending int, queued int)\n\tDownloader() *downloader.Downloader\n}\n\n// fullNodeBackend encompasses the functionality necessary for a full node\n// reporting to ethstats\ntype fullNodeBackend interface {\n\tbackend\n\tMiner() *miner.Miner\n\tBlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error)\n\tCurrentBlock() *types.Block\n\tSuggestPrice(ctx context.Context) (*big.Int, error)\n}\n\n// Service implements an Ethereum netstats reporting daemon that pushes local\n// chain statistics up to a monitoring server.\ntype Service struct {\n\tserver  *p2p.Server // Peer-to-peer server to retrieve networking infos\n\tbackend backend\n\tengine  consensus.Engine // Consensus engine to retrieve variadic block fields\n\n\tnode string // Name of the node to display on the monitoring page\n\tpass string // Password to authorize access to the monitoring page\n\thost string // 
Remote address of the monitoring service\n\n\tpongCh chan struct{} // Pong notifications are fed into this channel\n\thistCh chan []uint64 // History request block numbers are fed into this channel\n\n\theadSub event.Subscription\n\ttxSub   event.Subscription\n}\n\n// connWrapper is a wrapper to prevent concurrent-write or concurrent-read on the\n// websocket.\n//\n// From Gorilla websocket docs:\n//   Connections support one concurrent reader and one concurrent writer.\n//   Applications are responsible for ensuring that no more than one goroutine calls the write methods\n//     - NextWriter, SetWriteDeadline, WriteMessage, WriteJSON, EnableWriteCompression, SetCompressionLevel\n//   concurrently and that no more than one goroutine calls the read methods\n//     - NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler\n//   concurrently.\n//   The Close and WriteControl methods can be called concurrently with all other methods.\ntype connWrapper struct {\n\tconn *websocket.Conn\n\n\trlock sync.Mutex\n\twlock sync.Mutex\n}\n\nfunc newConnectionWrapper(conn *websocket.Conn) *connWrapper {\n\treturn &connWrapper{conn: conn}\n}\n\n// WriteJSON wraps corresponding method on the websocket but is safe for concurrent calling\nfunc (w *connWrapper) WriteJSON(v interface{}) error {\n\tw.wlock.Lock()\n\tdefer w.wlock.Unlock()\n\n\treturn w.conn.WriteJSON(v)\n}\n\n// ReadJSON wraps corresponding method on the websocket but is safe for concurrent calling\nfunc (w *connWrapper) ReadJSON(v interface{}) error {\n\tw.rlock.Lock()\n\tdefer w.rlock.Unlock()\n\n\treturn w.conn.ReadJSON(v)\n}\n\n// Close wraps corresponding method on the websocket but is safe for concurrent calling\nfunc (w *connWrapper) Close() error {\n\t// The Close and WriteControl methods can be called concurrently with all other methods,\n\t// so the mutex is not used here\n\treturn w.conn.Close()\n}\n\n// New returns a monitoring service ready for stats reporting.\nfunc New(node *node.Node, backend backend, engine consensus.Engine, url string) error {\n\t// Parse the netstats connection url\n\tre := regexp.MustCompile(\"([^:@]*)(:([^@]*))?@(.+)\")\n\tparts := re.FindStringSubmatch(url)\n\tif len(parts) != 5 {\n\t\treturn fmt.Errorf(\"invalid netstats url: \\\"%s\\\", should be nodename:secret@host:port\", url)\n\t}\n\tethstats := &Service{\n\t\tbackend: backend,\n\t\tengine:  engine,\n\t\tserver:  node.Server(),\n\t\tnode:    parts[1],\n\t\tpass:    parts[3],\n\t\thost:    parts[4],\n\t\tpongCh:  make(chan struct{}),\n\t\thistCh:  make(chan []uint64, 1),\n\t}\n\n\tnode.RegisterLifecycle(ethstats)\n\treturn nil\n}\n\n// Start implements node.Lifecycle, starting up the monitoring and reporting daemon.\nfunc (s *Service) Start() error {\n\t// Subscribe to chain events to execute updates on\n\tchainHeadCh := make(chan core.ChainHeadEvent, chainHeadChanSize)\n\ts.headSub = s.backend.SubscribeChainHeadEvent(chainHeadCh)\n\ttxEventCh := make(chan core.NewTxsEvent, txChanSize)\n\ts.txSub = s.backend.SubscribeNewTxsEvent(txEventCh)\n\tgo s.loop(chainHeadCh, txEventCh)\n\n\tlog.Info(\"Stats daemon started\")\n\treturn nil\n}\n\n// Stop implements node.Lifecycle, terminating the monitoring and reporting daemon.\nfunc (s *Service) Stop() error {\n\ts.headSub.Unsubscribe()\n\ts.txSub.Unsubscribe()\n\tlog.Info(\"Stats daemon stopped\")\n\treturn nil\n}\n\n// loop keeps trying to connect to the netstats server, reporting chain events\n// until termination.\nfunc (s *Service) loop(chainHeadCh chan core.ChainHeadEvent, 
txEventCh chan core.NewTxsEvent) {\n\t// Start a goroutine that exhausts the subscriptions to avoid events piling up\n\tvar (\n\t\tquitCh = make(chan struct{})\n\t\theadCh = make(chan *types.Block, 1)\n\t\ttxCh   = make(chan struct{}, 1)\n\t)\n\tgo func() {\n\t\tvar lastTx mclock.AbsTime\n\n\tHandleLoop:\n\t\tfor {\n\t\t\tselect {\n\t\t\t// Notify of chain head events, but drop if too frequent\n\t\t\tcase head := <-chainHeadCh:\n\t\t\t\tselect {\n\t\t\t\tcase headCh <- head.Block:\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t// Notify of new transaction events, but drop if too frequent\n\t\t\tcase <-txEventCh:\n\t\t\t\tif time.Duration(mclock.Now()-lastTx) < time.Second {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tlastTx = mclock.Now()\n\n\t\t\t\tselect {\n\t\t\t\tcase txCh <- struct{}{}:\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t// node stopped\n\t\t\tcase <-s.txSub.Err():\n\t\t\t\tbreak HandleLoop\n\t\t\tcase <-s.headSub.Err():\n\t\t\t\tbreak HandleLoop\n\t\t\t}\n\t\t}\n\t\tclose(quitCh)\n\t}()\n\n\t// Resolve the URL, defaulting to TLS, but falling back to none too\n\tpath := fmt.Sprintf(\"%s/api\", s.host)\n\turls := []string{path}\n\n\t// url.Parse and url.IsAbs is unsuitable (https://github.com/golang/go/issues/19779)\n\tif !strings.Contains(path, \"://\") {\n\t\turls = []string{\"wss://\" + path, \"ws://\" + path}\n\t}\n\n\terrTimer := time.NewTimer(0)\n\tdefer errTimer.Stop()\n\t// Loop reporting until termination\n\tfor {\n\t\tselect {\n\t\tcase <-quitCh:\n\t\t\treturn\n\t\tcase <-errTimer.C:\n\t\t\t// Establish a websocket connection to the server on any supported URL\n\t\t\tvar (\n\t\t\t\tconn *connWrapper\n\t\t\t\terr  error\n\t\t\t)\n\t\t\tdialer := websocket.Dialer{HandshakeTimeout: 5 * time.Second}\n\t\t\theader := make(http.Header)\n\t\t\theader.Set(\"origin\", \"http://localhost\")\n\t\t\tfor _, url := range urls {\n\t\t\t\tc, _, e := dialer.Dial(url, header)\n\t\t\t\terr = e\n\t\t\t\tif err == nil {\n\t\t\t\t\tconn = newConnectionWrapper(c)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"Stats server unreachable\", \"err\", err)\n\t\t\t\terrTimer.Reset(10 * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Authenticate the client with the server\n\t\t\tif err = s.login(conn); err != nil {\n\t\t\t\tlog.Warn(\"Stats login failed\", \"err\", err)\n\t\t\t\tconn.Close()\n\t\t\t\terrTimer.Reset(10 * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo s.readLoop(conn)\n\n\t\t\t// Send the initial stats so our node looks decent from the get go\n\t\t\tif err = s.report(conn); err != nil {\n\t\t\t\tlog.Warn(\"Initial stats report failed\", \"err\", err)\n\t\t\t\tconn.Close()\n\t\t\t\terrTimer.Reset(0)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Keep sending status updates until the connection breaks\n\t\t\tfullReport := time.NewTicker(15 * time.Second)\n\n\t\t\tfor err == nil {\n\t\t\t\tselect {\n\t\t\t\tcase <-quitCh:\n\t\t\t\t\tfullReport.Stop()\n\t\t\t\t\t// Make sure the connection is closed\n\t\t\t\t\tconn.Close()\n\t\t\t\t\treturn\n\n\t\t\t\tcase <-fullReport.C:\n\t\t\t\t\tif err = s.report(conn); err != nil {\n\t\t\t\t\t\tlog.Warn(\"Full stats report failed\", \"err\", err)\n\t\t\t\t\t}\n\t\t\t\tcase list := <-s.histCh:\n\t\t\t\t\tif err = s.reportHistory(conn, list); err != nil {\n\t\t\t\t\t\tlog.Warn(\"Requested history report failed\", \"err\", err)\n\t\t\t\t\t}\n\t\t\t\tcase head := <-headCh:\n\t\t\t\t\tif err = s.reportBlock(conn, head); err != nil {\n\t\t\t\t\t\tlog.Warn(\"Block stats report failed\", \"err\", err)\n\t\t\t\t\t}\n\t\t\t\t\tif err = 
s.reportPending(conn); err != nil {\n\t\t\t\t\t\tlog.Warn(\"Post-block transaction stats report failed\", \"err\", err)\n\t\t\t\t\t}\n\t\t\t\tcase <-txCh:\n\t\t\t\t\tif err = s.reportPending(conn); err != nil {\n\t\t\t\t\t\tlog.Warn(\"Transaction stats report failed\", \"err\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfullReport.Stop()\n\n\t\t\t// Close the current connection and establish a new one\n\t\t\tconn.Close()\n\t\t\terrTimer.Reset(0)\n\t\t}\n\t}\n}\n\n// readLoop loops as long as the connection is alive and retrieves data packets\n// from the network socket. If any of them match an active request, it forwards\n// it; if they themselves are requests, it initiates a reply; and lastly it drops\n// unknown packets.\nfunc (s *Service) readLoop(conn *connWrapper) {\n\t// If the read loop exits, close the connection\n\tdefer conn.Close()\n\n\tfor {\n\t\t// Retrieve the next generic network packet and bail out on error\n\t\tvar blob json.RawMessage\n\t\tif err := conn.ReadJSON(&blob); err != nil {\n\t\t\tlog.Warn(\"Failed to retrieve stats server message\", \"err\", err)\n\t\t\treturn\n\t\t}\n\t\t// If the network packet is a system ping, respond to it directly\n\t\tvar ping string\n\t\tif err := json.Unmarshal(blob, &ping); err == nil && strings.HasPrefix(ping, \"primus::ping::\") {\n\t\t\tif err := conn.WriteJSON(strings.Replace(ping, \"ping\", \"pong\", -1)); err != nil {\n\t\t\t\tlog.Warn(\"Failed to respond to system ping message\", \"err\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// Not a system ping, try to decode an actual state message\n\t\tvar msg map[string][]interface{}\n\t\tif err := json.Unmarshal(blob, &msg); err != nil {\n\t\t\tlog.Warn(\"Failed to decode stats server message\", \"err\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Trace(\"Received message from stats server\", \"msg\", msg)\n\t\tif len(msg[\"emit\"]) == 0 {\n\t\t\tlog.Warn(\"Stats server sent non-broadcast\", \"msg\", msg)\n\t\t\treturn\n\t\t}\n\t\tcommand, ok := msg[\"emit\"][0].(string)\n\t\tif !ok {\n\t\t\tlog.Warn(\"Invalid stats server message type\", \"type\", msg[\"emit\"][0])\n\t\t\treturn\n\t\t}\n\t\t// If the message is a ping reply, deliver (someone must be listening!)\n\t\tif len(msg[\"emit\"]) == 2 && command == \"node-pong\" {\n\t\t\tselect {\n\t\t\tcase s.pongCh <- struct{}{}:\n\t\t\t\t// Pong delivered, continue listening\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\t// Ping routine dead, abort\n\t\t\t\tlog.Warn(\"Stats server pinger seems to have died\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t// If the message is a history request, forward to the event processor\n\t\tif len(msg[\"emit\"]) == 2 && command == \"history\" {\n\t\t\t// Make sure the request is valid and doesn't crash us\n\t\t\trequest, ok := msg[\"emit\"][1].(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\tlog.Warn(\"Invalid stats history request\", \"msg\", msg[\"emit\"][1])\n\t\t\t\tselect {\n\t\t\t\tcase s.histCh <- nil: // Treat it as a no-indexes request\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlist, ok := request[\"list\"].([]interface{})\n\t\t\tif !ok {\n\t\t\t\tlog.Warn(\"Invalid stats history block list\", \"list\", request[\"list\"])\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Convert the block number list to an integer list\n\t\t\tnumbers := make([]uint64, len(list))\n\t\t\tfor i, num := range list {\n\t\t\t\tn, ok := num.(float64)\n\t\t\t\tif !ok {\n\t\t\t\t\tlog.Warn(\"Invalid stats history block number\", \"number\", num)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tnumbers[i] = 
uint64(n)\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase s.histCh <- numbers:\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\t// Report anything else and continue\n\t\tlog.Info(\"Unknown stats message\", \"msg\", msg)\n\t}\n}\n\n// nodeInfo is the collection of meta information about a node that is displayed\n// on the monitoring page.\ntype nodeInfo struct {\n\tName     string `json:\"name\"`\n\tNode     string `json:\"node\"`\n\tPort     int    `json:\"port\"`\n\tNetwork  string `json:\"net\"`\n\tProtocol string `json:\"protocol\"`\n\tAPI      string `json:\"api\"`\n\tOs       string `json:\"os\"`\n\tOsVer    string `json:\"os_v\"`\n\tClient   string `json:\"client\"`\n\tHistory  bool   `json:\"canUpdateHistory\"`\n}\n\n// authMsg is the authentication infos needed to login to a monitoring server.\ntype authMsg struct {\n\tID     string   `json:\"id\"`\n\tInfo   nodeInfo `json:\"info\"`\n\tSecret string   `json:\"secret\"`\n}\n\n// login tries to authorize the client at the remote server.\nfunc (s *Service) login(conn *connWrapper) error {\n\t// Construct and send the login authentication\n\tinfos := s.server.NodeInfo()\n\n\tvar protocols []string\n\tfor _, proto := range s.server.Protocols {\n\t\tprotocols = append(protocols, fmt.Sprintf(\"%s/%d\", proto.Name, proto.Version))\n\t}\n\tvar network string\n\tif info := infos.Protocols[\"eth\"]; info != nil {\n\t\tnetwork = fmt.Sprintf(\"%d\", info.(*ethproto.NodeInfo).Network)\n\t} else {\n\t\tnetwork = fmt.Sprintf(\"%d\", infos.Protocols[\"les\"].(*les.NodeInfo).Network)\n\t}\n\tauth := &authMsg{\n\t\tID: s.node,\n\t\tInfo: nodeInfo{\n\t\t\tName:     s.node,\n\t\t\tNode:     infos.Name,\n\t\t\tPort:     infos.Ports.Listener,\n\t\t\tNetwork:  network,\n\t\t\tProtocol: strings.Join(protocols, \", \"),\n\t\t\tAPI:      \"No\",\n\t\t\tOs:       runtime.GOOS,\n\t\t\tOsVer:    runtime.GOARCH,\n\t\t\tClient:   \"0.1.1\",\n\t\t\tHistory:  true,\n\t\t},\n\t\tSecret: s.pass,\n\t}\n\tlogin := map[string][]interface{}{\n\t\t\"emit\": {\"hello\", auth},\n\t}\n\tif err := conn.WriteJSON(login); err != nil {\n\t\treturn err\n\t}\n\t// Retrieve the remote ack or connection termination\n\tvar ack map[string][]string\n\tif err := conn.ReadJSON(&ack); err != nil || len(ack[\"emit\"]) != 1 || ack[\"emit\"][0] != \"ready\" {\n\t\treturn errors.New(\"unauthorized\")\n\t}\n\treturn nil\n}\n\n// report collects all possible data to report and send it to the stats server.\n// This should only be used on reconnects or rarely to avoid overloading the\n// server. 
Use the individual methods for reporting subscribed events.\nfunc (s *Service) report(conn *connWrapper) error {\n\tif err := s.reportLatency(conn); err != nil {\n\t\treturn err\n\t}\n\tif err := s.reportBlock(conn, nil); err != nil {\n\t\treturn err\n\t}\n\tif err := s.reportPending(conn); err != nil {\n\t\treturn err\n\t}\n\tif err := s.reportStats(conn); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// reportLatency sends a ping request to the server, measures the RTT time and\n// finally sends a latency update.\nfunc (s *Service) reportLatency(conn *connWrapper) error {\n\t// Send the current time to the ethstats server\n\tstart := time.Now()\n\n\tping := map[string][]interface{}{\n\t\t\"emit\": {\"node-ping\", map[string]string{\n\t\t\t\"id\":         s.node,\n\t\t\t\"clientTime\": start.String(),\n\t\t}},\n\t}\n\tif err := conn.WriteJSON(ping); err != nil {\n\t\treturn err\n\t}\n\t// Wait for the pong request to arrive back\n\tselect {\n\tcase <-s.pongCh:\n\t\t// Pong delivered, report the latency\n\tcase <-time.After(5 * time.Second):\n\t\t// Ping timeout, abort\n\t\treturn errors.New(\"ping timed out\")\n\t}\n\tlatency := strconv.Itoa(int((time.Since(start) / time.Duration(2)).Nanoseconds() / 1000000))\n\n\t// Send back the measured latency\n\tlog.Trace(\"Sending measured latency to ethstats\", \"latency\", latency)\n\n\tstats := map[string][]interface{}{\n\t\t\"emit\": {\"latency\", map[string]string{\n\t\t\t\"id\":      s.node,\n\t\t\t\"latency\": latency,\n\t\t}},\n\t}\n\treturn conn.WriteJSON(stats)\n}\n\n// blockStats is the information to report about individual blocks.\ntype blockStats struct {\n\tNumber     *big.Int       `json:\"number\"`\n\tHash       common.Hash    `json:\"hash\"`\n\tParentHash common.Hash    `json:\"parentHash\"`\n\tTimestamp  *big.Int       `json:\"timestamp\"`\n\tMiner      common.Address `json:\"miner\"`\n\tGasUsed    uint64         `json:\"gasUsed\"`\n\tGasLimit   uint64         `json:\"gasLimit\"`\n\tDiff       string         `json:\"difficulty\"`\n\tTotalDiff  string         `json:\"totalDifficulty\"`\n\tTxs        []txStats      `json:\"transactions\"`\n\tTxHash     common.Hash    `json:\"transactionsRoot\"`\n\tRoot       common.Hash    `json:\"stateRoot\"`\n\tUncles     uncleStats     `json:\"uncles\"`\n}\n\n// txStats is the information to report about individual transactions.\ntype txStats struct {\n\tHash common.Hash `json:\"hash\"`\n}\n\n// uncleStats is a custom wrapper around an uncle array to force serializing\n// empty arrays instead of returning null for them.\ntype uncleStats []*types.Header\n\nfunc (s uncleStats) MarshalJSON() ([]byte, error) {\n\tif uncles := ([]*types.Header)(s); len(uncles) > 0 {\n\t\treturn json.Marshal(uncles)\n\t}\n\treturn []byte(\"[]\"), nil\n}\n\n// reportBlock retrieves the current chain head and reports it to the stats server.\nfunc (s *Service) reportBlock(conn *connWrapper, block *types.Block) error {\n\t// Gather the block details from the header or block chain\n\tdetails := s.assembleBlockStats(block)\n\n\t// Assemble the block report and send it to the server\n\tlog.Trace(\"Sending new block to ethstats\", \"number\", details.Number, \"hash\", details.Hash)\n\n\tstats := map[string]interface{}{\n\t\t\"id\":    s.node,\n\t\t\"block\": details,\n\t}\n\treport := map[string][]interface{}{\n\t\t\"emit\": {\"block\", stats},\n\t}\n\treturn conn.WriteJSON(report)\n}\n\n// assembleBlockStats retrieves any required metadata to report a single block\n// and assembles the block stats. 
If block is nil, the current head is processed.\nfunc (s *Service) assembleBlockStats(block *types.Block) *blockStats {\n\t// Gather the block infos from the local blockchain\n\tvar (\n\t\theader *types.Header\n\t\ttd     *big.Int\n\t\ttxs    []txStats\n\t\tuncles []*types.Header\n\t)\n\n\t// check if backend is a full node\n\tfullBackend, ok := s.backend.(fullNodeBackend)\n\tif ok {\n\t\tif block == nil {\n\t\t\tblock = fullBackend.CurrentBlock()\n\t\t}\n\t\theader = block.Header()\n\t\ttd = fullBackend.GetTd(context.Background(), header.Hash())\n\n\t\ttxs = make([]txStats, len(block.Transactions()))\n\t\tfor i, tx := range block.Transactions() {\n\t\t\ttxs[i].Hash = tx.Hash()\n\t\t}\n\t\tuncles = block.Uncles()\n\t} else {\n\t\t// Light nodes would need on-demand lookups for transactions/uncles, skip\n\t\tif block != nil {\n\t\t\theader = block.Header()\n\t\t} else {\n\t\t\theader = s.backend.CurrentHeader()\n\t\t}\n\t\ttd = s.backend.GetTd(context.Background(), header.Hash())\n\t\ttxs = []txStats{}\n\t}\n\n\t// Assemble and return the block stats\n\tauthor, _ := s.engine.Author(header)\n\n\treturn &blockStats{\n\t\tNumber:     header.Number,\n\t\tHash:       header.Hash(),\n\t\tParentHash: header.ParentHash,\n\t\tTimestamp:  new(big.Int).SetUint64(header.Time),\n\t\tMiner:      author,\n\t\tGasUsed:    header.GasUsed,\n\t\tGasLimit:   header.GasLimit,\n\t\tDiff:       header.Difficulty.String(),\n\t\tTotalDiff:  td.String(),\n\t\tTxs:        txs,\n\t\tTxHash:     header.TxHash,\n\t\tRoot:       header.Root,\n\t\tUncles:     uncles,\n\t}\n}\n\n// reportHistory retrieves the most recent batch of blocks and reports it to the\n// stats server.\nfunc (s *Service) reportHistory(conn *connWrapper, list []uint64) error {\n\t// Figure out the indexes that need reporting\n\tindexes := make([]uint64, 0, historyUpdateRange)\n\tif len(list) > 0 {\n\t\t// Specific indexes requested, send them back in particular\n\t\tindexes = append(indexes, list...)\n\t} else {\n\t\t// No indexes requested, send back the top ones\n\t\thead := s.backend.CurrentHeader().Number.Int64()\n\t\tstart := head - historyUpdateRange + 1\n\t\tif start < 0 {\n\t\t\tstart = 0\n\t\t}\n\t\tfor i := uint64(start); i <= uint64(head); i++ {\n\t\t\tindexes = append(indexes, i)\n\t\t}\n\t}\n\t// Gather the batch of blocks to report\n\thistory := make([]*blockStats, len(indexes))\n\tfor i, number := range indexes {\n\t\tfullBackend, ok := s.backend.(fullNodeBackend)\n\t\t// Retrieve the next block if it's known to us\n\t\tvar block *types.Block\n\t\tif ok {\n\t\t\tblock, _ = fullBackend.BlockByNumber(context.Background(), rpc.BlockNumber(number)) // TODO ignore error here ?\n\t\t} else {\n\t\t\tif header, _ := s.backend.HeaderByNumber(context.Background(), rpc.BlockNumber(number)); header != nil {\n\t\t\t\tblock = types.NewBlockWithHeader(header)\n\t\t\t}\n\t\t}\n\t\t// If we do have the block, add to the history and continue\n\t\tif block != nil {\n\t\t\thistory[len(history)-1-i] = s.assembleBlockStats(block)\n\t\t\tcontinue\n\t\t}\n\t\t// Ran out of blocks, cut the report short and send\n\t\thistory = history[len(history)-i:]\n\t\tbreak\n\t}\n\t// Assemble the history report and send it to the server\n\tif len(history) > 0 {\n\t\tlog.Trace(\"Sending historical blocks to ethstats\", \"first\", history[0].Number, \"last\", history[len(history)-1].Number)\n\t} else {\n\t\tlog.Trace(\"No history to send to stats server\")\n\t}\n\tstats := map[string]interface{}{\n\t\t\"id\":      s.node,\n\t\t\"history\": history,\n\t}\n\treport := 
map[string][]interface{}{\n\t\t\"emit\": {\"history\", stats},\n\t}\n\treturn conn.WriteJSON(report)\n}\n\n// pendStats is the information to report about pending transactions.\ntype pendStats struct {\n\tPending int `json:\"pending\"`\n}\n\n// reportPending retrieves the current number of pending transactions and reports\n// it to the stats server.\nfunc (s *Service) reportPending(conn *connWrapper) error {\n\t// Retrieve the pending count from the local blockchain\n\tpending, _ := s.backend.Stats()\n\t// Assemble the transaction stats and send it to the server\n\tlog.Trace(\"Sending pending transactions to ethstats\", \"count\", pending)\n\n\tstats := map[string]interface{}{\n\t\t\"id\": s.node,\n\t\t\"stats\": &pendStats{\n\t\t\tPending: pending,\n\t\t},\n\t}\n\treport := map[string][]interface{}{\n\t\t\"emit\": {\"pending\", stats},\n\t}\n\treturn conn.WriteJSON(report)\n}\n\n// nodeStats is the information to report about the local node.\ntype nodeStats struct {\n\tActive   bool `json:\"active\"`\n\tSyncing  bool `json:\"syncing\"`\n\tMining   bool `json:\"mining\"`\n\tHashrate int  `json:\"hashrate\"`\n\tPeers    int  `json:\"peers\"`\n\tGasPrice int  `json:\"gasPrice\"`\n\tUptime   int  `json:\"uptime\"`\n}\n\n// reportStats retrieves various stats about the node at the networking and\n// mining layer and reports it to the stats server.\nfunc (s *Service) reportStats(conn *connWrapper) error {\n\t// Gather the syncing and mining infos from the local miner instance\n\tvar (\n\t\tmining   bool\n\t\thashrate int\n\t\tsyncing  bool\n\t\tgasprice int\n\t)\n\t// check if backend is a full node\n\tfullBackend, ok := s.backend.(fullNodeBackend)\n\tif ok {\n\t\tmining = fullBackend.Miner().Mining()\n\t\thashrate = int(fullBackend.Miner().Hashrate())\n\n\t\tsync := fullBackend.Downloader().Progress()\n\t\tsyncing = fullBackend.CurrentHeader().Number.Uint64() >= sync.HighestBlock\n\n\t\tprice, _ := fullBackend.SuggestPrice(context.Background())\n\t\tgasprice = int(price.Uint64())\n\t} else {\n\t\tsync := s.backend.Downloader().Progress()\n\t\tsyncing = s.backend.CurrentHeader().Number.Uint64() >= sync.HighestBlock\n\t}\n\t// Assemble the node stats and send it to the server\n\tlog.Trace(\"Sending node details to ethstats\")\n\n\tstats := map[string]interface{}{\n\t\t\"id\": s.node,\n\t\t\"stats\": &nodeStats{\n\t\t\tActive:   true,\n\t\t\tMining:   mining,\n\t\t\tHashrate: hashrate,\n\t\t\tPeers:    s.server.PeerCount(),\n\t\t\tGasPrice: gasprice,\n\t\t\tSyncing:  syncing,\n\t\t\tUptime:   100,\n\t\t},\n\t}\n\treport := map[string][]interface{}{\n\t\t\"emit\": {\"stats\", stats},\n\t}\n\treturn conn.WriteJSON(report)\n}\n"
  },
  {
    "path": "ffi-toolkit/Cargo.toml",
    "content": "[package]\nname = \"ffi-toolkit\"\nversion = \"0.1.0\"\nauthors = [\"laser <l@s3r.com>\"]\nlicense = \"MIT OR Apache-2.0\"\n\nedition = \"2018\"\n\n[dependencies]\nlibc = \"0.2\"\n"
  },
  {
    "path": "ffi-toolkit/LICENSE-APACHE",
    "content": "                              Apache License\n                        Version 2.0, January 2004\n                     http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n   \"License\" shall mean the terms and conditions for use, reproduction,\n   and distribution as defined by Sections 1 through 9 of this document.\n\n   \"Licensor\" shall mean the copyright owner or entity authorized by\n   the copyright owner that is granting the License.\n\n   \"Legal Entity\" shall mean the union of the acting entity and all\n   other entities that control, are controlled by, or are under common\n   control with that entity. For the purposes of this definition,\n   \"control\" means (i) the power, direct or indirect, to cause the\n   direction or management of such entity, whether by contract or\n   otherwise, or (ii) ownership of fifty percent (50%) or more of the\n   outstanding shares, or (iii) beneficial ownership of such entity.\n\n   \"You\" (or \"Your\") shall mean an individual or Legal Entity\n   exercising permissions granted by this License.\n\n   \"Source\" form shall mean the preferred form for making modifications,\n   including but not limited to software source code, documentation\n   source, and configuration files.\n\n   \"Object\" form shall mean any form resulting from mechanical\n   transformation or translation of a Source form, including but\n   not limited to compiled object code, generated documentation,\n   and conversions to other media types.\n\n   \"Work\" shall mean the work of authorship, whether in Source or\n   Object form, made available under the License, as indicated by a\n   copyright notice that is included in or attached to the work\n   (an example is provided in the Appendix below).\n\n   \"Derivative Works\" shall mean any work, whether in Source or Object\n   form, that is based on (or derived from) the Work and for which the\n   editorial revisions, annotations, elaborations, or other modifications\n   represent, as a whole, an original work of authorship. For the purposes\n   of this License, Derivative Works shall not include works that remain\n   separable from, or merely link (or bind by name) to the interfaces of,\n   the Work and Derivative Works thereof.\n\n   \"Contribution\" shall mean any work of authorship, including\n   the original version of the Work and any modifications or additions\n   to that Work or Derivative Works thereof, that is intentionally\n   submitted to Licensor for inclusion in the Work by the copyright owner\n   or by an individual or Legal Entity authorized to submit on behalf of\n   the copyright owner. For the purposes of this definition, \"submitted\"\n   means any form of electronic, verbal, or written communication sent\n   to the Licensor or its representatives, including but not limited to\n   communication on electronic mailing lists, source code control systems,\n   and issue tracking systems that are managed by, or on behalf of, the\n   Licensor for the purpose of discussing and improving the Work, but\n   excluding communication that is conspicuously marked or otherwise\n   designated in writing by the copyright owner as \"Not a Contribution.\"\n\n   \"Contributor\" shall mean Licensor and any individual or Legal Entity\n   on behalf of whom a Contribution has been received by Licensor and\n   subsequently incorporated within the Work.\n\n2. Grant of Copyright License. 
Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   copyright license to reproduce, prepare Derivative Works of,\n   publicly display, publicly perform, sublicense, and distribute the\n   Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   (except as stated in this section) patent license to make, have made,\n   use, offer to sell, sell, import, and otherwise transfer the Work,\n   where such license applies only to those patent claims licensable\n   by such Contributor that are necessarily infringed by their\n   Contribution(s) alone or by combination of their Contribution(s)\n   with the Work to which such Contribution(s) was submitted. If You\n   institute patent litigation against any entity (including a\n   cross-claim or counterclaim in a lawsuit) alleging that the Work\n   or a Contribution incorporated within the Work constitutes direct\n   or contributory patent infringement, then any patent licenses\n   granted to You under this License for that Work shall terminate\n   as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n   Work or Derivative Works thereof in any medium, with or without\n   modifications, and in Source or Object form, provided that You\n   meet the following conditions:\n\n   (a) You must give any other recipients of the Work or\n       Derivative Works a copy of this License; and\n\n   (b) You must cause any modified files to carry prominent notices\n       stating that You changed the files; and\n\n   (c) You must retain, in the Source form of any Derivative Works\n       that You distribute, all copyright, patent, trademark, and\n       attribution notices from the Source form of the Work,\n       excluding those notices that do not pertain to any part of\n       the Derivative Works; and\n\n   (d) If the Work includes a \"NOTICE\" text file as part of its\n       distribution, then any Derivative Works that You distribute must\n       include a readable copy of the attribution notices contained\n       within such NOTICE file, excluding those notices that do not\n       pertain to any part of the Derivative Works, in at least one\n       of the following places: within a NOTICE text file distributed\n       as part of the Derivative Works; within the Source form or\n       documentation, if provided along with the Derivative Works; or,\n       within a display generated by the Derivative Works, if and\n       wherever such third-party notices normally appear. The contents\n       of the NOTICE file are for informational purposes only and\n       do not modify the License. 
You may add Your own attribution\n       notices within Derivative Works that You distribute, alongside\n       or as an addendum to the NOTICE text from the Work, provided\n       that such additional attribution notices cannot be construed\n       as modifying the License.\n\n   You may add Your own copyright statement to Your modifications and\n   may provide additional or different license terms and conditions\n   for use, reproduction, or distribution of Your modifications, or\n   for any such Derivative Works as a whole, provided Your use,\n   reproduction, and distribution of the Work otherwise complies with\n   the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n   any Contribution intentionally submitted for inclusion in the Work\n   by You to the Licensor shall be under the terms and conditions of\n   this License, without any additional terms or conditions.\n   Notwithstanding the above, nothing herein shall supersede or modify\n   the terms of any separate license agreement you may have executed\n   with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n   names, trademarks, service marks, or product names of the Licensor,\n   except as required for reasonable and customary use in describing the\n   origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n   agreed to in writing, Licensor provides the Work (and each\n   Contributor provides its Contributions) on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n   implied, including, without limitation, any warranties or conditions\n   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n   PARTICULAR PURPOSE. You are solely responsible for determining the\n   appropriateness of using or redistributing the Work and assume any\n   risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n   whether in tort (including negligence), contract, or otherwise,\n   unless required by applicable law (such as deliberate and grossly\n   negligent acts) or agreed to in writing, shall any Contributor be\n   liable to You for damages, including any direct, indirect, special,\n   incidental, or consequential damages of any character arising as a\n   result of this License or out of the use or inability to use the\n   Work (including but not limited to damages for loss of goodwill,\n   work stoppage, computer failure or malfunction, or any and all\n   other commercial damages or losses), even if such Contributor\n   has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n   the Work or Derivative Works thereof, You may choose to offer,\n   and charge a fee for, acceptance of support, warranty, indemnity,\n   or other liability obligations and/or rights consistent with this\n   License. 
However, in accepting such obligations, You may act only\n   on Your own behalf and on Your sole responsibility, not on behalf\n   of any other Contributor, and only if You agree to indemnify,\n   defend, and hold each Contributor harmless for any liability\n   incurred by, or claims asserted against, such Contributor by reason\n   of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n   To apply the Apache License to your work, attach the following\n   boilerplate notice, with the fields enclosed by brackets \"[]\"\n   replaced with your own identifying information. (Don't include\n   the brackets!)  The text should be enclosed in the appropriate\n   comment syntax for the file format. We also recommend that a\n   file or class name and description of purpose be included on the\n   same \"printed page\" as the copyright notice for easier\n   identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License."
  },
  {
    "path": "ffi-toolkit/LICENSE-MIT",
    "content": "Permission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE."
  },
  {
    "path": "ffi-toolkit/README.md",
    "content": "# FFI Toolkit\n\nA collection of functions useful for working with the Rust FFI.\n\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "ffi-toolkit/src/lib.rs",
    "content": "extern crate libc;\n\nuse std::borrow::Cow;\nuse std::ffi::{CStr, CString};\nuse std::path::PathBuf;\n\n// produce a C string from a Rust string\npub fn rust_str_to_c_str<T: Into<String>>(s: T) -> *mut libc::c_char {\n    CString::new(s.into()).unwrap().into_raw()\n}\n\n// consume a C string-pointer and free its memory\npub unsafe fn free_c_str(ptr: *mut libc::c_char) {\n    if !ptr.is_null() {\n        let _ = CString::from_raw(ptr);\n    }\n}\n\n// return a forgotten raw pointer to something of type T\npub fn raw_ptr<T>(thing: T) -> *mut T {\n    Box::into_raw(Box::new(thing))\n}\n\n// transmutes a C string to a copy-on-write Rust string\npub unsafe fn c_str_to_rust_str<'a>(x: *const libc::c_char) -> Cow<'a, str> {\n    use std::borrow::Cow;\n    if x.is_null() {\n        Cow::from(\"\")\n    } else {\n        CStr::from_ptr(x).to_string_lossy()\n    }\n}\n\n// cast from mutable to constant reference\npub unsafe fn cast_const<'a, T>(x: *mut T) -> &'a T {\n    assert!(!x.is_null(), \"Object argument was null\");\n    (&(*x))\n}\n\n// transmutes a C string to a PathBuf\npub unsafe fn c_str_to_pbuf(x: *const libc::c_char) -> PathBuf {\n    PathBuf::from(String::from(c_str_to_rust_str(x)))\n}\n"
  },
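  {
    "path": "ffi-toolkit/examples/roundtrip.rs",
    "content": "// Hypothetical usage sketch (not part of the original crate): a minimal\n// example showing round-trips through the ffi-toolkit helpers, assuming only\n// the public functions defined in src/lib.rs.\n\nuse ffi_toolkit::{c_str_to_rust_str, cast_const, free_c_str, raw_ptr, rust_str_to_c_str};\n\nfn main() {\n    // Move a Rust string across the FFI boundary and back.\n    let ptr = rust_str_to_c_str(\"hello ffi\");\n    unsafe {\n        {\n            // Borrow the C string as a copy-on-write Rust string.\n            let s = c_str_to_rust_str(ptr);\n            assert_eq!(s, \"hello ffi\");\n        }\n        // Free exactly once; the helper reclaims the CString's allocation.\n        free_c_str(ptr);\n    }\n\n    // Leak a value to a raw pointer, read it through a shared reference,\n    // then reclaim ownership so it is dropped normally.\n    let n = raw_ptr(42u64);\n    unsafe {\n        assert_eq!(*cast_const(n), 42);\n        drop(Box::from_raw(n));\n    }\n}\n"
  },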
  {
    "path": "fil-proofs-param/Cargo.toml",
    "content": "[package]\nname = \"fil-proofs-param\"\ndescription = \"Filecoin parameter cli tools.\"\nversion = \"2.0.1\"\nauthors = [\"dignifiedquire <dignifiedquire@gmail.com>\", \"laser <l@s3r.com>\", \"porcuquine <porcuquine@users.noreply.github.com>\"]\nlicense = \"MIT OR Apache-2.0\"\nedition = \"2018\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\nreadme = \"README.md\"\n\n[dependencies]\nstorage-proofs-core = { path = \"../storage-proofs-core\", version = \"^7.0.0\", default-features = false}\nstorage-proofs-porep = { path = \"../storage-proofs-porep\", version = \"^7.0.0\", default-features = false }\nstorage-proofs-post = { path = \"../storage-proofs-post\", version = \"^7.0.0\", default-features = false }\nfilecoin-hashers = { version = \"^2.0.0\", path = \"../filecoin-hashers\", default-features = false, features = [\"poseidon\", \"sha256\"] }\nfilecoin-proofs = { version = \"^7.0.0\", path = \"../filecoin-proofs\", default-features = false }\nbitvec = \"0.17\"\nrand = \"0.7\"\nlazy_static = \"1.2\"\nmemmap = \"0.7\"\npbr = \"1.0\"\nbyteorder = \"1\"\nitertools = \"0.9\"\nserde = { version = \"1.0\", features = [\"rc\", \"derive\"] }\nserde_json = \"1.0\"\nff = { version = \"0.2.3\", package = \"fff\" }\nblake2b_simd = \"0.5\"\nbellperson = { version = \"0.13\", default-features = false }\nlog = \"0.4.7\"\nfil_logger = \"0.1\"\nenv_proxy = \"0.4\"\nflate2 = { version = \"1.0.9\", features = [\"rust_backend\"]}\ntar = \"0.4.26\"\nrayon = \"1.1.0\"\nblake2s_simd = \"0.5.8\"\nhex = \"0.4.0\"\nmerkletree = \"0.21.0\"\nbincode = \"1.1.2\"\nanyhow = \"1.0.23\"\nrand_xorshift = \"0.2.0\"\nsha2 = \"0.9.1\"\ntypenum = \"1.11.2\"\ngperftools = { version = \"0.2\", optional = true }\ngeneric-array = \"0.14.4\"\nstructopt = \"0.3.12\"\nhumansize = \"1.1.0\"\nindicatif = \"0.15.0\"\ngroupy = \"0.3.0\"\ndialoguer = \"0.8.0\"\nclap = \"2.33.3\"\n\n[dependencies.reqwest]\nversion = \"0.10\"\ndefault-features = false\nfeatures = [\"blocking\", \"native-tls-vendored\"]\n\n[dev-dependencies]\ncriterion = \"0.3\"\nrexpect = \"0.4.0\"\npretty_assertions = \"0.6.1\"\nfailure = \"0.1.7\"\ntempfile = \"3\"\n\n[features]\ndefault = [\"gpu\", \"pairing\"]\ncpu-profile = [\"gperftools\"]\nheap-profile = [\"gperftools/heap\"]\nsimd = [\"storage-proofs-core/simd\"]\nasm = [\"storage-proofs-core/asm\"]\ngpu = [\"storage-proofs-core/gpu\", \"storage-proofs-porep/gpu\", \"storage-proofs-post/gpu\", \"bellperson/gpu\"]\npairing = [\"storage-proofs-core/pairing\", \"storage-proofs-porep/pairing\", \"storage-proofs-post/pairing\", \"bellperson/pairing\"]\nblst = [\"storage-proofs-core/blst\", \"storage-proofs-porep/blst\", \"storage-proofs-post/blst\", \"bellperson/blst\"]\n"
  },
  {
    "path": "fil-proofs-param/LICENSE-APACHE",
    "content": "                              Apache License\n                        Version 2.0, January 2004\n                     http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n   \"License\" shall mean the terms and conditions for use, reproduction,\n   and distribution as defined by Sections 1 through 9 of this document.\n\n   \"Licensor\" shall mean the copyright owner or entity authorized by\n   the copyright owner that is granting the License.\n\n   \"Legal Entity\" shall mean the union of the acting entity and all\n   other entities that control, are controlled by, or are under common\n   control with that entity. For the purposes of this definition,\n   \"control\" means (i) the power, direct or indirect, to cause the\n   direction or management of such entity, whether by contract or\n   otherwise, or (ii) ownership of fifty percent (50%) or more of the\n   outstanding shares, or (iii) beneficial ownership of such entity.\n\n   \"You\" (or \"Your\") shall mean an individual or Legal Entity\n   exercising permissions granted by this License.\n\n   \"Source\" form shall mean the preferred form for making modifications,\n   including but not limited to software source code, documentation\n   source, and configuration files.\n\n   \"Object\" form shall mean any form resulting from mechanical\n   transformation or translation of a Source form, including but\n   not limited to compiled object code, generated documentation,\n   and conversions to other media types.\n\n   \"Work\" shall mean the work of authorship, whether in Source or\n   Object form, made available under the License, as indicated by a\n   copyright notice that is included in or attached to the work\n   (an example is provided in the Appendix below).\n\n   \"Derivative Works\" shall mean any work, whether in Source or Object\n   form, that is based on (or derived from) the Work and for which the\n   editorial revisions, annotations, elaborations, or other modifications\n   represent, as a whole, an original work of authorship. For the purposes\n   of this License, Derivative Works shall not include works that remain\n   separable from, or merely link (or bind by name) to the interfaces of,\n   the Work and Derivative Works thereof.\n\n   \"Contribution\" shall mean any work of authorship, including\n   the original version of the Work and any modifications or additions\n   to that Work or Derivative Works thereof, that is intentionally\n   submitted to Licensor for inclusion in the Work by the copyright owner\n   or by an individual or Legal Entity authorized to submit on behalf of\n   the copyright owner. For the purposes of this definition, \"submitted\"\n   means any form of electronic, verbal, or written communication sent\n   to the Licensor or its representatives, including but not limited to\n   communication on electronic mailing lists, source code control systems,\n   and issue tracking systems that are managed by, or on behalf of, the\n   Licensor for the purpose of discussing and improving the Work, but\n   excluding communication that is conspicuously marked or otherwise\n   designated in writing by the copyright owner as \"Not a Contribution.\"\n\n   \"Contributor\" shall mean Licensor and any individual or Legal Entity\n   on behalf of whom a Contribution has been received by Licensor and\n   subsequently incorporated within the Work.\n\n2. Grant of Copyright License. 
Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   copyright license to reproduce, prepare Derivative Works of,\n   publicly display, publicly perform, sublicense, and distribute the\n   Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   (except as stated in this section) patent license to make, have made,\n   use, offer to sell, sell, import, and otherwise transfer the Work,\n   where such license applies only to those patent claims licensable\n   by such Contributor that are necessarily infringed by their\n   Contribution(s) alone or by combination of their Contribution(s)\n   with the Work to which such Contribution(s) was submitted. If You\n   institute patent litigation against any entity (including a\n   cross-claim or counterclaim in a lawsuit) alleging that the Work\n   or a Contribution incorporated within the Work constitutes direct\n   or contributory patent infringement, then any patent licenses\n   granted to You under this License for that Work shall terminate\n   as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n   Work or Derivative Works thereof in any medium, with or without\n   modifications, and in Source or Object form, provided that You\n   meet the following conditions:\n\n   (a) You must give any other recipients of the Work or\n       Derivative Works a copy of this License; and\n\n   (b) You must cause any modified files to carry prominent notices\n       stating that You changed the files; and\n\n   (c) You must retain, in the Source form of any Derivative Works\n       that You distribute, all copyright, patent, trademark, and\n       attribution notices from the Source form of the Work,\n       excluding those notices that do not pertain to any part of\n       the Derivative Works; and\n\n   (d) If the Work includes a \"NOTICE\" text file as part of its\n       distribution, then any Derivative Works that You distribute must\n       include a readable copy of the attribution notices contained\n       within such NOTICE file, excluding those notices that do not\n       pertain to any part of the Derivative Works, in at least one\n       of the following places: within a NOTICE text file distributed\n       as part of the Derivative Works; within the Source form or\n       documentation, if provided along with the Derivative Works; or,\n       within a display generated by the Derivative Works, if and\n       wherever such third-party notices normally appear. The contents\n       of the NOTICE file are for informational purposes only and\n       do not modify the License. 
You may add Your own attribution\n       notices within Derivative Works that You distribute, alongside\n       or as an addendum to the NOTICE text from the Work, provided\n       that such additional attribution notices cannot be construed\n       as modifying the License.\n\n   You may add Your own copyright statement to Your modifications and\n   may provide additional or different license terms and conditions\n   for use, reproduction, or distribution of Your modifications, or\n   for any such Derivative Works as a whole, provided Your use,\n   reproduction, and distribution of the Work otherwise complies with\n   the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n   any Contribution intentionally submitted for inclusion in the Work\n   by You to the Licensor shall be under the terms and conditions of\n   this License, without any additional terms or conditions.\n   Notwithstanding the above, nothing herein shall supersede or modify\n   the terms of any separate license agreement you may have executed\n   with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n   names, trademarks, service marks, or product names of the Licensor,\n   except as required for reasonable and customary use in describing the\n   origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n   agreed to in writing, Licensor provides the Work (and each\n   Contributor provides its Contributions) on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n   implied, including, without limitation, any warranties or conditions\n   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n   PARTICULAR PURPOSE. You are solely responsible for determining the\n   appropriateness of using or redistributing the Work and assume any\n   risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n   whether in tort (including negligence), contract, or otherwise,\n   unless required by applicable law (such as deliberate and grossly\n   negligent acts) or agreed to in writing, shall any Contributor be\n   liable to You for damages, including any direct, indirect, special,\n   incidental, or consequential damages of any character arising as a\n   result of this License or out of the use or inability to use the\n   Work (including but not limited to damages for loss of goodwill,\n   work stoppage, computer failure or malfunction, or any and all\n   other commercial damages or losses), even if such Contributor\n   has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n   the Work or Derivative Works thereof, You may choose to offer,\n   and charge a fee for, acceptance of support, warranty, indemnity,\n   or other liability obligations and/or rights consistent with this\n   License. 
However, in accepting such obligations, You may act only\n   on Your own behalf and on Your sole responsibility, not on behalf\n   of any other Contributor, and only if You agree to indemnify,\n   defend, and hold each Contributor harmless for any liability\n   incurred by, or claims asserted against, such Contributor by reason\n   of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n   To apply the Apache License to your work, attach the following\n   boilerplate notice, with the fields enclosed by brackets \"[]\"\n   replaced with your own identifying information. (Don't include\n   the brackets!)  The text should be enclosed in the appropriate\n   comment syntax for the file format. We also recommend that a\n   file or class name and description of purpose be included on the\n   same \"printed page\" as the copyright notice for easier\n   identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License."
  },
  {
    "path": "fil-proofs-param/LICENSE-MIT",
    "content": "Permission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE."
  },
  {
    "path": "fil-proofs-param/README.md",
    "content": "# worlddatabase\n"
  },
  {
    "path": "fil-proofs-param/scripts/pin-params.sh",
    "content": "#!/usr/bin/env bash\nset -Eeuo pipefail\n\n# pin-params.sh\n#\n# - Post the directory of params to cluster.ipfs.io\n# - Grab the CID for the previous params from proofs.filecoin.io\n# - Add the old params as a `prev` dir to the new params dir to keep them around.\n# - Pin the new cid on cluster\n# - Publish the new cid as a dnslink to proofs.filecoin.io\n# - The gateways will pin the new dir by checking proofs.filecoin.io hourly.\n#\n# Requires:\n#  - `ipfs-cluster-ctl` - download from https://dist.ipfs.io/#ipfs-cluster-ctl\n#  - `npx`, as provide `npm` >= v6\n#  - `ipfs`\n#\n# You _must_ provide the following env vars\n#\n#  - CLUSTER_TOKEN - the basic auth string as \"username:password\"\n#  - DNSIMPLE_TOKEN - an api key for a dnsimple account with a zone for proofs.filecoin.io\n#\n# Optional: you can override the input dir by passing a path as the first param.\n#\n# Usage:\n#   CLUSTER_TOKEN=\"user:pass\" DNSIMPLE_TOKEN=\"xyz\" ./pin-params.sh\n#\n\nINPUT_DIR=${1:-\"/var/tmp/filecoin-proof-parameters\"}\n: \"${CLUSTER_TOKEN:?please set CLUSTER_TOKEN env var}\"\n: \"${DNSIMPLE_TOKEN:?please set DNSIMPLE_TOKEN env var}\"\n\necho \"checking $INPUT_DIR\"\n\n# Grab the version number from the files in the dir.\n# Fail if more than 1 version or doesnt match a version string like vNN, e.g v12\nif ls -A $INPUT_DIR &> /dev/null; then\n  # version will be a list if there is more than one...\n  VERSION=$(ls $INPUT_DIR | sort -r | cut -c 1-3 | uniq)\n  echo found $VERSION\n\n  if [[ $(echo $VERSION | wc -w) -eq 1 && $VERSION =~ ^v[0-9]+ ]]; then\n    # we have 1 version, lets go...\n    COUNT=$(ls -l $INPUT_DIR | wc -l | xargs echo -n)\n    echo \"adding $COUNT files to ipfs...\"\n\n  else\n    echo \"Error: input dir should contain just the current version of the params\"\n    exit 1\n  fi\nelse\n  echo \"Error: input dir '$INPUT_DIR' should contain the params\"\n  exit 1\nfi\n\nCLUSTER_HOST=\"/dnsaddr/filecoin.collab.ipfscluster.io\"\nADDITIONAL_CLUSTER_HOST=\"/dnsaddr/cluster.ipfs.io\"\nCLUSTER_PIN_NAME=\"filecoin-proof-parameters-$VERSION\"\nDNSLINK_DOMAIN=\"proofs.filecoin.io\"\n\n# Add and pin to collab cluster. After this it will be on 1 peer and pin requests\n# will have been triggered for the others.\nROOT_CID=$(ipfs-cluster-ctl \\\n  --host $CLUSTER_HOST \\\n  --basic-auth $CLUSTER_TOKEN \\\n  add --quieter \\\n  --local \\\n  --name $CLUSTER_PIN_NAME \\\n  --recursive $INPUT_DIR )\n\necho \"ok! root cid is $ROOT_CID\"\n\n# Pin to main cluster additionally.\nipfs-cluster-ctl \\\n    --host $ADDITIONAL_CLUSTER_HOST \\\n    --basic-auth $CLUSTER_TOKEN \\\n    pin add $ROOT_CID \\\n    --no-status\n\necho \"ok! Pin request sent to additional cluster\"\n\n# Publist the new cid to the dnslink\nnpx dnslink-dnsimple --domain $DNSLINK_DOMAIN --link \"/ipfs/$ROOT_CID\"\n\necho \"done!\"\n"
  },
  {
    "path": "fil-proofs-param/scripts/verify-parameters-json.sh",
    "content": "#!/bin/sh\n\n# This script verifies that a given `.params` file (and the corresponding\n# `.vk` file) is part of `parameters.json` and has the correct digest.\n#\n# This script runs on POSIX compatible shells. You need to have standard\n# utilities (`basename`, `head`, `grep`) as well as have `jq` and `b2sum`\n# installed.\n#\n# The inputs are a `parameter.json` file and a `.params' file.\n\nif [ \"${#}\" -ne 2 ]; then\n    echo \"Verify that a given .params file (and the corresponding .vk file)\"\n    echo \"is part of parameters.json and has the correct digest.\"\n    echo \"\"\n    echo \"Usage: $(basename \"${0}\") parameters.json parameter-file.params\"\n    exit 1\nfi\n\nif ! command -v b2sum >/dev/null 2>&1\nthen\n    echo \"ERROR: 'b2sum' needs to be installed.\"\n    exit 1\nfi\n\nif ! command -v jq >/dev/null 2>&1\nthen\n    echo \"ERROR: 'jq' needs to be installed.\"\n    exit 1\nfi\n\nPARAMS_JSON=${1}\nPARAMS_ID=\"${2%.*}\"\n\nPARAMS_FILE=\"${PARAMS_ID}.params\"\nVK_FILE=\"${PARAMS_ID}.vk\"\n\n# Transforms the `parameters.json` into a string that consists of digest and\n# filename pairs.\nPARAMS_JSON_DATA=$(jq -r 'to_entries[] | \"\\(.value.digest) \\(.key)\"' \"${PARAMS_JSON}\")\n\nVK_HASH_SHORT=$(b2sum \"${VK_FILE}\"|head --bytes 32)\nif echo \"${PARAMS_JSON_DATA}\"|grep --silent \"${VK_HASH_SHORT} ${VK_FILE}\"; then\n    echo \"ok Correct digest of VK file was found in ${PARAMS_JSON}.\"\nelse\n    echo \"not ok ERROR: Digest of VK file was *not* found/correct in ${PARAMS_JSON}.\"\n    exit 1\nfi\n\nPARAMS_HASH_SHORT=$(b2sum \"${PARAMS_FILE}\"|head --bytes 32)\nif echo \"${PARAMS_JSON_DATA}\"|grep --silent \"${PARAMS_HASH_SHORT} ${PARAMS_FILE}\"; then\n    echo \"ok Correct digest of params file was found in ${PARAMS_JSON}.\"\nelse\n    echo \"not ok ERROR: Digest of params file was *not* found/correct in ${PARAMS_JSON}.\"\n    exit 1\nfi\n\necho \"# Verification successfully completed.\"\n"
  },
  {
    "path": "fil-proofs-param/src/bin/fakeipfsadd.rs",
    "content": "use std::fs::File;\nuse std::io;\n\nuse blake2b_simd::State as Blake2b;\nuse structopt::StructOpt;\n\n#[derive(Debug, StructOpt)]\n#[structopt(\n    name = \"fakeipfsadd\",\n    version = \"0.1\",\n    about = \"This program is used to simulate the `ipfs add` command while testing. It accepts a \\\n        path to a file and writes 32 characters of its hex-encoded BLAKE2b checksum to stdout. \\\n        Note that the real `ipfs add` command computes and emits a CID.\"\n)]\nenum Cli {\n    Add {\n        #[structopt(help = \"Positional argument for the path to the file to add.\")]\n        file_path: String,\n        #[structopt(short = \"Q\", help = \"Simulates the -Q argument to `ipfs add`.\")]\n        quieter: bool,\n    },\n}\n\nimpl Cli {\n    fn file_path(&self) -> &str {\n        match self {\n            Cli::Add { file_path, .. } => file_path,\n        }\n    }\n}\n\npub fn main() {\n    let cli = Cli::from_args();\n\n    let mut src_file =\n        File::open(cli.file_path()).expect(&format!(\"failed to open file: {}\", cli.file_path()));\n\n    let mut hasher = Blake2b::new();\n    io::copy(&mut src_file, &mut hasher).expect(\"failed to write BLAKE2b bytes to hasher\");\n    let hex_string: String = hasher.finalize().to_hex()[..32].into();\n    println!(\"{}\", hex_string)\n}\n"
  },
  {
    "path": "fil-proofs-param/src/bin/paramcache.rs",
    "content": "use std::env;\nuse std::process::exit;\nuse std::str::FromStr;\n\nuse dialoguer::{theme::ColorfulTheme, MultiSelect};\nuse filecoin_proofs::{\n    constants::{\n        DefaultPieceHasher, POREP_PARTITIONS, PUBLISHED_SECTOR_SIZES, WINDOW_POST_CHALLENGE_COUNT,\n        WINDOW_POST_SECTOR_COUNT, WINNING_POST_CHALLENGE_COUNT, WINNING_POST_SECTOR_COUNT,\n    },\n    parameters::{public_params, window_post_public_params, winning_post_public_params},\n    types::{PaddedBytesAmount, PoRepConfig, PoRepProofPartitions, PoStConfig, SectorSize},\n    with_shape, PoStType,\n};\nuse humansize::{file_size_opts, FileSize};\nuse indicatif::ProgressBar;\nuse log::{error, info, warn};\nuse rand::rngs::OsRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion, compound_proof::CompoundProof, merkle::MerkleTreeTrait,\n    parameter_cache::CacheableParameters,\n};\nuse storage_proofs_porep::stacked::{StackedCircuit, StackedCompound, StackedDrg};\nuse storage_proofs_post::fallback::{FallbackPoSt, FallbackPoStCircuit, FallbackPoStCompound};\nuse structopt::StructOpt;\n\nfn cache_porep_params<Tree: 'static + MerkleTreeTrait>(porep_config: PoRepConfig) {\n    info!(\"generating PoRep groth params\");\n\n    let public_params = public_params(\n        PaddedBytesAmount::from(porep_config),\n        usize::from(PoRepProofPartitions::from(porep_config)),\n        porep_config.porep_id,\n        porep_config.api_version,\n    )\n    .expect(\"failed to get public params from config\");\n\n    let circuit = <StackedCompound<Tree, DefaultPieceHasher> as CompoundProof<\n        StackedDrg<Tree, DefaultPieceHasher>,\n        StackedCircuit<Tree, DefaultPieceHasher>,\n    >>::blank_circuit(&public_params);\n\n    let _ = StackedCompound::<Tree, DefaultPieceHasher>::get_param_metadata(\n        circuit.clone(),\n        &public_params,\n    )\n    .expect(\"failed to get metadata\");\n\n    let _ = StackedCompound::<Tree, DefaultPieceHasher>::get_groth_params(\n        Some(&mut OsRng),\n        circuit.clone(),\n        &public_params,\n    )\n    .expect(\"failed to get groth params\");\n\n    let _ = StackedCompound::<Tree, DefaultPieceHasher>::get_verifying_key(\n        Some(&mut OsRng),\n        circuit,\n        &public_params,\n    )\n    .expect(\"failed to get verifying key\");\n}\n\nfn cache_winning_post_params<Tree: 'static + MerkleTreeTrait>(post_config: &PoStConfig) {\n    info!(\"generating Winning-PoSt groth params\");\n\n    let public_params = winning_post_public_params::<Tree>(post_config)\n        .expect(\"failed to get public params from config\");\n\n    let circuit = <FallbackPoStCompound<Tree> as CompoundProof<\n        FallbackPoSt<Tree>,\n        FallbackPoStCircuit<Tree>,\n    >>::blank_circuit(&public_params);\n\n    let _ = <FallbackPoStCompound<Tree>>::get_param_metadata(circuit.clone(), &public_params)\n        .expect(\"failed to get metadata\");\n\n    let _ = <FallbackPoStCompound<Tree>>::get_groth_params(\n        Some(&mut OsRng),\n        circuit.clone(),\n        &public_params,\n    )\n    .expect(\"failed to get groth params\");\n\n    let _ =\n        <FallbackPoStCompound<Tree>>::get_verifying_key(Some(&mut OsRng), circuit, &public_params)\n            .expect(\"failed to get verifying key\");\n}\n\nfn cache_window_post_params<Tree: 'static + MerkleTreeTrait>(post_config: &PoStConfig) {\n    info!(\"generating Window-PoSt groth params\");\n\n    let public_params = window_post_public_params::<Tree>(post_config)\n        .expect(\"failed to get public params 
from config\");\n\n    let circuit: FallbackPoStCircuit<Tree> = <FallbackPoStCompound<Tree> as CompoundProof<\n        FallbackPoSt<Tree>,\n        FallbackPoStCircuit<Tree>,\n    >>::blank_circuit(&public_params);\n\n    let _ = <FallbackPoStCompound<Tree>>::get_param_metadata(circuit.clone(), &public_params)\n        .expect(\"failed to get metadata\");\n\n    let _ = <FallbackPoStCompound<Tree>>::get_groth_params(\n        Some(&mut OsRng),\n        circuit.clone(),\n        &public_params,\n    )\n    .expect(\"failed to get groth params\");\n\n    let _ = <FallbackPoStCompound<Tree>>::get_verifying_key(\n        Some(&mut OsRng),\n        circuit.clone(),\n        &public_params,\n    )\n    .expect(\"failed to get verifying key\");\n}\n\n#[derive(Debug, StructOpt)]\n#[structopt(\n    name = \"paramcache\",\n    about = \"generates and caches SDR PoRep, Winning-PoSt, and Window-PoSt groth params\"\n)]\nstruct Opt {\n    #[structopt(long, help = \"Only cache PoSt groth params.\")]\n    only_post: bool,\n    #[structopt(\n        short = \"z\",\n        long,\n        use_delimiter = true,\n        help = \"A comma-separated list of sector sizes (in number of bytes).\"\n    )]\n    sector_sizes: Vec<u64>,\n    #[structopt(\n        long = \"api-version\",\n        value_name = \"SEMANTIC VERSION\",\n        default_value = \"1.1.0\",\n        help = \"Use a specific rust-fil-proofs API version.\"\n    )]\n    api_version: String,\n}\n\nfn generate_params_post(sector_size: u64, api_version: ApiVersion) {\n    with_shape!(\n        sector_size,\n        cache_winning_post_params,\n        &PoStConfig {\n            sector_size: SectorSize(sector_size),\n            challenge_count: WINNING_POST_CHALLENGE_COUNT,\n            sector_count: WINNING_POST_SECTOR_COUNT,\n            typ: PoStType::Winning,\n            priority: true,\n            api_version,\n        }\n    );\n\n    with_shape!(\n        sector_size,\n        cache_window_post_params,\n        &PoStConfig {\n            sector_size: SectorSize(sector_size),\n            challenge_count: WINDOW_POST_CHALLENGE_COUNT,\n            sector_count: *WINDOW_POST_SECTOR_COUNT\n                .read()\n                .expect(\"WINDOW_POST_SECTOR_COUNT poisoned\")\n                .get(&sector_size)\n                .expect(\"unknown sector size\"),\n            typ: PoStType::Window,\n            priority: true,\n            api_version,\n        }\n    );\n}\n\nfn generate_params_porep(sector_size: u64, api_version: ApiVersion) {\n    with_shape!(\n        sector_size,\n        cache_porep_params,\n        PoRepConfig {\n            sector_size: SectorSize(sector_size),\n            partitions: PoRepProofPartitions(\n                *POREP_PARTITIONS\n                    .read()\n                    .expect(\"POREP_PARTITIONS poisoned\")\n                    .get(&sector_size)\n                    .expect(\"unknown sector size\"),\n            ),\n            porep_id: [0; 32],\n            api_version,\n        }\n    );\n}\n\npub fn main() {\n    // Create a stderr logger for all log levels.\n    env::set_var(\"RUST_LOG\", \"paramcache\");\n    fil_logger::init();\n\n    let mut opts = Opt::from_args();\n\n    // If no sector-sizes were given provided via. the CLI, display an interactive menu. 
Otherwise,\n    // filter out invalid CLI sector-size arguments.\n    if opts.sector_sizes.is_empty() {\n        let sector_size_strings: Vec<String> = PUBLISHED_SECTOR_SIZES\n            .iter()\n            .map(|sector_size| {\n                let human_size = sector_size\n                    .file_size(file_size_opts::BINARY)\n                    .expect(\"failed to format sector size\");\n                // Right align numbers for easier reading.\n                format!(\"{: >7}\", human_size)\n            })\n            .collect();\n\n        opts.sector_sizes = MultiSelect::with_theme(&ColorfulTheme::default())\n            .with_prompt(\n                \"Select the sizes that should be generated if not already cached [use space key to \\\n                select, press return to finish]\",\n            )\n            .items(&sector_size_strings)\n            .interact()\n            .expect(\"interaction failed\")\n            .into_iter()\n            .map(|i| PUBLISHED_SECTOR_SIZES[i])\n            .collect();\n    } else {\n        opts.sector_sizes.retain(|size| {\n            if PUBLISHED_SECTOR_SIZES.contains(size) {\n                true\n            } else {\n                let human_size = size\n                    .file_size(file_size_opts::BINARY)\n                    .expect(\"failed to humansize sector size argument\");\n                warn!(\"ignoring invalid sector size argument: {}\", human_size);\n                false\n            }\n        });\n    }\n\n    if opts.sector_sizes.is_empty() {\n        error!(\"no valid sector sizes given, aborting\");\n        exit(1);\n    }\n\n    let api_version = ApiVersion::from_str(&opts.api_version)\n        .expect(\"Cannot parse API version from semver string (e.g. 1.1.0)\");\n\n    for sector_size in opts.sector_sizes {\n        let human_size = sector_size\n            .file_size(file_size_opts::BINARY)\n            .expect(\"failed to format sector size\");\n        let message = format!(\"Generating sector size: {}\", human_size);\n        info!(\"{}\", &message);\n\n        let spinner = ProgressBar::new_spinner();\n        spinner.set_message(&message);\n        spinner.enable_steady_tick(100);\n\n        generate_params_post(sector_size, api_version);\n\n        if !opts.only_post {\n            generate_params_porep(sector_size, api_version);\n        }\n\n        spinner.finish_with_message(&format!(\"✔ {}\", &message));\n    }\n}\n"
  },
  {
    "path": "fil-proofs-param/src/bin/paramfetch.rs",
    "content": "use std::env;\nuse std::fs::{create_dir_all, rename, File};\nuse std::io::{self, copy, stderr, stdout, Read, Stdout, Write};\nuse std::path::PathBuf;\nuse std::process::{exit, Command};\n\nuse anyhow::{ensure, Context, Result};\nuse dialoguer::{theme::ColorfulTheme, MultiSelect, Select};\nuse filecoin_proofs::param::{\n    get_digest_for_file_within_cache, get_full_path_for_file_within_cache, has_extension,\n};\nuse flate2::read::GzDecoder;\nuse humansize::{file_size_opts, FileSize};\nuse lazy_static::lazy_static;\nuse log::{error, info, trace, warn};\nuse pbr::{ProgressBar, Units};\nuse reqwest::{blocking::Client, header, Proxy, Url};\nuse storage_proofs_core::parameter_cache::{\n    parameter_cache_dir, parameter_cache_dir_name, ParameterMap, GROTH_PARAMETER_EXT,\n};\nuse structopt::StructOpt;\nuse tar::Archive;\n\nlazy_static! {\n    static ref CLI_ABOUT: String = format!(\n        \"Downloads missing or outdated Groth parameter files from ipfs using ipget.\\n\\n\\\n\n        Set the $FIL_PROOFS_PARAMETER_CACHE env-var to specify the path to the parameter cache\n        directory (location where params are written), otherwise params will be written to '{}'.\",\n        parameter_cache_dir_name(),\n    );\n}\n\nconst DEFAULT_JSON: &str = include_str!(\"../../parameters.json\");\nconst DEFAULT_IPGET_VERSION: &str = \"v0.6.0\";\n\n#[inline]\nfn ipget_dir(version: &str) -> String {\n    format!(\"/var/tmp/ipget-{}\", version)\n}\n\n#[inline]\nfn ipget_path(version: &str) -> String {\n    format!(\"{}/ipget/ipget\", ipget_dir(version))\n}\n\n/// Reader with progress bar.\nstruct FetchProgress<R> {\n    reader: R,\n    progress_bar: ProgressBar<Stdout>,\n}\n\nimpl<R: Read> Read for FetchProgress<R> {\n    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {\n        self.reader.read(buf).map(|n| {\n            self.progress_bar.add(n as u64);\n            n\n        })\n    }\n}\n\nimpl<R: Read> FetchProgress<R> {\n    fn new(reader: R, size: u64) -> Self {\n        let mut progress_bar = ProgressBar::new(size);\n        progress_bar.set_units(Units::Bytes);\n        FetchProgress {\n            reader,\n            progress_bar,\n        }\n    }\n}\n\n/// Download a version of ipget.\nfn download_ipget(version: &str, verbose: bool) -> Result<()> {\n    info!(\"downloading ipget\");\n\n    let (os, ext) = if cfg!(target_os = \"macos\") {\n        (\"darwin\", \"tar.gz\")\n    } else if cfg!(target_os = \"windows\") {\n        // TODO: enable Windows by adding support for .zip files.\n        // (\"windows\", \"zip\")\n        unimplemented!(\"paramfetch does not currently support Windows/.zip downloads\");\n    } else {\n        (\"linux\", \"tar.gz\")\n    };\n\n    // Request ipget file.\n    let url = Url::parse(&format!(\n        \"https://dist.ipfs.io/ipget/{}/ipget_{}_{}-amd64.{}\",\n        version, version, os, ext,\n    ))?;\n    trace!(\"making GET request: {}\", url.as_str());\n    let client = Client::builder()\n        .proxy(Proxy::custom(move |url| env_proxy::for_url(&url).to_url()))\n        .build()?;\n    let mut resp = client.get(url).send()?;\n    trace!(\"received GET response\");\n    if !resp.status().is_success() {\n        error!(\"non-200 response status:\\n{:?}\\nexiting\", resp);\n        exit(1);\n    }\n\n    let size: Option<u64> = resp\n        .headers()\n        .get(header::CONTENT_LENGTH)\n        .and_then(|val| val.to_str().unwrap().parse().ok());\n\n    match size {\n        Some(size) => trace!(\"content-length: {}\", size),\n      
  None => trace!(\n            \"unable to parse content-length: {:?}\",\n            resp.headers().get(header::CONTENT_LENGTH),\n        ),\n    };\n\n    // Write downloaded file.\n    let write_path = format!(\"{}.{}\", ipget_dir(version), ext);\n    trace!(\"writing downloaded file to: {}\", write_path);\n    let mut writer = File::create(&write_path).expect(\"failed to create file\");\n    if verbose && size.is_some() {\n        let mut resp = FetchProgress::new(resp, size.unwrap());\n        copy(&mut resp, &mut writer).expect(\"failed to write download to file\");\n    } else {\n        copy(&mut resp, &mut writer).expect(\"failed to write download to file\");\n    }\n    drop(writer);\n\n    // Unzip and unarchive downloaded file.\n    let reader = File::open(&write_path).expect(\"failed to open downloaded tar file\");\n    if ext == \"tar.gz\" {\n        trace!(\"unzipping and unarchiving downloaded file\");\n        let unzipper = GzDecoder::new(reader);\n        let mut unarchiver = Archive::new(unzipper);\n        unarchiver\n            .unpack(ipget_dir(version))\n            .expect(\"failed to unzip and unarchive\");\n    } else {\n        unimplemented!(\"unzip is not yet supported\");\n    }\n    info!(\n        \"successfully downloaded ipget binary: {}\",\n        ipget_path(version),\n    );\n\n    Ok(())\n}\n\n/// Check which files are outdated (or do not exist).\nfn get_filenames_requiring_download(\n    parameter_map: &ParameterMap,\n    selected_filenames: Vec<String>,\n) -> Vec<String> {\n    selected_filenames\n        .into_iter()\n        .filter(|filename| {\n            trace!(\"determining if file is out of date: {}\", filename);\n            let path = get_full_path_for_file_within_cache(filename);\n            if !path.exists() {\n                trace!(\"file not found, marking for download\");\n                return true;\n            };\n            trace!(\"params file found\");\n            let calculated_digest = match get_digest_for_file_within_cache(&filename) {\n                Ok(digest) => digest,\n                Err(e) => {\n                    warn!(\"failed to hash file {}, marking for download\", e);\n                    return true;\n                }\n            };\n            let expected_digest = &parameter_map[filename].digest;\n            if &calculated_digest == expected_digest {\n                trace!(\"file is up to date\");\n                false\n            } else {\n                trace!(\"file has unexpected digest, marking for download\");\n                let new_filename = format!(\"{}-invalid-digest\", filename);\n                let new_path = path.with_file_name(new_filename);\n                trace!(\"moving invalid params to: {}\", new_path.display());\n                rename(path, new_path).expect(\"failed to move file\");\n                true\n            }\n        })\n        .collect()\n}\n\nfn download_file_with_ipget(\n    cid: &str,\n    path: &PathBuf,\n    ipget_path: &PathBuf,\n    ipget_args: &Option<String>,\n    verbose: bool,\n) -> Result<()> {\n    let mut args = vec![cid, \"-o\", path.to_str().unwrap()];\n    if let Some(ipget_args) = ipget_args {\n        args.extend(ipget_args.split_whitespace());\n    }\n    trace!(\n        \"spawning subprocess: {} {}\",\n        ipget_path.display(),\n        args.join(\" \")\n    );\n    let output = Command::new(ipget_path.as_os_str())\n        .args(&args)\n        .output()\n        .with_context(|| \"failed to spawn ipget subprocess\")?;\n    if 
verbose {\n        stdout()\n            .write_all(&output.stdout)\n            .with_context(|| \"failed to write ipget's stdout\")?;\n        stderr()\n            .write_all(&output.stderr)\n            .with_context(|| \"failed to write ipget's stderr\")?;\n    }\n    ensure!(output.status.success(), \"ipget returned non-zero exit code\");\n    Ok(())\n}\n\n#[derive(Debug, StructOpt)]\n#[structopt(name = \"paramfetch\", version = \"1.1\", about = CLI_ABOUT.as_str())]\nstruct Cli {\n    #[structopt(\n        long,\n        short = \"j\",\n        value_name = \"PATH TO JSON FILE\",\n        help = \"Use a specific JSON file.\"\n    )]\n    json: Option<String>,\n    #[structopt(long, short = \"r\", help = \"Prompt to retry file downloads on failure.\")]\n    retry: bool,\n    #[structopt(\n        long,\n        short = \"a\",\n        conflicts_with = \"sector-sizes\",\n        help = \"Download parameters for all sector sizes.\"\n    )]\n    all: bool,\n    #[structopt(\n        long = \"sector-sizes\",\n        short = \"z\",\n        value_name = \"SECTOR SIZES\",\n        value_delimiter = \",\",\n        require_delimiter = true,\n        multiple = false,\n        conflicts_with = \"all\",\n        help = \"A comma-separated list of sector sizes (in bytes) for which Groth parameters will \\\n            be downloaded.\"\n    )]\n    sector_sizes: Option<Vec<u64>>,\n    #[structopt(long, short = \"v\")]\n    verbose: bool,\n    #[structopt(\n        long = \"ipget-bin\",\n        short = \"i\",\n        value_name = \"PATH TO IPGET\",\n        conflicts_with = \"ipget-version\",\n        long_help = \"Path to an ipget binary. If this argument is not given, paramfetch will look \\\n            for ipget in the default location: /var/tmp/ipget-<version>/ipget/ipget. 
If no binary \\\n            is found in the default location, paramfetch will download ipget into that location.\"\n    )]\n    ipget_bin: Option<String>,\n    #[structopt(\n        long = \"ipget-version\",\n        value_name = \"VERSION\",\n        conflicts_with = \"ipget-bin\",\n        help = \"Set the version of ipget to use.\"\n    )]\n    ipget_version: Option<String>,\n    #[structopt(\n        long = \"ipget-args\",\n        value_name = \"ARGS\",\n        help = \"Specify additional arguments for ipget.\"\n    )]\n    ipget_args: Option<String>,\n}\n\npub fn main() {\n    // Log all log levels to stderr.\n    env::set_var(\"RUST_LOG\", \"paramfetch\");\n    fil_logger::init();\n\n    let cli = Cli::from_args();\n\n    // Parse parameters.json file.\n    let parameter_map: ParameterMap = match cli.json {\n        Some(json_path) => {\n            trace!(\"using json file: {}\", json_path);\n            let mut json_file = File::open(&json_path)\n                .map_err(|e| {\n                    error!(\"failed to open json file, exiting\\n{:?}\", e);\n                    exit(1);\n                })\n                .unwrap();\n            serde_json::from_reader(&mut json_file)\n                .map_err(|e| {\n                    error!(\"failed to parse json file, exiting\\n{:?}\", e);\n                    exit(1);\n                })\n                .unwrap()\n        }\n        None => {\n            trace!(\"using built-in json\");\n            serde_json::from_str(DEFAULT_JSON)\n                .map_err(|e| {\n                    error!(\"failed to parse built-in json, exiting\\n{:?}\", e);\n                    exit(1);\n                })\n                .unwrap()\n        }\n    };\n\n    let mut filenames: Vec<String> = parameter_map.keys().cloned().collect();\n    trace!(\"json contains {} files\", filenames.len());\n\n    // Filter out unwanted sector sizes from params files (.params files only, leave verifying-key\n    // files).\n    if let Some(ref sector_sizes) = cli.sector_sizes {\n        filenames.retain(|filename| {\n            let remove = has_extension(filename, GROTH_PARAMETER_EXT)\n                && !sector_sizes.contains(&parameter_map[filename].sector_size);\n            if remove {\n                let human_size = parameter_map[filename]\n                    .sector_size\n                    .file_size(file_size_opts::BINARY)\n                    .unwrap();\n                trace!(\"ignoring file: {} ({})\", filename, human_size);\n            }\n            !remove\n        });\n    }\n\n    // Determine which files are outdated.\n    filenames = get_filenames_requiring_download(&parameter_map, filenames);\n    if filenames.is_empty() {\n        info!(\"no outdated files, exiting\");\n        return;\n    }\n\n    // If no sector size CLI argument was provided, prompt the user to select which files to\n    // download.\n    if cli.sector_sizes.is_none() && !cli.all {\n        let filename_strings: Vec<String> = filenames\n            .iter()\n            .map(|filename| {\n                let human_size = parameter_map[filename]\n                    .sector_size\n                    .file_size(file_size_opts::BINARY)\n                    .unwrap();\n                format!(\"{} ({})\", filename, human_size)\n            })\n            .collect();\n        filenames = MultiSelect::with_theme(&ColorfulTheme::default())\n            .with_prompt(\"Select files to be downloaded (press space key to select)\")\n            
.items(&filename_strings)\n            .interact()\n            .expect(\"MultiSelect interaction failed\")\n            .into_iter()\n            .map(|i| filenames[i].clone())\n            .collect();\n    }\n\n    info!(\n        \"{} files to be downloaded: {:?}\",\n        filenames.len(),\n        filenames\n    );\n    if filenames.is_empty() {\n        info!(\"no files to download, exiting\");\n        return;\n    }\n\n    let ipget_path = match cli.ipget_bin {\n        Some(ipget_path) => {\n            let ipget_path = PathBuf::from(ipget_path);\n            if !ipget_path.exists() {\n                error!(\n                    \"provided ipget binary not found: {}, exiting\",\n                    ipget_path.display()\n                );\n                exit(1);\n            }\n            ipget_path\n        }\n        None => {\n            let ipget_version = cli\n                .ipget_version\n                .unwrap_or_else(|| DEFAULT_IPGET_VERSION.to_string());\n            let ipget_path = PathBuf::from(ipget_path(&ipget_version));\n            if !ipget_path.exists() {\n                info!(\"ipget binary not found: {}\", ipget_path.display());\n                download_ipget(&ipget_version, cli.verbose).expect(\"ipget download failed\");\n            }\n            ipget_path\n        }\n    };\n    trace!(\"using ipget binary: {}\", ipget_path.display());\n\n    trace!(\"creating param cache dir(s) if they don't exist\");\n    create_dir_all(parameter_cache_dir()).expect(\"failed to create param cache dir\");\n\n    loop {\n        for filename in &filenames {\n            info!(\"downloading params file with ipget: {}\", filename);\n            let path = get_full_path_for_file_within_cache(filename);\n            match download_file_with_ipget(\n                &parameter_map[filename].cid,\n                &path,\n                &ipget_path,\n                &cli.ipget_args,\n                cli.verbose,\n            ) {\n                Ok(_) => info!(\"finished downloading params file\"),\n                Err(e) => warn!(\"failed to download params file: {}\", e),\n            };\n        }\n        filenames = get_filenames_requiring_download(&parameter_map, filenames);\n        if filenames.is_empty() {\n            info!(\"successfully updated all files, exiting\");\n            return;\n        }\n        warn!(\n            \"{} files failed to be fetched: {:?}\",\n            filenames.len(),\n            filenames\n        );\n        let retry = cli.retry\n            || Select::with_theme(&ColorfulTheme::default())\n                .with_prompt(\"Retry failed downloads? (press arrow keys to select)\")\n                .items(&[\"y\", \"n\"])\n                .interact()\n                .map(|i| i == 0)\n                .expect(\"Select interaction failed\");\n        if !retry {\n            warn!(\"not retrying failed downloads, exiting\");\n            exit(1);\n        }\n    }\n}\n"
  },
  {
    "path": "fil-proofs-param/src/bin/parampublish.rs",
    "content": "use std::collections::BTreeMap;\nuse std::env;\nuse std::fs::{read_dir, File};\nuse std::io::{stderr, Write};\nuse std::path::Path;\nuse std::process::{exit, Command};\n\nuse anyhow::{ensure, Context, Result};\nuse dialoguer::{theme::ColorfulTheme, MultiSelect, Select};\nuse filecoin_proofs::{\n    param::{\n        add_extension, filename_to_parameter_id, get_digest_for_file_within_cache,\n        get_full_path_for_file_within_cache, has_extension, parameter_id_to_metadata_map,\n    },\n    SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_GIB, SECTOR_SIZE_512_MIB, SECTOR_SIZE_64_GIB,\n    SECTOR_SIZE_8_MIB,\n};\nuse humansize::{file_size_opts, FileSize};\nuse itertools::Itertools;\nuse lazy_static::lazy_static;\nuse log::{error, info, trace, warn};\nuse storage_proofs_core::parameter_cache::{\n    parameter_cache_dir, parameter_cache_dir_name, ParameterData, ParameterMap,\n    GROTH_PARAMETER_EXT, PARAMETER_METADATA_EXT, VERIFYING_KEY_EXT,\n};\nuse structopt::StructOpt;\n\nlazy_static! {\n    static ref CLI_ABOUT: String = format!(\n        \"Publish param files found in the cache directory specified by the env-var \\\n        $FIL_PROOFS_PARAMETER_CACHE (or if the env-var is not set, the dir: {}) to ipfs\",\n        parameter_cache_dir_name(),\n    );\n}\n\n// Default sector-sizes to publish.\nconst DEFAULT_SECTOR_SIZES: [u64; 5] = [\n    SECTOR_SIZE_2_KIB,\n    SECTOR_SIZE_8_MIB,\n    SECTOR_SIZE_512_MIB,\n    SECTOR_SIZE_32_GIB,\n    SECTOR_SIZE_64_GIB,\n];\n\n#[derive(Clone, Debug, PartialEq)]\nstruct FileInfo {\n    id: String,\n    filename: String,\n    sector_size: u64,\n    version: String,\n    ext: String,\n}\n\n#[inline]\nfn human_size(sector_size: u64) -> String {\n    sector_size.file_size(file_size_opts::BINARY).unwrap()\n}\n\n/// Returns `true` if a params filename starts with a version string and has a valid extension.\nfn is_well_formed_filename(filename: &str) -> bool {\n    let ext_is_valid = has_extension(filename, GROTH_PARAMETER_EXT)\n        || has_extension(filename, VERIFYING_KEY_EXT)\n        || has_extension(filename, PARAMETER_METADATA_EXT);\n    if !ext_is_valid {\n        warn!(\"file has invalid extension: {}, ignoring file\", filename);\n        return false;\n    }\n    let version = filename.split('-').nth(0).unwrap();\n    let version_is_valid =\n        version.get(0..1).unwrap() == \"v\" && version[1..].chars().all(|c| c.is_digit(10));\n    if !version_is_valid {\n        warn!(\n            \"filename does not start with version: {}, ignoring file\",\n            filename\n        );\n        return false;\n    }\n    true\n}\n\nfn get_filenames_in_cache_dir() -> Vec<String> {\n    let path = parameter_cache_dir();\n    if !path.exists() {\n        warn!(\"param cache dir does not exist (no files to publish), exiting\");\n        exit(0);\n    }\n    // Ignore entries that are not files or have a non-Utf8 filename.\n    read_dir(path)\n        .expect(\"failed to read param cache dir\")\n        .filter_map(|entry_res| {\n            let path = entry_res.expect(\"failed to read directory entry\").path();\n            if !path.is_file() {\n                return None;\n            }\n            path.file_name()\n                .and_then(|os_str| os_str.to_str())\n                .map(|s| s.to_string())\n        })\n        .collect()\n}\n\nfn publish_file(ipfs_bin: &str, filename: &str) -> Result<String> {\n    let path = get_full_path_for_file_within_cache(filename);\n    let output = Command::new(ipfs_bin)\n        .args(&[\"add\", \"-Q\", 
path.to_str().unwrap()])\n        .output()\n        .expect(\"failed to run ipfs subprocess\");\n    stderr()\n        .write_all(&output.stderr)\n        .with_context(|| \"failed to write ipfs' stderr\")?;\n    ensure!(output.status.success(), \"failed to publish via ipfs\");\n    let cid = String::from_utf8(output.stdout)\n        .with_context(|| \"ipfs' stdout is not valid Utf8\")?\n        .trim()\n        .to_string();\n    Ok(cid)\n}\n\n/// Write the parameters.json file (or file specified by `json_path`) containing the published\n/// params' IPFS cid's.\nfn write_param_map_to_disk(param_map: &ParameterMap, json_path: &str) -> Result<()> {\n    let mut file = File::create(json_path).with_context(|| \"failed to create json file\")?;\n    serde_json::to_writer_pretty(&mut file, &param_map).with_context(|| \"failed to write json\")?;\n    Ok(())\n}\n\n#[derive(Debug, StructOpt)]\n#[structopt(name = \"parampublish\", version = \"1.0\", about = CLI_ABOUT.as_str())]\nstruct Cli {\n    #[structopt(\n        long = \"list-all\",\n        short = \"a\",\n        help = \"The user will be prompted to select the files to publish from the set of all files \\\n            found in the cache dir. Excluding the -a/--list-all flag will result in the user being \\\n            prompted for a single param version number for filtering-in files in the cache dir.\"\n    )]\n    list_all_files: bool,\n    #[structopt(\n        long = \"ipfs-bin\",\n        value_name = \"PATH TO IPFS BINARY\",\n        default_value = \"ipfs\",\n        help = \"Use a specific ipfs binary instead of searching for one in $PATH.\"\n    )]\n    ipfs_bin: String,\n    #[structopt(\n        long = \"json\",\n        short = \"j\",\n        value_name = \"PATH\",\n        default_value = \"parameters.json\",\n        help = \"The path to write the parameters.json file.\"\n    )]\n    json_path: String,\n}\n\npub fn main() {\n    // Log all levels to stderr.\n    env::set_var(\"RUST_LOG\", \"parampublish\");\n    fil_logger::init();\n\n    let cli = Cli::from_args();\n\n    let cache_dir = match env::var(\"FIL_PROOFS_PARAMETER_CACHE\") {\n        Ok(s) => s,\n        _ => format!(\"{}\", parameter_cache_dir().display()),\n    };\n    info!(\"using param cache dir: {}\", cache_dir);\n\n    if !Path::new(&cli.ipfs_bin).exists() {\n        error!(\"ipfs binary not found: `{}`, exiting\", cli.ipfs_bin);\n        exit(1);\n    }\n\n    // Get the param-id's in the cache dir for which three files exist (.meta, .params, and .vk).\n    let ids = {\n        let filenames: Vec<String> = get_filenames_in_cache_dir()\n            .into_iter()\n            .filter(|filename| is_well_formed_filename(filename))\n            .collect();\n        trace!(\"found {} param files in cache dir\", filenames.len());\n        let mut ids: Vec<String> = filenames\n            .iter()\n            .map(|filename| filename_to_parameter_id(filename).unwrap())\n            .unique()\n            .collect_vec();\n        ids.retain(|id| {\n            filenames.contains(&add_extension(id, GROTH_PARAMETER_EXT))\n                && filenames.contains(&add_extension(id, VERIFYING_KEY_EXT))\n                && filenames.contains(&add_extension(id, PARAMETER_METADATA_EXT))\n        });\n        if ids.is_empty() {\n            warn!(\"no file triples found, exiting\");\n            exit(0);\n        }\n        trace!(\"found {} file triples\", ids.len());\n        ids\n    };\n\n    // Read each param file's sector-size from its .meta file.\n    let meta_map = 
parameter_id_to_metadata_map(&ids).unwrap_or_else(|e| {\n        error!(\"failed to parse .meta file:\\n{:?}\\nexiting\", e);\n        exit(1);\n    });\n\n    // Store every param-id's .params and .vk file info.\n    let mut infos = Vec::<FileInfo>::with_capacity(2 * ids.len());\n    for id in &ids {\n        let version = id.split('-').nth(0).unwrap().to_string();\n        let sector_size = meta_map[id].sector_size;\n        infos.push(FileInfo {\n            id: id.clone(),\n            filename: add_extension(id, GROTH_PARAMETER_EXT),\n            sector_size,\n            version: version.clone(),\n            ext: GROTH_PARAMETER_EXT.to_string(),\n        });\n        infos.push(FileInfo {\n            id: id.clone(),\n            filename: add_extension(id, VERIFYING_KEY_EXT),\n            sector_size,\n            version,\n            ext: VERIFYING_KEY_EXT.to_string(),\n        });\n    }\n\n    if cli.list_all_files {\n        // Create two vectors, one containing the file infos sorted in the order with which they\n        // will appear in the user prompt and a second for each param file's prompt string sorted\n        // in the same way.\n        let mut infos_sorted: Vec<&FileInfo> = Vec::with_capacity(infos.len());\n        let mut items: Vec<String> = Vec::with_capacity(infos.len());\n        infos\n            .iter()\n            .sorted_by(|info_1, info_2| {\n                // Sort in descending order by version, then order each version's files by ascending\n                // sector-size and filename, example order:\n                // (\"v28\", 1024, \"filename\"), (\"v28\", 2056, \"filename\"), (\"v27\", 1024, \"filename\")\n                let a = (&info_2.version, info_1.sector_size, &info_1.filename);\n                let b = (&info_1.version, info_2.sector_size, &info_2.filename);\n                a.cmp(&b)\n            })\n            .for_each(|info| {\n                let item = format!(\"{} ({})\", info.filename, human_size(info.sector_size));\n                items.push(item);\n                infos_sorted.push(info);\n            });\n\n        infos = MultiSelect::with_theme(&ColorfulTheme::default())\n            .with_prompt(\"Select files to publish (press 'space' to select, 'return' to submit)\")\n            .items(&items[..])\n            .interact()\n            .expect(\"interaction failed\")\n            .into_iter()\n            .map(|i| infos_sorted[i].clone())\n            .collect();\n    } else {\n        let versions: Vec<String> = infos\n            .iter()\n            .map(|info| info.version.clone())\n            .dedup()\n            .sorted()\n            .rev()\n            .collect();\n\n        let selected_version = Select::with_theme(&ColorfulTheme::default())\n            .with_prompt(\"Select a version (press 'space' to select, 'q' to quit)\")\n            .default(0)\n            .items(&versions[..])\n            .interact_opt()\n            .expect(\"interaction failed\")\n            .map(|i| versions[i].clone())\n            .unwrap_or_else(|| {\n                warn!(\"no versions selected, exiting\");\n                exit(0);\n            });\n\n        infos.retain(|info| info.version == selected_version);\n\n        // Sort the param-ids by ascending sector-size. 
Associate each param-id (two files: .params and\n        // .vk) with one prompt item.\n        let mut ids_sorted = Vec::<String>::with_capacity(infos.len() / 2);\n        let mut items = Vec::<String>::with_capacity(infos.len() / 2);\n        let mut default_items: Vec<bool> = vec![];\n        infos\n            .iter()\n            .sorted_by_key(|info| info.sector_size)\n            .for_each(|info| {\n                if !ids_sorted.contains(&info.id) {\n                    let item = format!(\"{} ({})\", human_size(info.sector_size), info.id);\n                    items.push(item);\n                    ids_sorted.push(info.id.clone());\n                    default_items.push(DEFAULT_SECTOR_SIZES.contains(&info.sector_size));\n                }\n            });\n\n        let selected_ids: Vec<&String> = MultiSelect::with_theme(&ColorfulTheme::default())\n            .with_prompt(\"Select sizes to publish (press 'space' to select, 'return' to submit)\")\n            .items(&items[..])\n            .defaults(&default_items)\n            .interact()\n            .expect(\"interaction failed\")\n            .into_iter()\n            .map(|i| &ids_sorted[i])\n            .collect();\n\n        infos.retain(|info| selected_ids.contains(&&info.id));\n    }\n\n    let n_files_to_publish = infos.len();\n    if n_files_to_publish == 0 {\n        warn!(\"no params selected, exiting\");\n        exit(0);\n    }\n    trace!(\"{} files to publish\", n_files_to_publish);\n\n    // Publish files to ipfs.\n    let mut param_map: ParameterMap = BTreeMap::new();\n\n    for info in infos {\n        trace!(\"publishing file to ipfs: {}\", info.filename);\n        match publish_file(&cli.ipfs_bin, &info.filename) {\n            Ok(cid) => {\n                info!(\"successfully published file to ipfs, cid={}\", cid);\n                let digest =\n                    get_digest_for_file_within_cache(&info.filename).expect(\"failed to hash file\");\n                trace!(\"successfully hashed file: {}\", digest);\n                let param_data = ParameterData {\n                    cid,\n                    digest,\n                    sector_size: info.sector_size,\n                };\n                param_map.insert(info.filename, param_data);\n            }\n            Err(e) => {\n                error!(\"failed to publish file to ipfs:\\n{:?}\\nexiting\", e);\n                exit(1);\n            }\n        }\n    }\n    info!(\"finished publishing files\");\n\n    // Write parameters.json file containing published ipfs cid's.\n    if let Err(e) = write_param_map_to_disk(&param_map, &cli.json_path) {\n        error!(\"failed to write json file:\\n{:?}\\nexiting\", e);\n        exit(1);\n    }\n    info!(\"successfully wrote json file: {}\", cli.json_path);\n}\n"
  },
  {
    "path": "fil-proofs-param/src/lib.rs",
    "content": "#![deny(clippy::all, clippy::perf, clippy::correctness)]\n#![warn(clippy::unwrap_used)]\n"
  },
  {
    "path": "fil-proofs-param/tests/paramfetch/mod.rs",
    "content": "use std::collections::BTreeMap;\nuse std::fs::File;\nuse std::io::{self, BufReader, Write};\nuse std::path::PathBuf;\n\nuse blake2b_simd::State as Blake2b;\nuse failure::Error as FailureError;\nuse rand::{thread_rng, Rng};\nuse storage_proofs_core::parameter_cache::{ParameterData, ParameterMap};\n\nuse crate::support::tmp_manifest;\n\nmod session;\n\nuse session::ParamFetchSessionBuilder;\n\n/// Produce a random sequence of bytes and first 32 characters of hex encoded\n/// BLAKE2b checksum. This helper function must be kept up-to-date with the\n/// parampublish implementation.\nfn rand_bytes_with_blake2b() -> Result<(Vec<u8>, String), FailureError> {\n    let bytes = thread_rng().gen::<[u8; 32]>();\n\n    let mut hasher = Blake2b::new();\n\n    let mut as_slice = &bytes[..];\n\n    io::copy(&mut as_slice, &mut hasher)?;\n\n    Ok((\n        bytes.iter().cloned().collect(),\n        hasher.finalize().to_hex()[..32].into(),\n    ))\n}\n\n#[test]\nfn nothing_to_fetch_if_cache_fully_hydrated() -> Result<(), FailureError> {\n    let mut manifest: BTreeMap<String, ParameterData> = BTreeMap::new();\n\n    let (aaa_bytes, aaa_checksum) = rand_bytes_with_blake2b()?;\n    let mut aaa_bytes: &[u8] = &aaa_bytes;\n\n    // manifest entry checksum matches the BLAKE2b we compute locally\n    manifest.insert(\n        \"aaa.vk\".to_string(),\n        ParameterData {\n            cid: \"\".to_string(),\n            digest: aaa_checksum,\n            sector_size: 1234,\n        },\n    );\n\n    let manifest_pbuf = tmp_manifest(Some(manifest))?;\n\n    let mut session = ParamFetchSessionBuilder::new(Some(manifest_pbuf))\n        .with_session_timeout_ms(1000)\n        .with_file_and_bytes(\"aaa.vk\", &mut aaa_bytes)\n        .build();\n\n    session.exp_string(\"determining if file is out of date: aaa.vk\")?;\n    session.exp_string(\"file is up to date\")?;\n    session.exp_string(\"no outdated files, exiting\")?;\n\n    Ok(())\n}\n\n#[test]\nfn prompts_to_download_if_file_in_manifest_is_missing() -> Result<(), FailureError> {\n    let mut manifest: BTreeMap<String, ParameterData> = BTreeMap::new();\n\n    manifest.insert(\n        \"aaa.vk\".to_string(),\n        ParameterData {\n            cid: \"\".to_string(),\n            digest: \"\".to_string(),\n            sector_size: 1024,\n        },\n    );\n\n    let manifest_pbuf = tmp_manifest(Some(manifest))?;\n\n    let mut session = ParamFetchSessionBuilder::new(Some(manifest_pbuf))\n        .with_session_timeout_ms(1000)\n        .build();\n\n    session.exp_string(\"determining if file is out of date: aaa.vk\")?;\n    session.exp_string(\"file not found, marking for download\")?;\n    session.exp_string(\"Select files to be downloaded\")?;\n    session.exp_string(\"aaa.vk (1 KiB)\")?;\n\n    Ok(())\n}\n\n#[test]\nfn prompts_to_download_if_file_checksum_does_not_match_manifest() -> Result<(), FailureError> {\n    let mut manifest: BTreeMap<String, ParameterData> = BTreeMap::new();\n\n    let (aaa_bytes, _) = rand_bytes_with_blake2b()?;\n    let mut aaa_bytes: &[u8] = &aaa_bytes;\n\n    manifest.insert(\n        \"aaa.vk\".to_string(),\n        ParameterData {\n            cid: \"\".to_string(),\n            digest: \"obviouslywrong\".to_string(),\n            sector_size: 1024,\n        },\n    );\n\n    let manifest_pbuf = tmp_manifest(Some(manifest))?;\n\n    let mut session = ParamFetchSessionBuilder::new(Some(manifest_pbuf))\n        .with_session_timeout_ms(1000)\n        .with_file_and_bytes(\"aaa.vk\", &mut aaa_bytes)\n        
.build();\n\n    session.exp_string(\"determining if file is out of date: aaa.vk\")?;\n    session.exp_string(\"params file found\")?;\n    session.exp_string(\"file has unexpected digest, marking for download\")?;\n    session.exp_string(\"Select files to be downloaded\")?;\n    session.exp_string(\"aaa.vk (1 KiB)\")?;\n\n    Ok(())\n}\n\n#[test]\nfn fetches_vk_even_if_sector_size_does_not_match() -> Result<(), FailureError> {\n    let mut manifest: BTreeMap<String, ParameterData> = BTreeMap::new();\n\n    manifest.insert(\n        \"aaa.params\".to_string(),\n        ParameterData {\n            cid: \"\".to_string(),\n            digest: \"\".to_string(),\n            sector_size: 1024,\n        },\n    );\n\n    manifest.insert(\n        \"aaa.vk\".to_string(),\n        ParameterData {\n            cid: \"\".to_string(),\n            digest: \"\".to_string(),\n            sector_size: 1024,\n        },\n    );\n\n    let manifest_pbuf = tmp_manifest(Some(manifest))?;\n\n    let mut session = ParamFetchSessionBuilder::new(Some(manifest_pbuf))\n        .with_session_timeout_ms(1000)\n        .whitelisted_sector_sizes(vec![\"6666\".to_string(), \"4444\".to_string()])\n        .build();\n\n    session.exp_string(\"json contains 2 files\")?;\n    session.exp_string(\"ignoring file: aaa.params (1 KiB)\")?;\n    session.exp_string(\"determining if file is out of date: aaa.vk\")?;\n    session.exp_string(\"file not found, marking for download\")?;\n\n    Ok(())\n}\n\n#[test]\nfn invalid_json_path_produces_error() -> Result<(), FailureError> {\n    let mut session = ParamFetchSessionBuilder::new(Some(PathBuf::from(\"/invalid/path\")))\n        .with_session_timeout_ms(1000)\n        .build();\n\n    session.exp_string(\"using json file: /invalid/path\")?;\n    session.exp_string(\"failed to open json file, exiting\")?;\n\n    Ok(())\n}\n\n#[test]\nfn invalid_json_produces_error() -> Result<(), FailureError> {\n    let manifest_pbuf = tmp_manifest(None)?;\n\n    let mut file = File::create(&manifest_pbuf)?;\n    file.write_all(b\"invalid json\")?;\n\n    let mut session = ParamFetchSessionBuilder::new(Some(manifest_pbuf))\n        .with_session_timeout_ms(1000)\n        .build();\n\n    session.exp_string(\"failed to parse json file, exiting\")?;\n\n    Ok(())\n}\n\n#[test]\nfn no_json_path_uses_default_manifest() -> Result<(), FailureError> {\n    let file = File::open(\"../parameters.json\")?;\n    let reader = BufReader::new(file);\n    let manifest: ParameterMap = serde_json::from_reader(reader)?;\n\n    let mut session = ParamFetchSessionBuilder::new(None)\n        .with_session_timeout_ms(1000)\n        .build();\n\n    session.exp_string(\"using built-in json\")?;\n\n    for parameter in manifest.keys() {\n        session.exp_string(&format!(\n            \"determining if file is out of date: {}\",\n            parameter\n        ))?;\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "fil-proofs-param/tests/paramfetch/session.rs",
    "content": "use std::fs::File;\nuse std::io::{self, Read};\nuse std::panic::panic_any;\nuse std::path::{Path, PathBuf};\n\nuse failure::SyncFailure;\nuse rexpect::session::PtyReplSession;\nuse tempfile::{tempdir, TempDir};\n\nuse crate::support::{cargo_bin, spawn_bash_with_retries};\n\npub struct ParamFetchSessionBuilder {\n    cache_dir: TempDir,\n    session_timeout_ms: u64,\n    whitelisted_sector_sizes: Option<Vec<String>>,\n    manifest: Option<PathBuf>,\n    prompt_enabled: bool,\n}\n\nimpl ParamFetchSessionBuilder {\n    pub fn new(manifest: Option<PathBuf>) -> ParamFetchSessionBuilder {\n        let temp_dir = tempdir().expect(\"could not create temp dir\");\n\n        ParamFetchSessionBuilder {\n            cache_dir: temp_dir,\n            session_timeout_ms: 1000,\n            manifest,\n            prompt_enabled: true,\n            whitelisted_sector_sizes: None,\n        }\n    }\n\n    /// Configure the pty timeout (see documentation for `rexpect::spawn_bash`).\n    pub fn with_session_timeout_ms(mut self, timeout_ms: u64) -> ParamFetchSessionBuilder {\n        self.session_timeout_ms = timeout_ms;\n        self\n    }\n\n    /// Configure the pty timeout (see documentation for `rexpect::spawn_bash`).\n    pub fn whitelisted_sector_sizes(\n        mut self,\n        sector_sizes: Vec<String>,\n    ) -> ParamFetchSessionBuilder {\n        self.whitelisted_sector_sizes = Some(sector_sizes);\n        self\n    }\n\n    /// Create a file with the provided bytes in the cache directory.\n    pub fn with_file_and_bytes<P: AsRef<Path>, R: Read>(\n        self,\n        filename: P,\n        r: &mut R,\n    ) -> ParamFetchSessionBuilder {\n        let mut pbuf = self.cache_dir.path().to_path_buf();\n        pbuf.push(filename.as_ref());\n\n        let mut file = File::create(&pbuf).expect(\"failed to create file in temp dir\");\n\n        io::copy(r, &mut file).expect(\"failed to copy bytes to file\");\n\n        self\n    }\n\n    /// Launch paramfetch in an environment configured by the builder.\n    pub fn build(self) -> ParamFetchSession {\n        let mut p = spawn_bash_with_retries(10, Some(self.session_timeout_ms))\n            .unwrap_or_else(|err| panic_any(err));\n\n        let cache_dir_path = format!(\"{:?}\", self.cache_dir.path());\n\n        let paramfetch_path = cargo_bin(\"paramfetch\");\n\n        let whitelist: String = self\n            .whitelisted_sector_sizes\n            .map(|wl| {\n                let mut s = \"--sector-sizes=\".to_string();\n                s.push_str(&wl.join(\",\"));\n                s\n            })\n            .unwrap_or_else(|| \"\".to_string());\n\n        let json_argument = if self.manifest.is_some() {\n            format!(\"--json={:?}\", self.manifest.expect(\"missing manifest\"))\n        } else {\n            \"\".to_string()\n        };\n\n        let cmd = format!(\n            \"{}={} {:?} {} {} {}\",\n            \"FIL_PROOFS_PARAMETER_CACHE\", // related to var name in core/src/settings.rs\n            cache_dir_path,\n            paramfetch_path,\n            if self.prompt_enabled { \"\" } else { \"--all\" },\n            json_argument,\n            whitelist,\n        );\n\n        p.execute(&cmd, \".*\").expect(\"could not execute paramfetch\");\n\n        ParamFetchSession {\n            pty_session: p,\n            _cache_dir: self.cache_dir,\n        }\n    }\n}\n\n/// An active pseudoterminal (pty) used to interact with paramfetch.\npub struct ParamFetchSession {\n    pty_session: PtyReplSession,\n    
_cache_dir: TempDir,\n}\n\nimpl ParamFetchSession {\n    /// Block until provided string is seen on stdout from paramfetch and\n    /// return remaining output.\n    pub fn exp_string(\n        &mut self,\n        needle: &str,\n    ) -> Result<String, SyncFailure<rexpect::errors::Error>> {\n        self.pty_session\n            .exp_string(needle)\n            .map_err(SyncFailure::new)\n    }\n}\n"
  },
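  {
    "path": "fil-proofs-param/examples/pty_session_sketch.rs",
    "content": "//! Editor's note: an illustrative sketch, not part of the original\n//! repository (the path and file are hypothetical). It shows the bare\n//! rexpect pattern that the ParamFetchSession/ParamPublishSession wrappers\n//! are built around: spawn a binary on a pseudoterminal, then block until\n//! expected output appears. The spawned command (`cat`) and the 1s timeout\n//! are arbitrary choices for the demo.\n\nuse rexpect::errors::Error;\nuse rexpect::spawn;\n\nfn main() -> Result<(), Error> {\n    // `cat` echoes stdin back, which is enough to show the send/expect\n    // round trip the test sessions rely on.\n    let mut session = spawn(\"cat\", Some(1_000))?;\n    session.send_line(\"hello pty\")?;\n    // Blocks until the needle appears on the pty or the timeout elapses.\n    session.exp_string(\"hello pty\")?;\n    Ok(())\n}\n"
  },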
  {
    "path": "fil-proofs-param/tests/paramfetch/support/mod.rs",
    "content": "pub mod session;\n"
  },
  {
    "path": "fil-proofs-param/tests/parampublish/mod.rs",
    "content": "pub mod prompts_to_publish;\npub mod read_metadata_files;\npub mod support;\npub mod write_json_manifest;\n"
  },
  {
    "path": "fil-proofs-param/tests/parampublish/prompts_to_publish.rs",
    "content": "use failure::Error as FailureError;\nuse storage_proofs_core::parameter_cache::CacheEntryMetadata;\n\nuse crate::parampublish::support::session::ParamPublishSessionBuilder;\n\n#[test]\nfn ignores_files_unrecognized_extensions() -> Result<(), FailureError> {\n    let to_create = vec![\"v1-aaa.vk\", \"v1-aaa.params\", \"v1-bbb.txt\", \"ddd\"];\n\n    let (mut session, _) = ParamPublishSessionBuilder::new()\n        .with_session_timeout_ms(1000)\n        .with_files(&to_create)\n        .with_metadata(\"v1-aaa.meta\", &CacheEntryMetadata { sector_size: 1024 })\n        .list_all_files()\n        .build();\n\n    session.exp_string(\"found 3 param files in cache dir\")?;\n    session.exp_string(\"found 1 file triples\")?;\n    session.exp_string(\"Select files to publish\")?;\n    session.exp_string(\"v1-aaa.params (1 KiB)\")?;\n    session.exp_string(\"v1-aaa.vk (1 KiB)\")?;\n    session.send_line(\"\")?;\n    session.exp_string(\"no params selected, exiting\")?;\n\n    Ok(())\n}\n\n#[test]\nfn displays_sector_size_in_prompt() -> Result<(), FailureError> {\n    let to_create = vec![\"v1-aaa.vk\", \"v1-aaa.params\", \"v1-xxx.vk\", \"v1-xxx.params\"];\n\n    let (mut session, _) = ParamPublishSessionBuilder::new()\n        .with_session_timeout_ms(1000)\n        .with_files(&to_create)\n        .with_metadata(\"v1-aaa.meta\", &CacheEntryMetadata { sector_size: 2048 })\n        .with_metadata(\"v1-xxx.meta\", &CacheEntryMetadata { sector_size: 1024 })\n        .list_all_files()\n        .build();\n\n    session.exp_string(\"found 6 param files in cache dir\")?;\n    session.exp_string(\"found 2 file triples\")?;\n    session.exp_string(\"Select files to publish\")?;\n    session.exp_string(\"v1-xxx.params (1 KiB)\")?;\n    session.exp_string(\"v1-xxx.vk (1 KiB)\")?;\n    session.exp_string(\"v1-aaa.params (2 KiB)\")?;\n    session.exp_string(\"v1-aaa.vk (2 KiB)\")?;\n    session.send_line(\"\")?;\n    session.exp_string(\"no params selected, exiting\")?;\n\n    Ok(())\n}\n\n#[test]\nfn no_assets_no_prompt() -> Result<(), FailureError> {\n    let (mut session, _) = ParamPublishSessionBuilder::new()\n        .with_session_timeout_ms(1000)\n        .build();\n\n    session.exp_string(\"found 0 param files in cache dir\")?;\n    session.exp_string(\"no file triples found, exiting\")?;\n\n    Ok(())\n}\n"
  },
  {
    "path": "fil-proofs-param/tests/parampublish/read_metadata_files.rs",
    "content": "use failure::Error as FailureError;\n\nuse crate::parampublish::support::session::ParamPublishSessionBuilder;\n\n#[test]\nfn fails_if_missing_metadata_file() -> Result<(), FailureError> {\n    // missing the corresponding .meta file\n    let filenames = vec![\"v12-aaa.vk\", \"v12-aaa.params\"];\n\n    let (mut session, _) = ParamPublishSessionBuilder::new()\n        .with_session_timeout_ms(1000)\n        .with_files(&filenames)\n        .build();\n\n    session.exp_string(\"found 2 param files in cache dir\")?;\n    session.exp_string(\"no file triples found, exiting\")?;\n\n    Ok(())\n}\n\n#[test]\nfn fails_if_malformed_metadata_file() -> Result<(), FailureError> {\n    // A malformed v11-aaa.meta file.\n    let mut malformed: &[u8] = &[42];\n\n    let (mut session, _) = ParamPublishSessionBuilder::new()\n        .with_session_timeout_ms(1000)\n        .with_files(&[\"v11-aaa.vk\", \"v11-aaa.params\"])\n        .with_file_and_bytes(\"v11-aaa.meta\", &mut malformed)\n        .build();\n\n    session.exp_string(\"found 3 param files in cache dir\")?;\n    session.exp_string(\"found 1 file triples\")?;\n    session.exp_string(\"failed to parse .meta file\")?;\n    session.exp_string(\"exiting\")?;\n\n    Ok(())\n}\n"
  },
  {
    "path": "fil-proofs-param/tests/parampublish/support/mod.rs",
    "content": "pub mod session;\n"
  },
  {
    "path": "fil-proofs-param/tests/parampublish/support/session.rs",
    "content": "use std::fs::{read_dir, File};\nuse std::io::{self, Read, Write};\nuse std::panic::panic_any;\nuse std::path::{Path, PathBuf};\n\nuse failure::SyncFailure;\nuse rand::{thread_rng, Rng};\nuse rexpect::session::PtyReplSession;\nuse storage_proofs_core::parameter_cache::CacheEntryMetadata;\nuse tempfile::{tempdir, TempDir};\n\nuse crate::support::{cargo_bin, spawn_bash_with_retries, FakeIpfsBin};\n\npub struct ParamPublishSessionBuilder {\n    cache_dir: TempDir,\n    cached_file_pbufs: Vec<PathBuf>,\n    session_timeout_ms: u64,\n    manifest: PathBuf,\n    ipfs_bin_path: PathBuf,\n    list_all_files: bool,\n}\n\nimpl ParamPublishSessionBuilder {\n    pub fn new() -> ParamPublishSessionBuilder {\n        let temp_dir = tempdir().expect(\"could not create temp dir\");\n\n        let mut pbuf = temp_dir.path().to_path_buf();\n        pbuf.push(\"parameters.json\");\n\n        File::create(&pbuf).expect(\"failed to create file in temp dir\");\n\n        ParamPublishSessionBuilder {\n            cache_dir: temp_dir,\n            cached_file_pbufs: vec![],\n            session_timeout_ms: 1000,\n            manifest: pbuf,\n            ipfs_bin_path: cargo_bin(\"fakeipfsadd\"),\n            list_all_files: false,\n        }\n    }\n\n    /// Configure the path used by `parampublish` to add files to IPFS daemon.\n    pub fn with_ipfs_bin(mut self, ipfs_bin: &FakeIpfsBin) -> ParamPublishSessionBuilder {\n        let pbuf: PathBuf = PathBuf::from(&ipfs_bin.bin_path());\n        self.ipfs_bin_path = pbuf;\n        self\n    }\n\n    /// Create empty files with the given names in the cache directory.\n    pub fn with_files<P: AsRef<Path>>(self, filenames: &[P]) -> ParamPublishSessionBuilder {\n        filenames.iter().fold(self, |acc, item| acc.with_file(item))\n    }\n\n    /// Create a file containing 32 random bytes with the given name in the\n    /// cache directory.\n    pub fn with_file<P: AsRef<Path>>(mut self, filename: P) -> ParamPublishSessionBuilder {\n        let mut pbuf = self.cache_dir.path().to_path_buf();\n        pbuf.push(filename.as_ref());\n\n        let mut file = File::create(&pbuf).expect(\"failed to create file in temp dir\");\n\n        let random_bytes = thread_rng().gen::<[u8; 32]>();\n        file.write_all(&random_bytes)\n            .expect(\"failed to write bytes\");\n\n        self.cached_file_pbufs.push(pbuf);\n        self\n    }\n\n    /// Create a file with the provided bytes in the cache directory.\n    pub fn with_file_and_bytes<P: AsRef<Path>, R: Read>(\n        mut self,\n        filename: P,\n        r: &mut R,\n    ) -> ParamPublishSessionBuilder {\n        let mut pbuf = self.cache_dir.path().to_path_buf();\n        pbuf.push(filename.as_ref());\n\n        let mut file = File::create(&pbuf).expect(\"failed to create file in temp dir\");\n\n        io::copy(r, &mut file).expect(\"failed to copy bytes to file\");\n\n        self.cached_file_pbufs.push(pbuf);\n        self\n    }\n\n    /// Create a metadata file with the provided name in the cache directory.\n    pub fn with_metadata<P: AsRef<Path>>(\n        self,\n        filename: P,\n        meta: &CacheEntryMetadata,\n    ) -> ParamPublishSessionBuilder {\n        let mut meta_bytes: &[u8] = &serde_json::to_vec(meta)\n            .expect(\"failed to serialize CacheEntryMetadata to JSON byte array\");\n\n        self.with_file_and_bytes(filename, &mut meta_bytes)\n    }\n\n    /// Configure the pty timeout (see documentation for `rexpect::spawn_bash`).\n    pub fn 
with_session_timeout_ms(mut self, timeout_ms: u64) -> ParamPublishSessionBuilder {\n        self.session_timeout_ms = timeout_ms;\n        self\n    }\n\n    /// Prompt the user to select from all files found in the cache dir rather\n    /// than filtering by param version.\n    pub fn list_all_files(mut self) -> ParamPublishSessionBuilder {\n        self.list_all_files = true;\n        self\n    }\n\n    /// When publishing, write JSON manifest to provided path.\n    pub fn write_manifest_to(mut self, manifest_dest: PathBuf) -> ParamPublishSessionBuilder {\n        self.manifest = manifest_dest;\n        self\n    }\n\n    /// Launch parampublish in an environment configured by the builder.\n    pub fn build(self) -> (ParamPublishSession, Vec<PathBuf>) {\n        let mut p = spawn_bash_with_retries(10, Some(self.session_timeout_ms))\n            .unwrap_or_else(|err| panic_any(err));\n\n        let cache_dir_path = format!(\"{:?}\", self.cache_dir.path());\n\n        let cache_contents: Vec<PathBuf> = read_dir(&self.cache_dir)\n            .unwrap_or_else(|_| panic_any(format!(\"failed to read cache dir {:?}\", self.cache_dir)))\n            .map(|x| x.expect(\"failed to get dir entry\"))\n            .map(|x| x.path())\n            .collect();\n\n        let parampublish_path = cargo_bin(\"parampublish\");\n\n        let cmd = format!(\n            \"{}={} {:?} {} --ipfs-bin={:?} --json={:?}\",\n            \"FIL_PROOFS_PARAMETER_CACHE\", // related to var name in core/src/settings.rs\n            cache_dir_path,\n            parampublish_path,\n            if self.list_all_files { \"-a\" } else { \"\" },\n            self.ipfs_bin_path,\n            self.manifest\n        );\n\n        p.execute(&cmd, \".*\")\n            .expect(\"could not execute parampublish\");\n\n        (\n            ParamPublishSession {\n                pty_session: p,\n                _cache_dir: self.cache_dir,\n            },\n            cache_contents,\n        )\n    }\n}\n\n/// An active pseudoterminal (pty) used to interact with parampublish.\npub struct ParamPublishSession {\n    pty_session: PtyReplSession,\n    _cache_dir: TempDir,\n}\n\nimpl ParamPublishSession {\n    /// Send provided string and trailing newline to parampublish.\n    pub fn send_line(&mut self, line: &str) -> Result<usize, SyncFailure<rexpect::errors::Error>> {\n        self.pty_session.send_line(line).map_err(SyncFailure::new)\n    }\n\n    /// Block until provided string is seen on stdout from parampublish and\n    /// return remaining output.\n    pub fn exp_string(\n        &mut self,\n        needle: &str,\n    ) -> Result<String, SyncFailure<rexpect::errors::Error>> {\n        self.pty_session\n            .exp_string(needle)\n            .map_err(SyncFailure::new)\n    }\n}\n"
  },
  {
    "path": "fil-proofs-param/tests/parampublish/write_json_manifest.rs",
    "content": "use std::collections::BTreeMap;\nuse std::fs::File;\nuse std::path::Path;\n\nuse storage_proofs_core::parameter_cache::{CacheEntryMetadata, ParameterData};\n\nuse crate::{\n    parampublish::support::session::ParamPublishSessionBuilder,\n    support::{tmp_manifest, FakeIpfsBin},\n};\n\n#[test]\nfn writes_json_manifest() -> Result<(), failure::Error> {\n    let filenames = vec![\"v10-aaa.vk\", \"v10-aaa.params\"];\n\n    let manifest_path = tmp_manifest(None)?;\n\n    let ipfs = FakeIpfsBin::new();\n\n    let (mut session, files_in_cache) = ParamPublishSessionBuilder::new()\n        .with_session_timeout_ms(1000)\n        .with_files(&filenames)\n        .with_metadata(\"v10-aaa.meta\", &CacheEntryMetadata { sector_size: 1234 })\n        .write_manifest_to(manifest_path.clone())\n        .with_ipfs_bin(&ipfs)\n        .build();\n\n    // compute checksums from files added to cache to compare with\n    // manifest entries after publishing completes\n    let cache_checksums = filename_to_checksum(&ipfs, files_in_cache.as_ref());\n\n    session.exp_string(\"Select a version\")?;\n    // There is only one version of parameters, accept that one\n    session.send_line(\"\")?;\n    //session.exp_regex(\".*Select the sizes to publish.*\")?;\n    session.exp_string(\"Select sizes to publish\")?;\n    // There is only one size, accept that one\n    session.send_line(\" \")?;\n\n    // wait for confirmation...\n    session.exp_string(\"2 files to publish\")?;\n    session.exp_string(\"finished publishing files\")?;\n\n    // read the manifest file from disk and verify that it is well\n    // formed and contains the expected keys\n    let manifest_file = File::open(&manifest_path)?;\n    let manifest_map: BTreeMap<String, ParameterData> = serde_json::from_reader(manifest_file)?;\n\n    // ensure that each filename exists in the manifest and that its\n    // cid matches that which was produced from the `ipfs add` command\n    for filename in filenames.iter().cloned() {\n        if let (Some(m_entry), Some(expected)) =\n            (manifest_map.get(filename), cache_checksums.get(filename))\n        {\n            assert_eq!(\n                &m_entry.cid, expected,\n                \"manifest does not include digest produced by ipfs add for {}\",\n                filename\n            );\n        } else {\n            panic!(\"{} must be present in both manifest and cache\", filename);\n        }\n    }\n\n    Ok(())\n}\n\n/// Produce a map of filename (not path) to the checksum produced by the ipfs\n/// binary.\nfn filename_to_checksum<P: AsRef<Path>>(\n    ipfs_bin: &FakeIpfsBin,\n    paths: &[P],\n) -> BTreeMap<String, String> {\n    paths.iter().fold(BTreeMap::new(), |mut acc, item| {\n        acc.insert(\n            item.as_ref()\n                .file_name()\n                .and_then(|os_str| os_str.to_str())\n                .map(|s| s.to_string())\n                .unwrap_or_else(|| \"\".to_string()),\n            ipfs_bin\n                .compute_checksum(item)\n                .expect(\"failed to compute checksum\"),\n        );\n        acc\n    })\n}\n"
  },
  {
    "path": "fil-proofs-param/tests/suite.rs",
    "content": "mod paramfetch;\nmod parampublish;\nmod support;\n"
  },
  {
    "path": "fil-proofs-param/tests/support/mod.rs",
    "content": "use std::collections::BTreeMap;\nuse std::env;\nuse std::fs::File;\nuse std::path::{Path, PathBuf};\nuse std::process::Command;\nuse std::thread;\nuse std::time::Duration;\n\nuse failure::format_err;\nuse rexpect::{session::PtyReplSession, spawn_bash};\nuse storage_proofs_core::parameter_cache::ParameterData;\nuse tempfile::tempdir;\n\npub struct FakeIpfsBin {\n    bin_path: PathBuf,\n}\n\nimpl FakeIpfsBin {\n    pub fn new() -> FakeIpfsBin {\n        FakeIpfsBin {\n            bin_path: cargo_bin(\"fakeipfsadd\"),\n        }\n    }\n\n    pub fn compute_checksum<P: AsRef<Path>>(&self, path: P) -> Result<String, failure::Error> {\n        let output = Command::new(&self.bin_path)\n            .arg(\"add\")\n            .arg(\"-Q\")\n            .arg(path.as_ref())\n            .output()?;\n\n        if !output.status.success() {\n            Err(format_err!(\n                \"{:?} produced non-zero exit code\",\n                &self.bin_path\n            ))\n        } else {\n            Ok(String::from_utf8(output.stdout)?.trim().to_string())\n        }\n    }\n\n    pub fn bin_path(&self) -> &Path {\n        &self.bin_path\n    }\n}\n\n/// Get the path of the target directory.\npub fn target_dir() -> PathBuf {\n    env::current_exe()\n        .ok()\n        .map(|mut path| {\n            path.pop();\n            if path.ends_with(\"deps\") {\n                path.pop();\n            }\n            path\n        })\n        .expect(\"failed to get current exe path\")\n}\n\n/// Look up the path to a cargo-built binary within an integration test.\npub fn cargo_bin<S: AsRef<str>>(name: S) -> PathBuf {\n    target_dir().join(format!(\"{}{}\", name.as_ref(), env::consts::EXE_SUFFIX))\n}\n\n/// Spawn a pty and, if an error is produced, retry with linear backoff (to 5s).\npub fn spawn_bash_with_retries(\n    retries: u8,\n    timeout: Option<u64>,\n) -> Result<PtyReplSession, rexpect::errors::Error> {\n    let result = spawn_bash(timeout);\n    if result.is_ok() || retries == 0 {\n        result\n    } else {\n        let sleep_d = Duration::from_millis(5000 / u64::from(retries));\n        eprintln!(\n            \"failed to spawn pty: {} retries remaining - sleeping {:?}\",\n            retries, sleep_d\n        );\n        thread::sleep(sleep_d);\n        spawn_bash_with_retries(retries - 1, timeout)\n    }\n}\n\n/// Create a parameters.json manifest file in a temp directory and return its\n/// path.\npub fn tmp_manifest(\n    opt_manifest: Option<BTreeMap<String, ParameterData>>,\n) -> Result<PathBuf, failure::Error> {\n    let manifest_dir = tempdir()?;\n    let mut pbuf = manifest_dir.into_path();\n    pbuf.push(\"parameters.json\");\n\n    let mut file = File::create(&pbuf)?;\n    if let Some(map) = opt_manifest {\n        // JSON encode the manifest and write bytes to temp file\n        serde_json::to_writer(&mut file, &map)?;\n    }\n\n    Ok(pbuf)\n}\n"
  },
  {
    "path": "fil-proofs-tooling/.gitignore",
    "content": "/target\n**/*.rs.bk\nCargo.lock\n.criterion\n**/*.h\nheaptrack*\n.bencher\nlogging-toolkit\n*.profile\n*.heap\nrust-fil-proofs.config.toml\n"
  },
  {
    "path": "fil-proofs-tooling/Cargo.toml",
    "content": "[package]\nname = \"fil-proofs-tooling\"\ndescription = \"Tooling for rust-fil-proofs\"\nversion = \"6.0.1\"\nauthors = [\"dignifiedquire <dignifiedquire@gmail.com>\"]\nlicense = \"MIT OR Apache-2.0\"\npublish = false\nedition = \"2018\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\nreadme = \"README.md\"\n\n[dependencies]\nstorage-proofs-core = { path = \"../storage-proofs-core\", version = \"^7.0.0\", default-features = false}\nstorage-proofs-porep = { path = \"../storage-proofs-porep\", version = \"^7.0.0\", default-features = false }\nstorage-proofs-post = { path = \"../storage-proofs-post\", version = \"^7.0.0\", default-features = false }\nfilecoin-proofs = { path = \"../filecoin-proofs\", default-features = false }\nfilecoin-hashers = { path = \"../filecoin-hashers\", default-features = false, features = [\"poseidon\", \"blake2s\", \"sha256\"] }\nclap = \"2\"\nserde = { version = \"1.0\", features = [\"derive\"] }\nserde_json = \"1.0\"\ntoml = \"0.5\"\nlazy_static = \"1.2\"\nglob = \"0.3\"\nregex = \"1.3.7\"\ncommandspec = \"0.12.2\"\nchrono = { version = \"0.4.7\", features = [\"serde\"] }\nmemmap = \"0.7.0\"\nbellperson = { version = \"0.13\", default-features = false }\nrand = \"0.7\"\ntempfile = \"3.0.8\"\ncpu-time = \"1.0.0\"\ngit2 = \"0.13.6\"\nheim = { git = \"https://github.com/heim-rs/heim\", rev = \"e22e235\", features = [\"host\", \"memory\", \"cpu\"] }\nasync-std = \"1.6\"\nblake2s_simd = \"0.5.6\"\nfil_logger = \"0.1\"\nlog = \"0.4.8\"\nuom = \"0.30\"\nmerkletree = \"0.21.0\"\nbincode = \"1.1.2\"\nanyhow = \"1.0.23\"\nff = { version = \"0.2.3\", package = \"fff\" }\nrand_xorshift = \"0.2.0\"\nbytefmt = \"0.1.7\"\nrayon = \"1.3.0\"\nflexi_logger = \"0.16.1\"\ntypenum = \"1.11.2\"\ngeneric-array = \"0.14.4\"\nbyte-unit = \"4.0.9\"\nfdlimit = \"0.2.0\"\ndialoguer = \"0.8.0\"\nstructopt = \"0.3.12\"\nhumansize = \"1.1.0\"\n\n[features]\ndefault = [\"gpu\", \"measurements\", \"pairing\"]\ngpu = [\n    \"storage-proofs-core/gpu\",\n    \"storage-proofs-porep/gpu\",\n    \"storage-proofs-post/gpu\",\n    \"filecoin-proofs/gpu\",\n    \"bellperson/gpu\",\n    \"filecoin-hashers/gpu\",\n]\nmeasurements = [\"storage-proofs-core/measurements\"]\nprofile = [\"storage-proofs-core/profile\", \"measurements\"]\npairing = [\n    \"storage-proofs-core/pairing\",\n    \"storage-proofs-porep/pairing\",\n    \"storage-proofs-post/pairing\",\n    \"filecoin-proofs/pairing\",\n    \"bellperson/pairing\",\n    \"filecoin-hashers/pairing\",\n]\nblst = [\n    \"storage-proofs-core/blst\",\n    \"storage-proofs-porep/blst\",\n    \"storage-proofs-post/blst\",\n    \"filecoin-proofs/blst\",\n    \"bellperson/blst\",\n    \"filecoin-hashers/blst\",\n]\n\n[target.'cfg(target_arch = \"x86_64\")'.dependencies]\nraw-cpuid = \"8.1.2\"\n"
  },
  {
    "path": "fil-proofs-tooling/LICENSE-APACHE",
    "content": "                         Copyright (c) 2018 Filecoin Project\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n"
  },
  {
    "path": "fil-proofs-tooling/LICENSE-MIT",
    "content": "Permission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE."
  },
  {
    "path": "fil-proofs-tooling/README.md",
    "content": "worlddatabase\n"
  },
  {
    "path": "fil-proofs-tooling/release.toml",
    "content": "disable-push = true\ndisable-publish = true\ndisable-tag = true\nno-dev-version = true\n"
  },
  {
    "path": "fil-proofs-tooling/scripts/aggregate-benchmarks.sh",
    "content": "#!/usr/bin/env bash\n\nset -e\n\nstacked_path=$1\nmicro_path=$2\nhash_constraints_path=$3\nwindow_post_path=$4\n\njq --sort-keys -s '{ benchmarks: { \"stacked-benchmarks\": { outputs: { \"max-resident-set-size-kb\": .[0] } } } } * .[1]' \\\n  <(jq '.[\"max-resident-set-size-kb\"]' $stacked_path) \\\n  <(jq -s '.[0] * { benchmarks: { \"hash-constraints\": .[1], \"stacked-benchmarks\": .[2], \"micro-benchmarks\": .[3], \"window-post-benchmarks\": .[4] } }' \\\n    <(jq 'del (.benchmarks)' $micro_path) \\\n    <(jq '.benchmarks' $hash_constraints_path) \\\n    <(jq '.benchmarks' $stacked_path) \\\n    <(jq '.benchmarks' $micro_path) \\\n    <(jq '.benchmarks' $window_post_path))\n"
  },
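  {
    "path": "fil-proofs-tooling/scripts/examples/json_merge_sketch.rs",
    "content": "//! Editor's note: an illustrative sketch, not part of the original\n//! repository (the path is hypothetical). It restates in Rust what\n//! aggregate-benchmarks.sh delegates to jq's `*` operator: a recursive\n//! merge in which objects combine key-by-key and every other value is\n//! overwritten by the right-hand side.\n\nuse serde_json::{json, Value};\n\nfn deep_merge(left: &mut Value, right: Value) {\n    match (left, right) {\n        (Value::Object(left_map), Value::Object(right_map)) => {\n            for (key, right_value) in right_map {\n                // Recurse so nested objects (e.g. the \"benchmarks\" subtree)\n                // are combined rather than replaced wholesale.\n                deep_merge(left_map.entry(key).or_insert(Value::Null), right_value);\n            }\n        }\n        (left_slot, right_value) => *left_slot = right_value,\n    }\n}\n\nfn main() {\n    let mut report = json!({ \"benchmarks\": { \"stacked-benchmarks\": { \"outputs\": {} } } });\n    let extra = json!({ \"benchmarks\": { \"micro-benchmarks\": { \"outputs\": {} } } });\n    deep_merge(&mut report, extra);\n    // Both benchmark subtrees survive the merge.\n    println!(\"{}\", report);\n}\n"
  },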
  {
    "path": "fil-proofs-tooling/scripts/benchy.sh",
    "content": "#!/usr/bin/env bash\n\nwhich jq >/dev/null || { printf '%s\\n' \"error: jq\" >&2; exit 1; }\n\nBENCHY_STDOUT=$(mktemp)\nGTIME_STDERR=$(mktemp)\nJQ_STDERR=$(mktemp)\n\nGTIME_BIN=\"env time\"\nGTIME_ARG=\"-f '{ \\\"max-resident-set-size-kb\\\": %M }' cargo run --quiet --bin benchy --release -- ${@}\"\n\nif [[ $(env time --version 2>&1) != *\"GNU\"* ]]; then\n    if [[ $(/usr/bin/time --version 2>&1) != *\"GNU\"* ]]; then\n        if [[ $(env gtime --version 2>&1) != *\"GNU\"* ]]; then\n            printf '%s\\n' \"error: GNU time not installed\" >&2\n            exit 1\n        else\n            GTIME_BIN=\"gtime\"\n        fi\n    else\n        GTIME_BIN=\"/usr/bin/time\"\n    fi\nfi\n\nCMD=\"${GTIME_BIN} ${GTIME_ARG}\"\n\neval \"RUST_BACKTRACE=1 RUSTFLAGS=\\\"-Awarnings -C target-cpu=native\\\" ${CMD}\" > $BENCHY_STDOUT 2> $GTIME_STDERR\n\nGTIME_EXIT_CODE=$?\n\njq -s '.[0] * .[1]' $BENCHY_STDOUT $GTIME_STDERR 2> $JQ_STDERR\n\nJQ_EXIT_CODE=$?\n\nif [[ ! $GTIME_EXIT_CODE -eq 0 || ! $JQ_EXIT_CODE -eq 0 ]]; then\n    >&2 echo \"*********************************************\"\n    >&2 echo \"* benchy failed - dumping debug information *\"\n    >&2 echo \"*********************************************\"\n    >&2 echo \"\"\n    >&2 echo \"<COMMAND>\"\n    >&2 echo \"${CMD}\"\n    >&2 echo \"</COMMAND>\"\n    >&2 echo \"\"\n    >&2 echo \"<GTIME_STDERR>\"\n    >&2 echo \"$(cat $GTIME_STDERR)\"\n    >&2 echo \"</GTIME_STDERR>\"\n    >&2 echo \"\"\n    >&2 echo \"<BENCHY_STDOUT>\"\n    >&2 echo \"$(cat $BENCHY_STDOUT)\"\n    >&2 echo \"</BENCHY_STDOUT>\"\n    >&2 echo \"\"\n    >&2 echo \"<JQ_STDERR>\"\n    >&2 echo \"$(cat $JQ_STDERR)\"\n    >&2 echo \"</JQ_STDERR>\"\n    exit 1\nfi\n"
  },
  {
    "path": "fil-proofs-tooling/scripts/micro.sh",
    "content": "#!/usr/bin/env bash\n\nMICRO_SDERR=$(mktemp)\nMICRO_SDOUT=$(mktemp)\nJQ_STDERR=$(mktemp)\n\nCMD=\"cargo run --bin micro --release ${@}\"\n\neval \"RUST_BACKTRACE=1 RUSTFLAGS=\\\"-Awarnings -C target-cpu=native\\\" ${CMD}\" 1> $MICRO_SDOUT 2> $MICRO_SDERR\n\nMICRO_EXIT_CODE=$?\n\ncat $MICRO_SDOUT | jq '.' 2> $JQ_STDERR\n\nJQ_EXIT_CODE=$?\n\nif [[ ! $MICRO_EXIT_CODE -eq 0 || ! $JQ_EXIT_CODE -eq 0 ]]; then\n    >&2 echo \"********************************************\"\n    >&2 echo \"* micro failed - dumping debug information *\"\n    >&2 echo \"********************************************\"\n    >&2 echo \"\"\n    >&2 echo \"<COMMAND>\"\n    >&2 echo \"${CMD}\"\n    >&2 echo \"</COMMAND>\"\n    >&2 echo \"\"\n    >&2 echo \"<MICRO_SDERR>\"\n    >&2 echo \"$(cat $MICRO_SDERR)\"\n    >&2 echo \"</MICRO_SDERR>\"\n    >&2 echo \"\"\n    >&2 echo \"<MICRO_SDOUT>\"\n    >&2 echo \"$(cat $MICRO_SDOUT)\"\n    >&2 echo \"</MICRO_SDOUT>\"\n    >&2 echo \"\"\n    >&2 echo \"<JQ_STDERR>\"\n    >&2 echo \"$(cat $JQ_STDERR)\"\n    >&2 echo \"</JQ_STDERR>\"\n    exit 1\nfi\n"
  },
  {
    "path": "fil-proofs-tooling/scripts/retry.sh",
    "content": "#!/usr/bin/env bash\n\n# Inspired by https://gist.github.com/reacocard/28611bfaa2395072119464521d48729a\n\nset -o errexit\nset -o nounset\nset -o pipefail\n\n# Retry a command on a particular exit code, up to a max number of attempts,\n# with exponential backoff.\n# Invocation:\n#   err_retry exit_code attempts sleep_multiplier <command>\n# exit_code: The exit code to retry on.\n# attempts: The number of attempts to make.\n# sleep_millis: Multiplier for sleep between attempts. Examples:\n#     If multiplier is 1000, sleep intervals are 1, 4, 9, 16, etc. seconds.\n#     If multiplier is 5000, sleep intervals are 5, 20, 45, 80, 125, etc. seconds.\n\nexit_code=$1\nattempts=$2\nsleep_millis=$3\nshift 3\n\nfor attempt in `seq 1 $attempts`; do\n    # This weird construction lets us capture return codes under -o errexit\n    \"$@\" && rc=$? || rc=$?\n\n    if [[ ! $rc -eq $exit_code ]]; then\n        exit $rc\n    fi\n\n    if [[ $attempt -eq $attempts ]]; then\n        exit $rc\n    fi\n\n    sleep_ms=\"$(($attempt * $attempt * $sleep_millis))\"\n\n    sleep_seconds=$(echo \"scale=2; ${sleep_ms}/1000\" | bc)\n\n    (>&2 echo \"sleeping ${sleep_seconds}s and then retrying ($((attempt + 1))/${attempts})\")\n\n    sleep \"${sleep_seconds}\"\ndone\n"
  },
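  {
    "path": "fil-proofs-tooling/scripts/examples/quadratic_backoff_sketch.rs",
    "content": "//! Editor's note: an illustrative sketch, not part of the original\n//! repository (the path and function names are hypothetical). It restates\n//! retry.sh's backoff rule in Rust: before retry n the caller sleeps\n//! n^2 * sleep_millis milliseconds, so a 1000 ms multiplier yields sleeps\n//! of 1 s, 4 s, 9 s, ... between attempts. `run_once` stands in for the\n//! wrapped command.\n\nuse std::thread::sleep;\nuse std::time::Duration;\n\nfn retry_with_quadratic_backoff<F>(\n    attempts: u32,\n    sleep_millis: u64,\n    mut run_once: F,\n) -> Result<(), String>\nwhere\n    F: FnMut() -> Result<(), String>,\n{\n    let mut last_err = \"no attempts were made\".to_string();\n    for attempt in 1..=attempts {\n        match run_once() {\n            Ok(()) => return Ok(()),\n            Err(e) => last_err = e,\n        }\n        if attempt < attempts {\n            // Quadratic growth: attempt^2 times the configured multiplier.\n            let backoff = Duration::from_millis(u64::from(attempt * attempt) * sleep_millis);\n            eprintln!(\"sleeping {:?} and then retrying\", backoff);\n            sleep(backoff);\n        }\n    }\n    Err(last_err)\n}\n\nfn main() {\n    let mut remaining_failures = 2;\n    let outcome = retry_with_quadratic_backoff(5, 1_000, || {\n        if remaining_failures > 0 {\n            remaining_failures -= 1;\n            Err(\"transient failure\".to_string())\n        } else {\n            Ok(())\n        }\n    });\n    println!(\"outcome: {:?}\", outcome);\n}\n"
  },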
  {
    "path": "fil-proofs-tooling/scripts/run-remote.sh",
    "content": "#!/usr/bin/env bash\n\nCMDS=$(cat <<EOF\n\nset -e\n\n# Creates a temporary directory in which we build rust-fil-proofs and capture\n# performance metrics. The name of the directory (today's UTC seconds plus 24\n# hours) serves as a cleanup mechanism; before metrics are captured, any expired\n# directories are removed.\n\n_one_day_from_now=\\$((\\$(date +%s) + 86400))\n_metrics_dir=/tmp/metrics/\\$_one_day_from_now\n\n# Find and prune any stale metrics directories.\nfind /tmp/metrics/ -maxdepth 1 -mindepth 1 -type d -printf \"%f\\n\" \\\n    | xargs -I {} bash -c 'if (({} < \\$(date +%s))) ; then rm -rf /tmp/metrics/{} ; fi' 2> /dev/null\n\ngit clone -b $1 --single-branch https://github.com/filecoin-project/rust-fil-proofs.git \\$_metrics_dir || true\n\ncd \\$_metrics_dir\n\n./fil-proofs-tooling/scripts/retry.sh 42 10 60000 \\\n    ./fil-proofs-tooling/scripts/with-lock.sh 42 /tmp/metrics.lock \\\n    ./fil-proofs-tooling/scripts/with-dots.sh \\\n    ${@:3}\nEOF\n)\n\nssh -q $2 \"$CMDS\"\n"
  },
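  {
    "path": "fil-proofs-tooling/scripts/examples/expiry_dir_prune_sketch.rs",
    "content": "//! Editor's note: an illustrative sketch, not part of the original\n//! repository (the path is hypothetical). It shows the expiry scheme\n//! run-remote.sh relies on: each metrics directory is named with the UNIX\n//! epoch second at which it expires (creation time plus 24 hours), so\n//! pruning stale runs reduces to a numeric comparison against \"now\".\n\nuse std::fs;\nuse std::path::Path;\nuse std::time::{SystemTime, UNIX_EPOCH};\n\nfn prune_expired(metrics_root: &Path) -> std::io::Result<()> {\n    let now = SystemTime::now()\n        .duration_since(UNIX_EPOCH)\n        .expect(\"system clock is before the UNIX epoch\")\n        .as_secs();\n    for entry in fs::read_dir(metrics_root)? {\n        let entry = entry?;\n        // Directory names are expiry timestamps; skip anything that is not\n        // a directory with a purely numeric name.\n        let expiry = match entry.file_name().to_str().and_then(|s| s.parse::<u64>().ok()) {\n            Some(n) => n,\n            None => continue,\n        };\n        if expiry < now && entry.path().is_dir() {\n            fs::remove_dir_all(entry.path())?;\n        }\n    }\n    Ok(())\n}\n\nfn main() -> std::io::Result<()> {\n    // The same root directory the script uses.\n    prune_expired(Path::new(\"/tmp/metrics\"))\n}\n"
  },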
  {
    "path": "fil-proofs-tooling/scripts/with-dots.sh",
    "content": "#!/usr/bin/env bash\n\ntrap cleanup EXIT\n\ncleanup() {\n  kill $DOT_PID\n}\n\n(\n  sleep 1\n  while true; do\n    (printf \".\" >&2)\n    sleep 1\n  done\n) &\nDOT_PID=$!\n\n$@\n"
  },
  {
    "path": "fil-proofs-tooling/scripts/with-lock.sh",
    "content": "#!/usr/bin/env bash\n\n# Inspired by http://mywiki.wooledge.org/BashFAQ/045\n\nfailure_code=$1\nlockdir=$2\nshift 2\n\n# Check to make sure that the process which owns the lock, if one exists, is\n# still alive. If the process is not alive, release the lock.\nfor lockdir_pid in $(find \"$lockdir\" -type f -exec basename {} \\; 2> /dev/null)\ndo\n    if ! ps -p \"${lockdir_pid}\" > /dev/null\n    then\n        (>&2 echo \"cleaning up leaked lock (pid=${lockdir_pid}, path=${lockdir})\")\n        rm -rf \"${lockdir}\"\n    fi\ndone\n\nif mkdir \"$lockdir\" > /dev/null 2>&1\nthen\n    (>&2 echo \"successfully acquired lock (pid=$$, path=${lockdir})\")\n\n    # Create a file to track the process id that acquired the lock. This\n    # is used to prevent leaks if the lock isn't relinquished correctly.\n    touch \"$lockdir/$$\"\n\n    # Unlock (by removing dir and pid file) when the script finishes.\n    trap '(>&2 echo \"relinquishing lock (${lockdir})\"); rm -rf \"$lockdir\"' EXIT\n\n    # Execute command\n    \"$@\"\nelse\n    (>&2 echo \"failed to acquire lock (path=${lockdir})\")\n    exit \"$failure_code\"\nfi\n"
  },
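  {
    "path": "fil-proofs-tooling/scripts/examples/dir_lock_sketch.rs",
    "content": "//! Editor's note: an illustrative sketch, not part of the original\n//! repository (the path and type names are hypothetical). It captures the\n//! locking idea behind with-lock.sh: directory creation is atomic, so\n//! whichever process creates the lock directory first owns the lock, and\n//! releasing it is just removing the directory (the script's EXIT trap).\n//! The script's stale-lock PID check is omitted here.\n\nuse std::fs;\nuse std::path::{Path, PathBuf};\n\nstruct DirLock {\n    path: PathBuf,\n}\n\nimpl DirLock {\n    /// Try to take the lock; `None` means another process holds it.\n    fn try_acquire(path: &Path) -> Option<DirLock> {\n        // create_dir fails if the directory already exists, which makes it\n        // a cheap cross-process test-and-set.\n        fs::create_dir(path).ok().map(|_| DirLock {\n            path: path.to_path_buf(),\n        })\n    }\n}\n\nimpl Drop for DirLock {\n    fn drop(&mut self) {\n        // Best-effort release, mirroring `rm -rf \"$lockdir\"` on EXIT.\n        let _ = fs::remove_dir_all(&self.path);\n    }\n}\n\nfn main() {\n    let lock_path = Path::new(\"/tmp/metrics.lock\");\n    match DirLock::try_acquire(lock_path) {\n        Some(_guard) => println!(\"acquired lock; held until _guard drops\"),\n        None => eprintln!(\"failed to acquire lock; another run is in progress\"),\n    }\n}\n"
  },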
  {
    "path": "fil-proofs-tooling/src/bin/benchy/hash_fns.rs",
    "content": "use bellperson::bls::Bls12;\nuse bellperson::gadgets::boolean::Boolean;\nuse bellperson::util_cs::test_cs::TestConstraintSystem;\nuse bellperson::ConstraintSystem;\nuse fil_proofs_tooling::metadata::Metadata;\nuse rand::RngCore;\nuse serde::Serialize;\nuse storage_proofs_core::util::{bits_to_bytes, bytes_into_boolean_vec, bytes_into_boolean_vec_be};\n\nfn blake2s_count(bytes: usize) -> anyhow::Result<Report> {\n    let rng = &mut rand::thread_rng();\n\n    let mut cs = TestConstraintSystem::<Bls12>::new();\n    let mut data = vec![0u8; bytes];\n    rng.fill_bytes(&mut data);\n\n    let data_bits: Vec<Boolean> = {\n        let mut cs = cs.namespace(|| \"data\");\n        bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), data.len())\n            .expect(\"failed to convert to boolean vector\")\n    };\n\n    let personalization = vec![0u8; 8];\n    let out: Vec<bool> =\n        bellperson::gadgets::blake2s::blake2s(&mut cs, &data_bits, &personalization)?\n            .into_iter()\n            .map(|b| b.get_value().expect(\"failed to get bool value\"))\n            .collect();\n\n    assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n    let expected = blake2s_simd::blake2s(&data);\n    assert_eq!(\n        expected.as_ref(),\n        &bits_to_bytes(&out[..])[..],\n        \"circuit and non circuit do not match\"\n    );\n\n    Ok(Report {\n        hash_fn: \"blake2s\".into(),\n        bytes,\n        constraints: cs.num_constraints(),\n    })\n}\n\nfn sha256_count(bytes: usize) -> anyhow::Result<Report> {\n    let mut rng = rand::thread_rng();\n\n    let mut cs = TestConstraintSystem::<Bls12>::new();\n    let mut data = vec![0u8; bytes];\n    rng.fill_bytes(&mut data);\n\n    let data_bits: Vec<Boolean> = {\n        let mut cs = cs.namespace(|| \"data\");\n        bytes_into_boolean_vec_be(&mut cs, Some(data.as_slice()), data.len())\n            .expect(\"failed to convert bytes into boolean vector big endian\")\n    };\n\n    let _out: Vec<bool> = bellperson::gadgets::sha256::sha256(&mut cs, &data_bits)?\n        .into_iter()\n        .map(|b| b.get_value().expect(\"failed to get bool value\"))\n        .collect();\n\n    assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n    Ok(Report {\n        hash_fn: \"sha256\".into(),\n        bytes,\n        constraints: cs.num_constraints(),\n    })\n}\n\n#[derive(Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Report {\n    hash_fn: String,\n    constraints: usize,\n    bytes: usize,\n}\n\npub fn run() -> anyhow::Result<()> {\n    let reports = vec![\n        blake2s_count(32)?,\n        blake2s_count(64)?,\n        blake2s_count(128)?,\n        blake2s_count(256)?,\n        sha256_count(32)?,\n        sha256_count(64)?,\n        sha256_count(128)?,\n        sha256_count(256)?,\n    ];\n\n    // print reports\n    let wrapped = Metadata::wrap(reports)?;\n    serde_json::to_writer(std::io::stdout(), &wrapped)?;\n\n    Ok(())\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/benchy/main.rs",
    "content": "//requires nightly, or later stable version\n//#![warn(clippy::unwrap_used)]\n\nuse std::io::{stdin, stdout};\nuse std::str::FromStr;\n\nuse anyhow::Result;\nuse byte_unit::Byte;\nuse clap::{value_t, App, Arg, SubCommand};\n\nuse storage_proofs_core::api_version::ApiVersion;\n\nuse crate::prodbench::ProdbenchInputs;\n\nmod hash_fns;\nmod merkleproofs;\nmod prodbench;\nmod window_post;\nmod winning_post;\n\nfn main() -> Result<()> {\n    fil_logger::init();\n\n    let window_post_cmd = SubCommand::with_name(\"window-post\")\n        .about(\"Benchmark Window PoST\")\n        .arg(\n            Arg::with_name(\"preserve-cache\")\n                .long(\"preserve-cache\")\n                .required(false)\n                .help(\"Preserve the directory where cached files are persisted\")\n                .takes_value(false),\n        )\n        .arg(\n            Arg::with_name(\"skip-precommit-phase1\")\n                .long(\"skip-precommit-phase1\")\n                .required(false)\n                .help(\"Skip precommit phase 1\")\n                .takes_value(false),\n        )\n        .arg(\n            Arg::with_name(\"skip-precommit-phase2\")\n                .long(\"skip-precommit-phase2\")\n                .required(false)\n                .help(\"Skip precommit phase 2\")\n                .takes_value(false),\n        )\n        .arg(\n            Arg::with_name(\"skip-commit-phase1\")\n                .long(\"skip-commit-phase1\")\n                .required(false)\n                .help(\"Skip commit phase 1\")\n                .takes_value(false),\n        )\n        .arg(\n            Arg::with_name(\"skip-commit-phase2\")\n                .long(\"skip-commit-phase2\")\n                .required(false)\n                .help(\"Skip commit phase 2\")\n                .takes_value(false),\n        )\n        .arg(\n            Arg::with_name(\"test-resume\")\n                .long(\"test-resume\")\n                .required(false)\n                .help(\"Test replication resume\")\n                .takes_value(false),\n        )\n        .arg(\n            Arg::with_name(\"cache\")\n                .long(\"cache\")\n                .required(false)\n                .help(\"The directory where cached files are persisted\")\n                .default_value(\"\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"size\")\n                .long(\"size\")\n                .required(true)\n                .help(\"The data size (e.g. 2KiB)\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"api_version\")\n                .long(\"api-version\")\n                .required(true)\n                .help(\"The api_version to use (default: 1.0.0)\")\n                .default_value(\"1.0.0\")\n                .takes_value(true),\n        );\n\n    let winning_post_cmd = SubCommand::with_name(\"winning-post\")\n        .about(\"Benchmark Winning PoST\")\n        .arg(\n            Arg::with_name(\"size\")\n                .long(\"size\")\n                .required(true)\n                .help(\"The data size (e.g. 
2KiB)\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"api_version\")\n                .long(\"api-version\")\n                .required(true)\n                .help(\"The api_version to use (default: 1.0.0)\")\n                .default_value(\"1.0.0\")\n                .takes_value(true),\n        );\n\n    let hash_cmd = SubCommand::with_name(\"hash-constraints\")\n        .about(\"Benchmark hash function inside of a circuit\");\n\n    let prodbench_cmd = SubCommand::with_name(\"prodbench\")\n        .about(\"Benchmark prodbench\")\n        .arg(\n            Arg::with_name(\"config\")\n                .long(\"config\")\n                .takes_value(true)\n                .required(false)\n                .help(\"path to config.json\"),\n        )\n        .arg(\n            Arg::with_name(\"skip-seal-proof\")\n                .long(\"skip-seal-proof\")\n                .takes_value(false)\n                .help(\"skip generation (and verification) of seal proof\"),\n        )\n        .arg(\n            Arg::with_name(\"skip-post-proof\")\n                .long(\"skip-post-proof\")\n                .takes_value(false)\n                .help(\"skip generation (and verification) of PoSt proof\"),\n        )\n        .arg(\n            Arg::with_name(\"only-replicate\")\n                .long(\"only-replicate\")\n                .takes_value(false)\n                .help(\"only run replication\"),\n        )\n        .arg(\n            Arg::with_name(\"only-add-piece\")\n                .long(\"only-add-piece\")\n                .takes_value(false)\n                .help(\"only run piece addition\"),\n        );\n\n    let merkleproof_cmd = SubCommand::with_name(\"merkleproofs\")\n        .about(\"Benchmark merkle proof generation\")\n        .arg(\n            Arg::with_name(\"size\")\n                .long(\"size\")\n                .required(true)\n                .help(\"The data size (e.g. 
2KiB)\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"proofs\")\n                .long(\"proofs\")\n                .default_value(\"1024\")\n                .required(false)\n                .help(\"How many proofs to generate (default is 1024)\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"validate\")\n                .long(\"validate\")\n                .required(false)\n                .default_value(\"true\")\n                .help(\"Validate proofs if specified\")\n                .takes_value(false),\n        );\n\n    let matches = App::new(\"benchy\")\n        .version(\"0.1\")\n        .subcommand(window_post_cmd)\n        .subcommand(winning_post_cmd)\n        .subcommand(hash_cmd)\n        .subcommand(prodbench_cmd)\n        .subcommand(merkleproof_cmd)\n        .get_matches();\n\n    match matches.subcommand() {\n        (\"window-post\", Some(m)) => {\n            let preserve_cache = m.is_present(\"preserve-cache\");\n            // For now these options are combined.\n            let skip_precommit_phase1 = m.is_present(\"skip-precommit-phase1\");\n            let skip_precommit_phase2 = m.is_present(\"skip-precommit-phase2\");\n            let skip_commit_phase1 = m.is_present(\"skip-commit-phase1\");\n            let skip_commit_phase2 = m.is_present(\"skip-commit-phase2\");\n            let test_resume = m.is_present(\"test-resume\");\n            let cache_dir = value_t!(m, \"cache\", String)?;\n            let sector_size = Byte::from_str(value_t!(m, \"size\", String)?)?.get_bytes() as usize;\n            let api_version = ApiVersion::from_str(&value_t!(m, \"api_version\", String)?)?;\n            window_post::run(\n                sector_size,\n                api_version,\n                cache_dir,\n                preserve_cache,\n                skip_precommit_phase1,\n                skip_precommit_phase2,\n                skip_commit_phase1,\n                skip_commit_phase2,\n                test_resume,\n            )?;\n        }\n        (\"winning-post\", Some(m)) => {\n            let sector_size = Byte::from_str(value_t!(m, \"size\", String)?)?.get_bytes() as usize;\n            let api_version = ApiVersion::from_str(&value_t!(m, \"api_version\", String)?)?;\n            winning_post::run(sector_size, api_version)?;\n        }\n        (\"hash-constraints\", Some(_m)) => {\n            hash_fns::run()?;\n        }\n        (\"merkleproofs\", Some(m)) => {\n            let size = Byte::from_str(value_t!(m, \"size\", String)?)?.get_bytes() as usize;\n\n            let proofs = value_t!(m, \"proofs\", usize)?;\n            merkleproofs::run(size, proofs, m.is_present(\"validate\"))?;\n        }\n        (\"prodbench\", Some(m)) => {\n            let inputs: ProdbenchInputs = if m.is_present(\"config\") {\n                let file = value_t!(m, \"config\", String).expect(\"failed to get config\");\n                serde_json::from_reader(\n                    std::fs::File::open(&file)\n                        .unwrap_or_else(|_| panic!(\"invalid file {:?}\", file)),\n                )\n            } else {\n                serde_json::from_reader(stdin())\n            }\n            .expect(\"failed to deserialize stdin to ProdbenchInputs\");\n\n            let outputs = prodbench::run(\n                inputs,\n                m.is_present(\"skip-seal-proof\"),\n                m.is_present(\"skip-post-proof\"),\n                
m.is_present(\"only-replicate\"),\n                m.is_present(\"only-add-piece\"),\n            );\n\n            serde_json::to_writer(stdout(), &outputs)\n                .expect(\"failed to write ProdbenchOutput to stdout\")\n        }\n        _ => panic!(\"carnation\"),\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/benchy/merkleproofs.rs",
    "content": "use std::fs::{create_dir, remove_dir_all};\nuse std::time::{SystemTime, UNIX_EPOCH};\n\nuse anyhow::Result;\nuse filecoin_hashers::Hasher;\nuse filecoin_proofs::with_shape;\nuse log::{debug, info};\nuse rand::{thread_rng, Rng};\nuse storage_proofs_core::merkle::{\n    generate_tree, get_base_tree_count, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper,\n};\nuse storage_proofs_core::util::default_rows_to_discard;\nuse typenum::Unsigned;\n\nfn generate_proofs<R: Rng, Tree: MerkleTreeTrait>(\n    rng: &mut R,\n    tree: &MerkleTreeWrapper<\n        <Tree as MerkleTreeTrait>::Hasher,\n        <Tree as MerkleTreeTrait>::Store,\n        <Tree as MerkleTreeTrait>::Arity,\n        <Tree as MerkleTreeTrait>::SubTreeArity,\n        <Tree as MerkleTreeTrait>::TopTreeArity,\n    >,\n    base_tree_nodes: usize,\n    nodes: usize,\n    proofs_count: usize,\n    validate: bool,\n) -> Result<()> {\n    let proofs_count = if proofs_count >= nodes {\n        info!(\n            \"requested {} proofs, but instead challenging all {} nodes sequentially\",\n            proofs_count, nodes\n        );\n\n        nodes\n    } else {\n        proofs_count\n    };\n\n    info!(\n        \"creating {} inclusion proofs over {} nodes (validate enabled? {})\",\n        proofs_count, nodes, validate\n    );\n\n    let rows_to_discard = default_rows_to_discard(\n        base_tree_nodes,\n        <Tree as MerkleTreeTrait>::Arity::to_usize(),\n    );\n    for i in 0..proofs_count {\n        let challenge = if proofs_count == nodes {\n            i\n        } else {\n            rng.gen_range(0, nodes)\n        };\n        debug!(\"challenge[{}] = {}\", i, challenge);\n        let proof = tree\n            .gen_cached_proof(challenge, Some(rows_to_discard))\n            .expect(\"failed to generate proof\");\n        if validate {\n            assert!(proof.validate(challenge));\n        }\n    }\n\n    Ok(())\n}\n\npub fn run_merkleproofs_bench<Tree: 'static + MerkleTreeTrait>(\n    size: usize,\n    proofs_count: usize,\n    validate: bool,\n) -> Result<()> {\n    let tree_count = get_base_tree_count::<Tree>();\n    let base_tree_leaves =\n        size / std::mem::size_of::<<Tree::Hasher as Hasher>::Domain>() / tree_count;\n\n    let timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_millis();\n    let temp_path = std::env::temp_dir().join(format!(\"merkle-proof-bench-{}\", timestamp));\n    create_dir(&temp_path)?;\n\n    let mut rng = thread_rng();\n    info!(\n        \"generating merkle tree for sector size {} [base_tree_leaves {}, tree_count {}]\",\n        size, base_tree_leaves, tree_count\n    );\n    let (_data, tree) = generate_tree::<Tree, _>(\n        &mut rng,\n        base_tree_leaves * tree_count,\n        Some(temp_path.clone()),\n    );\n    generate_proofs::<_, Tree>(\n        &mut rng,\n        &tree,\n        base_tree_leaves,\n        base_tree_leaves * tree_count,\n        proofs_count,\n        validate,\n    )?;\n\n    remove_dir_all(&temp_path)?;\n\n    Ok(())\n}\n\npub fn run(size: usize, proofs_count: usize, validate: bool) -> Result<()> {\n    with_shape!(\n        size as u64,\n        run_merkleproofs_bench,\n        size,\n        proofs_count,\n        validate\n    )\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/benchy/prodbench.rs",
    "content": "use std::fs::remove_file;\nuse std::str::FromStr;\n\nuse bellperson::{bls::Bls12, util_cs::bench_cs::BenchCS, Circuit};\nuse fil_proofs_tooling::{\n    measure,\n    shared::{create_replicas, PROVER_ID, RANDOMNESS, TICKET_BYTES},\n    Metadata,\n};\nuse filecoin_hashers::sha256::Sha256Hasher;\nuse filecoin_proofs::{\n    clear_cache, parameters::public_params, seal_commit_phase1, seal_commit_phase2,\n    validate_cache_for_commit, DefaultOctLCTree, DefaultOctTree, PaddedBytesAmount, PoRepConfig,\n    PoRepProofPartitions, SectorSize, DRG_DEGREE, EXP_DEGREE, LAYERS, POREP_MINIMUM_CHALLENGES,\n    POREP_PARTITIONS,\n};\nuse log::info;\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse serde::{Deserialize, Serialize};\n#[cfg(feature = \"measurements\")]\nuse storage_proofs_core::measurements::{Operation, OP_MEASUREMENTS};\nuse storage_proofs_core::{\n    api_version::ApiVersion, compound_proof::CompoundProof, parameter_cache::CacheableParameters,\n    proof::ProofScheme,\n};\nuse storage_proofs_porep::stacked::{LayerChallenges, SetupParams, StackedCompound, StackedDrg};\n\nconst SEED: [u8; 16] = [\n    0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5,\n];\n\ntype ProdbenchTree = DefaultOctTree;\n\n#[derive(Default, Debug, Serialize)]\npub struct ProdbenchReport {\n    inputs: ProdbenchInputs,\n    outputs: ProdbenchOutputs,\n}\n\n#[derive(Default, Debug, Deserialize, Serialize)]\npub struct ProdbenchInputs {\n    /// The size of sector.\n    sector_size: String,\n    porep_challenges: u64,\n    porep_partitions: u8,\n    post_challenges: u64,\n    post_challenged_nodes: u64,\n    stacked_layers: u64,\n    /// How many sectors should be created in parallel.\n    num_sectors: u64,\n    api_version: String,\n}\n\nimpl ProdbenchInputs {\n    pub fn sector_size_bytes(&self) -> u64 {\n        bytefmt::parse(&self.sector_size).expect(\"failed to parse sector size\")\n    }\n    pub fn api_version(&self) -> ApiVersion {\n        ApiVersion::from_str(&self.api_version).expect(\"failed to parse api version\")\n    }\n}\n\n#[derive(Default, Debug, Serialize)]\npub struct ProdbenchOutputs {\n    comm_d_cpu_time_ms: u64,\n    comm_d_wall_time_ms: u64,\n    encode_window_time_all_cpu_time_ms: u64,\n    encode_window_time_all_wall_time_ms: u64,\n    encoding_cpu_time_ms: u64,\n    encoding_wall_time_ms: u64,\n    generate_tree_c_cpu_time_ms: u64,\n    generate_tree_c_wall_time_ms: u64,\n    porep_commit_time_cpu_time_ms: u64,\n    porep_commit_time_wall_time_ms: u64,\n    porep_proof_gen_cpu_time_ms: u64,\n    porep_proof_gen_wall_time_ms: u64,\n    post_finalize_ticket_cpu_time_ms: u64,\n    post_finalize_ticket_time_ms: u64,\n    post_partial_ticket_hash_cpu_time_ms: u64,\n    post_partial_ticket_hash_time_ms: u64,\n    post_proof_gen_cpu_time_ms: u64,\n    post_proof_gen_wall_time_ms: u64,\n    post_read_challenged_range_cpu_time_ms: u64,\n    post_read_challenged_range_time_ms: u64,\n    post_verify_cpu_time_ms: u64,\n    post_verify_wall_time_ms: u64,\n    tree_r_last_cpu_time_ms: u64,\n    tree_r_last_wall_time_ms: u64,\n    window_comm_leaves_time_cpu_time_ms: u64,\n    window_comm_leaves_time_wall_time_ms: u64,\n    add_piece_cpu_time_ms: u64,\n    add_piece_wall_time_ms: u64,\n    generate_piece_commitment_cpu_time_ms: u64,\n    generate_piece_commitment_wall_time_ms: u64,\n    #[serde(flatten)]\n    circuits: CircuitOutputs,\n}\n\n#[cfg(not(feature = \"measurements\"))]\nfn augment_with_op_measurements(mut _output: &mut 
ProdbenchOutputs) {}\n\n#[cfg(feature = \"measurements\")]\nfn augment_with_op_measurements(mut output: &mut ProdbenchOutputs) {\n    // drop the tx side of the channel, causing the iterator to yield None\n    // see also: https://doc.rust-lang.org/src/std/sync/mpsc/mod.rs.html#368\n    OP_MEASUREMENTS\n        .0\n        .lock()\n        .expect(\"failed to acquire mutex\")\n        .take();\n\n    let measurements = OP_MEASUREMENTS\n        .1\n        .lock()\n        .expect(\"failed to acquire lock on rx side of perf channel\");\n\n    for m in measurements.iter() {\n        let cpu_time = m.cpu_time.as_millis() as u64;\n        let wall_time = m.wall_time.as_millis() as u64;\n\n        match m.op {\n            Operation::GenerateTreeC => {\n                output.generate_tree_c_cpu_time_ms = cpu_time;\n                output.generate_tree_c_wall_time_ms = wall_time;\n            }\n            Operation::GenerateTreeRLast => {\n                output.tree_r_last_cpu_time_ms = cpu_time;\n                output.tree_r_last_wall_time_ms = wall_time;\n            }\n            Operation::CommD => {\n                output.comm_d_cpu_time_ms = cpu_time;\n                output.comm_d_wall_time_ms = wall_time;\n            }\n            Operation::EncodeWindowTimeAll => {\n                output.encode_window_time_all_cpu_time_ms = cpu_time;\n                output.encode_window_time_all_wall_time_ms = wall_time;\n            }\n            Operation::WindowCommLeavesTime => {\n                output.window_comm_leaves_time_cpu_time_ms = cpu_time;\n                output.window_comm_leaves_time_wall_time_ms = wall_time;\n            }\n            Operation::PorepCommitTime => {\n                output.porep_commit_time_cpu_time_ms = cpu_time;\n                output.porep_commit_time_wall_time_ms = wall_time;\n            }\n            Operation::AddPiece => {\n                output.add_piece_cpu_time_ms = cpu_time;\n                output.add_piece_wall_time_ms = wall_time;\n            }\n            Operation::GeneratePieceCommitment => {\n                output.generate_piece_commitment_cpu_time_ms = cpu_time;\n                output.generate_piece_commitment_wall_time_ms = wall_time;\n            }\n            _ => {}\n        }\n    }\n}\n\nfn configure_global_config(inputs: &ProdbenchInputs) {\n    LAYERS\n        .write()\n        .expect(\"LAYERS poisoned\")\n        .insert(inputs.sector_size_bytes(), inputs.stacked_layers as usize);\n    POREP_PARTITIONS\n        .write()\n        .expect(\"POREP_PARTITIONS poisoned\")\n        .insert(inputs.sector_size_bytes(), inputs.porep_partitions);\n    POREP_MINIMUM_CHALLENGES\n        .write()\n        .expect(\"POREP_MINIMUM_CHALLENGES poisoned\")\n        .insert(inputs.sector_size_bytes(), inputs.porep_challenges);\n}\n\npub fn run(\n    inputs: ProdbenchInputs,\n    skip_seal_proof: bool,\n    skip_post_proof: bool,\n    only_replicate: bool,\n    only_add_piece: bool,\n) -> Metadata<ProdbenchReport> {\n    configure_global_config(&inputs);\n\n    let mut outputs = ProdbenchOutputs::default();\n\n    let sector_size = SectorSize(inputs.sector_size_bytes());\n    let arbitrary_porep_id = [123; 32];\n\n    assert!(inputs.num_sectors > 0, \"Missing num_sectors\");\n\n    let (cfg, repls) = create_replicas::<DefaultOctLCTree>(\n        sector_size,\n        inputs.num_sectors as usize,\n        only_add_piece,\n        arbitrary_porep_id,\n        inputs.api_version(),\n    );\n\n    if only_add_piece || only_replicate {\n      
  augment_with_op_measurements(&mut outputs);\n        return Metadata::wrap(ProdbenchReport { inputs, outputs })\n            .expect(\"failed to retrieve metadata\");\n    }\n\n    let (created, replica_measurement) = repls.expect(\"unreachable: only_add_piece==false\");\n    generate_params(&inputs);\n\n    if !skip_seal_proof {\n        for (value, (sector_id, replica_info)) in\n            replica_measurement.return_value.iter().zip(created.iter())\n        {\n            let measured = measure(|| {\n                validate_cache_for_commit::<_, _, DefaultOctLCTree>(\n                    &replica_info.private_replica_info.cache_dir_path(),\n                    &replica_info.private_replica_info.replica_path(),\n                )?;\n\n                let phase1_output = seal_commit_phase1::<_, DefaultOctLCTree>(\n                    cfg,\n                    &replica_info.private_replica_info.cache_dir_path(),\n                    &replica_info.private_replica_info.replica_path(),\n                    PROVER_ID,\n                    *sector_id,\n                    TICKET_BYTES,\n                    RANDOMNESS,\n                    value.clone(),\n                    &replica_info.piece_info,\n                )?;\n\n                clear_cache::<DefaultOctLCTree>(\n                    &replica_info.private_replica_info.cache_dir_path(),\n                )?;\n\n                seal_commit_phase2(cfg, phase1_output, PROVER_ID, *sector_id)\n            })\n            .expect(\"failed to prove sector\");\n\n            outputs.porep_proof_gen_cpu_time_ms += measured.cpu_time.as_millis() as u64;\n            outputs.porep_proof_gen_wall_time_ms += measured.wall_time.as_millis() as u64;\n        }\n    }\n\n    if !skip_post_proof {\n        // TODO: add winning and window PoSt\n    }\n\n    // Clean-up persisted replica files.\n    for (_, info) in &created {\n        remove_file(info.private_replica_info.replica_path())\n            .expect(\"failed to remove sealed replica file\");\n    }\n\n    augment_with_op_measurements(&mut outputs);\n    outputs.circuits = run_measure_circuits(&inputs);\n\n    Metadata::wrap(ProdbenchReport { inputs, outputs }).expect(\"failed to retrieve metadata\")\n}\n\n#[derive(Default, Debug, Serialize)]\nstruct CircuitOutputs {\n    pub porep_constraints: usize,\n}\n\nfn run_measure_circuits(i: &ProdbenchInputs) -> CircuitOutputs {\n    let porep_constraints = measure_porep_circuit(i);\n\n    CircuitOutputs { porep_constraints }\n}\n\nfn measure_porep_circuit(i: &ProdbenchInputs) -> usize {\n    let layers = i.stacked_layers as usize;\n    let challenge_count = i.porep_challenges as usize;\n    let drg_degree = DRG_DEGREE;\n    let expansion_degree = EXP_DEGREE;\n    let nodes = (i.sector_size_bytes() / 32) as usize;\n    let layer_challenges = LayerChallenges::new(layers, challenge_count);\n\n    let arbitrary_porep_id = [222; 32];\n    let sp = SetupParams {\n        nodes,\n        degree: drg_degree,\n        expansion_degree,\n        porep_id: arbitrary_porep_id,\n        layer_challenges,\n        api_version: i.api_version(),\n    };\n\n    let pp = StackedDrg::<ProdbenchTree, Sha256Hasher>::setup(&sp).expect(\"failed to setup DRG\");\n\n    let mut cs = BenchCS::<Bls12>::new();\n    <StackedCompound<_, _> as CompoundProof<StackedDrg<ProdbenchTree, Sha256Hasher>, _>>::blank_circuit(\n        &pp,\n    )\n        .synthesize(&mut cs)\n        .expect(\"failed to synthesize stacked compound\");\n\n    cs.num_constraints()\n}\n\nfn generate_params(i: 
&ProdbenchInputs) {\n    let sector_size = SectorSize(i.sector_size_bytes());\n    let partitions = PoRepProofPartitions(\n        *POREP_PARTITIONS\n            .read()\n            .expect(\"POREP_PARTITIONS poisoned\")\n            .get(&i.sector_size_bytes())\n            .expect(\"unknown sector size\"),\n    );\n    info!(\n        \"generating params: porep: (size: {:?}, partitions: {:?})\",\n        &sector_size, &partitions\n    );\n    let dummy_porep_id = [0; 32];\n\n    cache_porep_params(PoRepConfig {\n        sector_size,\n        partitions,\n        porep_id: dummy_porep_id,\n        api_version: i.api_version(),\n    });\n}\n\nfn cache_porep_params(porep_config: PoRepConfig) {\n    let public_params = public_params(\n        PaddedBytesAmount::from(porep_config),\n        usize::from(PoRepProofPartitions::from(porep_config)),\n        porep_config.porep_id,\n        porep_config.api_version,\n    )\n    .expect(\"failed to get public_params\");\n\n    {\n        let circuit = <StackedCompound<ProdbenchTree, _> as CompoundProof<\n            StackedDrg<ProdbenchTree, Sha256Hasher>,\n            _,\n        >>::blank_circuit(&public_params);\n        StackedCompound::<ProdbenchTree, Sha256Hasher>::get_param_metadata(circuit, &public_params)\n            .expect(\"cannot get param metadata\");\n    }\n    {\n        let circuit = <StackedCompound<ProdbenchTree, _> as CompoundProof<\n            StackedDrg<ProdbenchTree, Sha256Hasher>,\n            _,\n        >>::blank_circuit(&public_params);\n        StackedCompound::<ProdbenchTree, Sha256Hasher>::get_groth_params(\n            Some(&mut XorShiftRng::from_seed(SEED)),\n            circuit,\n            &public_params,\n        )\n        .expect(\"failed to get groth params\");\n    }\n    {\n        let circuit = <StackedCompound<ProdbenchTree, _> as CompoundProof<\n            StackedDrg<ProdbenchTree, Sha256Hasher>,\n            _,\n        >>::blank_circuit(&public_params);\n\n        StackedCompound::<ProdbenchTree, Sha256Hasher>::get_verifying_key(\n            Some(&mut XorShiftRng::from_seed(SEED)),\n            circuit,\n            &public_params,\n        )\n        .expect(\"failed to get verifying key\");\n    }\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/benchy/stacked.rs",
    "content": "use std::fs::OpenOptions;\nuse std::time::Duration;\nuse std::{io, u32};\n\nuse anyhow::bail;\nuse bellperson::Circuit;\nuse chrono::Utc;\nuse log::info;\nuse merkletree::store::StoreConfig;\nuse paired::bls12_381::Bls12;\nuse rand::Rng;\nuse serde::Serialize;\n\nuse fil_proofs_tooling::{measure, FuncMeasurement, Metadata};\nuse storage_proofs::cache_key::CacheKey;\nuse storage_proofs::compound_proof::{self, CompoundProof};\nuse storage_proofs::drgraph::*;\nuse storage_proofs::gadgets::BenchCS;\nuse storage_proofs::hasher::{Blake2sHasher, Domain, Hasher, PedersenHasher, Sha256Hasher};\nuse storage_proofs::merkle::{BinaryMerkleTree, MerkleTreeTrait};\nuse storage_proofs::porep::stacked::StackedCompound;\nuse storage_proofs::porep::stacked::{\n    self, ChallengeRequirements, LayerChallenges, StackedDrg, TemporaryAuxCache, BINARY_ARITY,\n    EXP_DEGREE,\n};\nuse storage_proofs::porep::PoRep;\nuse storage_proofs::proof::ProofScheme;\nuse storage_proofs::test_helper::setup_replica;\nuse storage_proofs::util::default_rows_to_discard;\nuse tempfile::TempDir;\n\nfn dump_proof_bytes<Tree: MerkleTreeTrait>(\n    all_partition_proofs: &[Vec<stacked::Proof<Tree, Sha256Hasher>>],\n) -> anyhow::Result<()> {\n    let file = OpenOptions::new()\n        .write(true)\n        .create(true)\n        .open(format!(\"./proofs-{:?}\", Utc::now()))\n        .unwrap();\n\n    serde_json::to_writer(file, all_partition_proofs)?;\n\n    Ok(())\n}\n\n#[derive(Clone, Debug)]\nstruct Params {\n    samples: usize,\n    data_size: usize,\n    partitions: usize,\n    layer_challenges: LayerChallenges,\n    circuit: bool,\n    groth: bool,\n    bench: bool,\n    extract: bool,\n    dump_proofs: bool,\n    bench_only: bool,\n    hasher: String,\n}\n\nimpl From<Params> for Inputs {\n    fn from(p: Params) -> Self {\n        Inputs {\n            sector_size: p.data_size,\n            partitions: p.partitions,\n            hasher: p.hasher.clone(),\n            samples: p.samples,\n            layers: p.layer_challenges.layers(),\n            partition_challenges: p.layer_challenges.challenges_count_all() / p.partitions,\n            total_challenges: p.layer_challenges.challenges_count_all(),\n        }\n    }\n}\n\nfn generate_report<H: 'static>(params: Params, cache_dir: &TempDir) -> anyhow::Result<Report>\nwhere\n    H: Hasher,\n{\n    let FuncMeasurement {\n        cpu_time: total_cpu_time,\n        wall_time: total_wall_time,\n        return_value: mut report,\n    } = measure(|| {\n        let mut report = Report {\n            inputs: Inputs::from(params.clone()),\n            outputs: Default::default(),\n        };\n\n        let Params {\n            samples,\n            data_size,\n            partitions,\n            circuit,\n            groth,\n            bench,\n            extract,\n            dump_proofs,\n            bench_only,\n            layer_challenges,\n            ..\n        } = &params;\n\n        let mut total_proving_wall_time = Duration::new(0, 0);\n        let mut total_proving_cpu_time = Duration::new(0, 0);\n\n        let rng = &mut rand::thread_rng();\n        let nodes = data_size / 32;\n        let data: Vec<u8> = (0..nodes)\n            .flat_map(|_| {\n                let v: H::Domain = H::Domain::random(rng);\n                v.into_bytes()\n            })\n            .collect();\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let config = StoreConfig::new(\n            
cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            default_rows_to_discard(nodes, BINARY_ARITY),\n        );\n\n        let replica_id = H::Domain::random(rng);\n        let arbitrary_porep_id = [11; 32];\n        let sp = stacked::SetupParams {\n            nodes,\n            degree: BASE_DEGREE,\n            expansion_degree: EXP_DEGREE,\n            porep_id: arbitrary_porep_id,\n            layer_challenges: layer_challenges.clone(),\n        };\n\n        let pp = StackedDrg::<BinaryMerkleTree<H>, Sha256Hasher>::setup(&sp)?;\n\n        let (pub_in, priv_in, d) = if *bench_only {\n            (None, None, None)\n        } else {\n            // Generate a replica path.\n            let replica_path = cache_dir.path().join(\"replica-path\");\n            let mut mmapped_data = setup_replica(&data, &replica_path);\n\n            let seed = rng.gen();\n\n            let FuncMeasurement {\n                cpu_time: replication_cpu_time,\n                wall_time: replication_wall_time,\n                return_value: (pub_inputs, priv_inputs),\n            } = measure(|| {\n                let (tau, (p_aux, t_aux)) =\n                    StackedDrg::<BinaryMerkleTree<H>, Sha256Hasher>::replicate(\n                        &pp,\n                        &replica_id,\n                        (&mut mmapped_data[..]).into(),\n                        None,\n                        config.clone(),\n                        replica_path.clone(),\n                    )?;\n\n                let pb = stacked::PublicInputs::<H::Domain, <Sha256Hasher as Hasher>::Domain> {\n                    replica_id,\n                    seed,\n                    tau: Some(tau),\n                    k: Some(0),\n                };\n\n                // Convert TemporaryAux to TemporaryAuxCache, which instantiates all\n                // elements based on the configs stored in TemporaryAux.\n                let t_aux = TemporaryAuxCache::new(&t_aux, replica_path)\n                    .expect(\"failed to restore contents of t_aux\");\n\n                let pv = stacked::PrivateInputs { p_aux, t_aux };\n\n                Ok((pb, pv))\n            })?;\n\n            let avg_duration = |duration: Duration, data_size: &usize| {\n                if *data_size > (u32::MAX as usize) {\n                    // Duration only supports division by u32, so if data_size (of type usize) is larger,\n                    // we have to jump through some hoops to get the value we want, which is duration / size.\n                    // Consider: x = size / max\n                    //           y = duration / x = duration * max / size\n                    //           y / max = (duration * max / size) / max = duration / size\n                    let x = *data_size as f64 / f64::from(u32::MAX);\n                    let y = duration / x as u32;\n                    y / u32::MAX\n                } else {\n                    duration / (*data_size as u32)\n                }\n            };\n\n            report.outputs.replication_wall_time_ms =\n                Some(replication_wall_time.as_millis() as u64);\n            report.outputs.replication_cpu_time_ms = Some(replication_cpu_time.as_millis() as u64);\n\n            report.outputs.replication_wall_time_ns_per_byte =\n                Some(avg_duration(replication_wall_time, data_size).as_nanos() as u64);\n            report.outputs.replication_cpu_time_ns_per_byte =\n                Some(avg_duration(replication_cpu_time, data_size).as_nanos() as u64);\n\n      
      let FuncMeasurement {\n                cpu_time: vanilla_proving_cpu_time,\n                wall_time: vanilla_proving_wall_time,\n                return_value: all_partition_proofs,\n            } = measure(|| {\n                StackedDrg::<BinaryMerkleTree<H>, Sha256Hasher>::prove_all_partitions(\n                    &pp,\n                    &pub_inputs,\n                    &priv_inputs,\n                    *partitions,\n                )\n            })?;\n\n            report.outputs.vanilla_proving_wall_time_us =\n                Some(vanilla_proving_wall_time.as_micros() as u64);\n            report.outputs.vanilla_proving_cpu_time_us =\n                Some(vanilla_proving_cpu_time.as_micros() as u64);\n\n            total_proving_wall_time += vanilla_proving_wall_time;\n            total_proving_cpu_time += vanilla_proving_cpu_time;\n\n            if *dump_proofs {\n                dump_proof_bytes(&all_partition_proofs)?;\n            }\n\n            let mut total_verification_time = FuncMeasurement {\n                cpu_time: Duration::new(0, 0),\n                wall_time: Duration::new(0, 0),\n                return_value: (),\n            };\n\n            for _ in 0..*samples {\n                let m = measure(|| {\n                    let verified =\n                        StackedDrg::<BinaryMerkleTree<H>, Sha256Hasher>::verify_all_partitions(\n                            &pp,\n                            &pub_inputs,\n                            &all_partition_proofs,\n                        )?;\n\n                    if !verified {\n                        panic!(\"verification failed\");\n                    }\n\n                    Ok(())\n                })?;\n\n                total_verification_time.cpu_time += m.cpu_time;\n                total_verification_time.wall_time += m.wall_time;\n\n                report.outputs.vanilla_verification_wall_time_us =\n                    Some(m.wall_time.as_micros() as u64);\n                report.outputs.vanilla_verification_cpu_time_us =\n                    Some(m.cpu_time.as_micros() as u64);\n            }\n\n            let avg_seconds = |duration: Duration, samples: &usize| {\n                let n = duration / *samples as u32;\n                f64::from(n.subsec_nanos()) / 1_000_000_000f64 + (n.as_secs() as f64)\n            };\n\n            report.outputs.verifying_wall_time_avg_ms =\n                Some((avg_seconds(total_verification_time.wall_time, samples) * 1000.0) as u64);\n            report.outputs.verifying_cpu_time_avg_ms =\n                Some((avg_seconds(total_verification_time.cpu_time, samples) * 1000.0) as u64);\n\n            (Some(pub_inputs), Some(priv_inputs), Some(data))\n        };\n\n        if *circuit || *groth || *bench || *bench_only {\n            let CircuitWorkMeasurement {\n                cpu_time,\n                wall_time,\n            } = do_circuit_work(&pp, pub_in, priv_in, &params, &mut report)?;\n            total_proving_wall_time += wall_time;\n            total_proving_cpu_time += cpu_time;\n        }\n\n        if let Some(data) = d {\n            if *extract {\n                let m = measure(|| {\n                    StackedDrg::<BinaryMerkleTree<H>, Sha256Hasher>::extract_all(\n                        &pp,\n                        &replica_id,\n                        &data,\n                        Some(config.clone()),\n                    )\n                })?;\n\n                assert_ne!(&(*data), m.return_value.as_slice());\n                
report.outputs.extracting_wall_time_ms = Some(m.wall_time.as_millis() as u64);\n                report.outputs.extracting_cpu_time_ms = Some(m.cpu_time.as_millis() as u64);\n            }\n        }\n\n        // total proving time is the sum of \"the circuit work\" and vanilla\n        // proving time\n        report.outputs.total_proving_wall_time_ms =\n            Some(total_proving_wall_time.as_millis() as u64);\n        report.outputs.total_proving_cpu_time_ms = Some(total_proving_cpu_time.as_millis() as u64);\n\n        Ok(report)\n    })?;\n\n    report.outputs.total_report_wall_time_ms = total_wall_time.as_millis() as u64;\n    report.outputs.total_report_cpu_time_ms = total_cpu_time.as_millis() as u64;\n\n    Ok(report)\n}\n\nstruct CircuitWorkMeasurement {\n    cpu_time: Duration,\n    wall_time: Duration,\n}\n\nfn do_circuit_work<Tree: 'static + MerkleTreeTrait>(\n    pp: &<StackedDrg<Tree, Sha256Hasher> as ProofScheme>::PublicParams,\n    pub_in: Option<<StackedDrg<Tree, Sha256Hasher> as ProofScheme>::PublicInputs>,\n    priv_in: Option<<StackedDrg<Tree, Sha256Hasher> as ProofScheme>::PrivateInputs>,\n    params: &Params,\n    report: &mut Report,\n) -> anyhow::Result<CircuitWorkMeasurement> {\n    let mut proving_wall_time = Duration::new(0, 0);\n    let mut proving_cpu_time = Duration::new(0, 0);\n\n    let Params {\n        samples,\n        partitions,\n        circuit,\n        groth,\n        bench,\n        bench_only,\n        ..\n    } = params;\n\n    let compound_public_params = compound_proof::PublicParams {\n        vanilla_params: pp.clone(),\n        partitions: Some(*partitions),\n        priority: false,\n    };\n\n    if *bench || *circuit || *bench_only {\n        info!(\"Generating blank circuit: start\");\n        let mut cs = BenchCS::<Bls12>::new();\n        <StackedCompound<_, _> as CompoundProof<StackedDrg<Tree, Sha256Hasher>, _>>::blank_circuit(\n            &pp,\n        )\n        .synthesize(&mut cs)?;\n\n        report.outputs.circuit_num_inputs = Some(cs.num_inputs() as u64);\n        report.outputs.circuit_num_constraints = Some(cs.num_constraints() as u64);\n        info!(\"Generating blank circuit: done\");\n    }\n\n    if *groth {\n        info!(\"Generating Groth Proof\");\n        let pub_inputs = pub_in.expect(\"missing public inputs\");\n        let priv_inputs = priv_in.expect(\"missing private inputs\");\n\n        // We should implement a method of CompoundProof, which will skip vanilla proving.\n        // We should also allow the serialized vanilla proofs to be passed (as a file) to the example\n        // and skip replication/vanilla-proving entirely.\n        let gparams = <StackedCompound<_, _> as CompoundProof<\n            StackedDrg<Tree, Sha256Hasher>,\n            _,\n        >>::groth_params::<rand::rngs::OsRng>(\n            None, &compound_public_params.vanilla_params\n        )?;\n\n        let multi_proof = {\n            let FuncMeasurement {\n                wall_time,\n                cpu_time,\n                return_value,\n            } = measure(|| {\n                StackedCompound::prove(&compound_public_params, &pub_inputs, &priv_inputs, &gparams)\n            })?;\n            proving_wall_time += wall_time;\n            proving_cpu_time += cpu_time;\n            return_value\n        };\n\n        let verified = {\n            let mut total_groth_verifying_wall_time = Duration::new(0, 0);\n            let mut total_groth_verifying_cpu_time = Duration::new(0, 0);\n\n            let mut result = true;\n         
   for _ in 0..*samples {\n                let m = measure(|| {\n                    StackedCompound::verify(\n                        &compound_public_params,\n                        &pub_inputs,\n                        &multi_proof,\n                        &ChallengeRequirements {\n                            minimum_challenges: 1,\n                        },\n                    )\n                })?;\n\n                // If one verification fails, result becomes permanently false.\n                result = result && m.return_value;\n                total_groth_verifying_wall_time += m.wall_time;\n                total_groth_verifying_cpu_time += m.cpu_time;\n            }\n            let avg_groth_verifying_wall_time = total_groth_verifying_wall_time / *samples as u32;\n            let avg_groth_verifying_cpu_time = total_groth_verifying_cpu_time / *samples as u32;\n\n            report.outputs.avg_groth_verifying_wall_time_ms =\n                Some(avg_groth_verifying_wall_time.as_millis() as u64);\n            report.outputs.avg_groth_verifying_cpu_time_ms =\n                Some(avg_groth_verifying_cpu_time.as_millis() as u64);\n\n            result\n        };\n        assert!(verified);\n    }\n\n    Ok(CircuitWorkMeasurement {\n        cpu_time: proving_cpu_time,\n        wall_time: proving_wall_time,\n    })\n}\n\n#[derive(Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Inputs {\n    sector_size: usize,\n    partitions: usize,\n    hasher: String,\n    samples: usize,\n    layers: usize,\n    partition_challenges: usize,\n    total_challenges: usize,\n}\n\n#[derive(Serialize, Default)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Outputs {\n    avg_groth_verifying_cpu_time_ms: Option<u64>,\n    avg_groth_verifying_wall_time_ms: Option<u64>,\n    circuit_num_constraints: Option<u64>,\n    circuit_num_inputs: Option<u64>,\n    extracting_cpu_time_ms: Option<u64>,\n    extracting_wall_time_ms: Option<u64>,\n    replication_wall_time_ms: Option<u64>,\n    replication_cpu_time_ms: Option<u64>,\n    replication_wall_time_ns_per_byte: Option<u64>,\n    replication_cpu_time_ns_per_byte: Option<u64>,\n    total_report_cpu_time_ms: u64,\n    total_report_wall_time_ms: u64,\n    total_proving_cpu_time_ms: Option<u64>,\n    total_proving_wall_time_ms: Option<u64>,\n    vanilla_proving_cpu_time_us: Option<u64>,\n    vanilla_proving_wall_time_us: Option<u64>,\n    vanilla_verification_wall_time_us: Option<u64>,\n    vanilla_verification_cpu_time_us: Option<u64>,\n    verifying_wall_time_avg_ms: Option<u64>,\n    verifying_cpu_time_avg_ms: Option<u64>,\n}\n\n#[derive(Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Report {\n    inputs: Inputs,\n    outputs: Outputs,\n}\n\nimpl Report {\n    /// Print all results to stdout\n    pub fn print(&self) {\n        let wrapped = Metadata::wrap(&self).expect(\"failed to retrieve metadata\");\n        serde_json::to_writer(io::stdout(), &wrapped).expect(\"cannot write report-JSON to stdout\");\n    }\n}\n\npub struct RunOpts {\n    pub bench: bool,\n    pub bench_only: bool,\n    pub challenges: usize,\n    pub circuit: bool,\n    pub dump: bool,\n    pub extract: bool,\n    pub groth: bool,\n    pub hasher: String,\n    pub layers: usize,\n    pub no_bench: bool,\n    pub no_tmp: bool,\n    pub partitions: usize,\n    pub size: usize,\n}\n\npub fn run(opts: RunOpts) -> anyhow::Result<()> {\n    let layer_challenges = LayerChallenges::new(opts.layers, opts.challenges);\n\n    let params = Params {\n        data_size: opts.size * 1024,\n        partitions: opts.partitions,\n        layer_challenges,\n        dump_proofs: opts.dump,\n        groth: opts.groth,\n        bench: !opts.no_bench && opts.bench,\n        bench_only: opts.bench_only,\n        circuit: opts.circuit,\n        extract: opts.extract,\n        hasher: opts.hasher,\n        samples: 5,\n    };\n\n    info!(\"Benchy Stacked: {:?}\", &params);\n\n    let cache_dir = tempfile::tempdir().unwrap();\n\n    let report = match params.hasher.as_ref() {\n        \"pedersen\" => generate_report::<PedersenHasher>(params, &cache_dir)?,\n        \"sha256\" => generate_report::<Sha256Hasher>(params, &cache_dir)?,\n        \"blake2s\" => generate_report::<Blake2sHasher>(params, &cache_dir)?,\n        _ => bail!(\"invalid hasher: {}\", params.hasher),\n    };\n\n    report.print();\n\n    Ok(())\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/benchy/window_post.rs",
    "content": "use std::collections::BTreeMap;\nuse std::io::{stdout, Seek, SeekFrom, Write};\n\nuse fil_proofs_tooling::{measure, Metadata};\nuse filecoin_proofs::constants::{\n    POREP_PARTITIONS, WINDOW_POST_CHALLENGE_COUNT, WINDOW_POST_SECTOR_COUNT,\n};\nuse filecoin_proofs::types::{\n    PaddedBytesAmount, PoRepConfig, PoRepProofPartitions, PoStConfig, SectorSize,\n    UnpaddedBytesAmount,\n};\nuse filecoin_proofs::{\n    add_piece, generate_piece_commitment, generate_window_post, seal_commit_phase1,\n    seal_commit_phase2, seal_pre_commit_phase1, seal_pre_commit_phase2, validate_cache_for_commit,\n    validate_cache_for_precommit_phase2, verify_window_post, with_shape, PoStType,\n    PrivateReplicaInfo, PublicReplicaInfo,\n};\nuse log::info;\nuse serde::Serialize;\nuse storage_proofs::merkle::MerkleTreeTrait;\nuse storage_proofs::sector::SectorId;\nuse tempfile::NamedTempFile;\n\nuse crate::shared::{PROVER_ID, RANDOMNESS, TICKET_BYTES};\n\nconst SECTOR_ID: u64 = 0;\n\n#[derive(Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Inputs {\n    sector_size: u64,\n}\n\n#[derive(Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Outputs {\n    seal_pre_commit_phase1_cpu_time_ms: u64,\n    seal_pre_commit_phase1_wall_time_ms: u64,\n    validate_cache_for_precommit_phase2_cpu_time_ms: u64,\n    validate_cache_for_precommit_phase2_wall_time_ms: u64,\n    seal_pre_commit_phase2_cpu_time_ms: u64,\n    seal_pre_commit_phase2_wall_time_ms: u64,\n    validate_cache_for_commit_cpu_time_ms: u64,\n    validate_cache_for_commit_wall_time_ms: u64,\n    seal_commit_phase1_cpu_time_ms: u64,\n    seal_commit_phase1_wall_time_ms: u64,\n    seal_commit_phase2_cpu_time_ms: u64,\n    seal_commit_phase2_wall_time_ms: u64,\n    gen_window_post_cpu_time_ms: u64,\n    gen_window_post_wall_time_ms: u64,\n    verify_window_post_cpu_time_ms: u64,\n    verify_window_post_wall_time_ms: u64,\n}\n\n#[derive(Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Report {\n    inputs: Inputs,\n    outputs: Outputs,\n}\n\nimpl Report {\n    /// Print all results to stdout\n    pub fn print(&self) {\n        let wrapped = Metadata::wrap(&self).expect(\"failed to retrieve metadata\");\n        serde_json::to_writer(stdout(), &wrapped).expect(\"cannot write report JSON to stdout\");\n    }\n}\n\npub fn run_window_post_bench<Tree: 'static + MerkleTreeTrait>(\n    sector_size: u64,\n) -> anyhow::Result<()> {\n    let sector_size_unpadded_bytes_ammount =\n        UnpaddedBytesAmount::from(PaddedBytesAmount(sector_size));\n\n    // Create files for the staged and sealed sectors.\n    let mut staged_file =\n        NamedTempFile::new().expect(\"could not create temp file for staged sector\");\n\n    let sealed_file = NamedTempFile::new().expect(\"could not create temp file for sealed sector\");\n\n    // Generate the data from which we will create a replica, we will then prove the continued\n    // storage of that replica using the PoSt.\n    let piece_bytes: Vec<u8> = (0..usize::from(sector_size_unpadded_bytes_ammount))\n        .map(|_| rand::random::<u8>())\n        .collect();\n\n    let mut piece_file = NamedTempFile::new()?;\n    piece_file.write_all(&piece_bytes)?;\n    piece_file.as_file_mut().sync_all()?;\n    piece_file.as_file_mut().seek(SeekFrom::Start(0))?;\n\n    let piece_info =\n        generate_piece_commitment(piece_file.as_file_mut(), sector_size_unpadded_bytes_ammount)?;\n    piece_file.as_file_mut().seek(SeekFrom::Start(0))?;\n\n    add_piece(\n        &mut piece_file,\n        &mut 
staged_file,\n        sector_size_unpadded_bytes_ammount,\n        &[],\n    )?;\n\n    let piece_infos = vec![piece_info];\n\n    // Replicate the staged sector, write the replica file to `sealed_path`.\n    let porep_config = PoRepConfig {\n        sector_size: SectorSize(sector_size),\n        partitions: PoRepProofPartitions(\n            *POREP_PARTITIONS\n                .read()\n                .unwrap()\n                .get(&(sector_size))\n                .unwrap(),\n        ),\n    };\n    let cache_dir = tempfile::tempdir().unwrap();\n    let sector_id = SectorId::from(SECTOR_ID);\n\n    let seal_pre_commit_phase1_measurement = measure(|| {\n        seal_pre_commit_phase1::<_, _, _, Tree>(\n            porep_config,\n            cache_dir.path(),\n            staged_file.path(),\n            sealed_file.path(),\n            PROVER_ID,\n            sector_id,\n            TICKET_BYTES,\n            &piece_infos,\n        )\n    })\n    .expect(\"failed in seal_pre_commit_phase1\");\n    let phase1_output = seal_pre_commit_phase1_measurement.return_value;\n\n    let validate_cache_for_precommit_phase2_measurement = measure(|| {\n        validate_cache_for_precommit_phase2::<_, _, Tree>(\n            cache_dir.path(),\n            sealed_file.path(),\n            &phase1_output,\n        )\n    })\n    .expect(\"failed to validate cache for precommit phase2\");\n\n    let seal_pre_commit_phase2_measurement = measure(|| {\n        seal_pre_commit_phase2::<_, _, Tree>(\n            porep_config,\n            phase1_output,\n            cache_dir.path(),\n            sealed_file.path(),\n        )\n    })\n    .expect(\"failed in seal_pre_commit_phase2\");\n    let seal_pre_commit_output = seal_pre_commit_phase2_measurement.return_value;\n\n    let seed = [0u8; 32];\n    let comm_r = seal_pre_commit_output.comm_r;\n\n    let validate_cache_for_commit_measurement =\n        measure(|| validate_cache_for_commit::<_, _, Tree>(cache_dir.path(), sealed_file.path()))\n            .expect(\"failed to validate cache for commit\");\n\n    let seal_commit_phase1_measurement = measure(|| {\n        seal_commit_phase1::<_, Tree>(\n            porep_config,\n            cache_dir.path(),\n            sealed_file.path(),\n            PROVER_ID,\n            sector_id,\n            TICKET_BYTES,\n            seed,\n            seal_pre_commit_output,\n            &piece_infos,\n        )\n    })\n    .expect(\"failed in seal_commit_phase1\");\n    let phase1_output = seal_commit_phase1_measurement.return_value;\n\n    let seal_commit_phase2_measurement =\n        measure(|| seal_commit_phase2::<Tree>(porep_config, phase1_output, PROVER_ID, sector_id))\n            .expect(\"failed in seal_commit_phase2\");\n\n    let pub_replica = PublicReplicaInfo::new(comm_r).expect(\"failed to create public replica info\");\n\n    let priv_replica = PrivateReplicaInfo::<Tree>::new(\n        sealed_file.path().to_path_buf(),\n        comm_r,\n        cache_dir.into_path(),\n    )\n    .expect(\"failed to create private replica info\");\n\n    // Store the replica's private and publicly facing info for proving and verifying respectively.\n    let mut pub_replica_info: BTreeMap<SectorId, PublicReplicaInfo> = BTreeMap::new();\n    let mut priv_replica_info: BTreeMap<SectorId, PrivateReplicaInfo<Tree>> = BTreeMap::new();\n\n    pub_replica_info.insert(sector_id, pub_replica);\n    priv_replica_info.insert(sector_id, priv_replica);\n\n    // Measure PoSt generation and verification.\n    let post_config = PoStConfig 
{\n        sector_size: SectorSize(sector_size),\n        challenge_count: WINDOW_POST_CHALLENGE_COUNT,\n        sector_count: *WINDOW_POST_SECTOR_COUNT\n            .read()\n            .unwrap()\n            .get(&sector_size)\n            .unwrap(),\n        typ: PoStType::Window,\n        priority: true,\n    };\n\n    let gen_window_post_measurement = measure(|| {\n        generate_window_post::<Tree>(&post_config, &RANDOMNESS, &priv_replica_info, PROVER_ID)\n    })\n    .expect(\"failed to generate window post\");\n\n    let proof = &gen_window_post_measurement.return_value;\n\n    let verify_window_post_measurement = measure(|| {\n        verify_window_post::<Tree>(\n            &post_config,\n            &RANDOMNESS,\n            &pub_replica_info,\n            PROVER_ID,\n            &proof,\n        )\n    })\n    .expect(\"failed to verify window post proof\");\n\n    let report = Report {\n        inputs: Inputs { sector_size },\n        outputs: Outputs {\n            seal_pre_commit_phase1_cpu_time_ms: seal_pre_commit_phase1_measurement\n                .cpu_time\n                .as_millis() as u64,\n            seal_pre_commit_phase1_wall_time_ms: seal_pre_commit_phase1_measurement\n                .wall_time\n                .as_millis() as u64,\n            validate_cache_for_precommit_phase2_cpu_time_ms:\n                validate_cache_for_precommit_phase2_measurement\n                    .cpu_time\n                    .as_millis() as u64,\n            validate_cache_for_precommit_phase2_wall_time_ms:\n                validate_cache_for_precommit_phase2_measurement\n                    .wall_time\n                    .as_millis() as u64,\n            seal_pre_commit_phase2_cpu_time_ms: seal_pre_commit_phase2_measurement\n                .cpu_time\n                .as_millis() as u64,\n            seal_pre_commit_phase2_wall_time_ms: seal_pre_commit_phase2_measurement\n                .wall_time\n                .as_millis() as u64,\n            validate_cache_for_commit_cpu_time_ms: validate_cache_for_commit_measurement\n                .cpu_time\n                .as_millis() as u64,\n            validate_cache_for_commit_wall_time_ms: validate_cache_for_commit_measurement\n                .wall_time\n                .as_millis() as u64,\n            seal_commit_phase1_cpu_time_ms: seal_commit_phase1_measurement.cpu_time.as_millis()\n                as u64,\n            seal_commit_phase1_wall_time_ms: seal_commit_phase1_measurement.wall_time.as_millis()\n                as u64,\n            seal_commit_phase2_cpu_time_ms: seal_commit_phase2_measurement.cpu_time.as_millis()\n                as u64,\n            seal_commit_phase2_wall_time_ms: seal_commit_phase2_measurement.wall_time.as_millis()\n                as u64,\n            gen_window_post_cpu_time_ms: gen_window_post_measurement.cpu_time.as_millis() as u64,\n            gen_window_post_wall_time_ms: gen_window_post_measurement.wall_time.as_millis() as u64,\n            verify_window_post_cpu_time_ms: verify_window_post_measurement.cpu_time.as_millis()\n                as u64,\n            verify_window_post_wall_time_ms: verify_window_post_measurement.wall_time.as_millis()\n                as u64,\n        },\n    };\n\n    // Create a JSON serializable report that we print to stdout (that will later be parsed using\n    // the CLI JSON parser `jq`).\n    report.print();\n    Ok(())\n}\n\npub fn run(sector_size: usize) -> anyhow::Result<()> {\n    info!(\"Benchy Window PoSt: sector-size={}\", sector_size,);\n\n 
   with_shape!(\n        sector_size as u64,\n        run_window_post_bench,\n        sector_size as u64\n    )\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/benchy/winning_post.rs",
    "content": "use std::io::stdout;\n\nuse anyhow::anyhow;\nuse fil_proofs_tooling::shared::{create_replica, PROVER_ID, RANDOMNESS};\nuse fil_proofs_tooling::{measure, Metadata};\nuse filecoin_proofs::constants::{WINNING_POST_CHALLENGE_COUNT, WINNING_POST_SECTOR_COUNT};\nuse filecoin_proofs::types::PoStConfig;\nuse filecoin_proofs::{\n    generate_winning_post, generate_winning_post_sector_challenge, verify_winning_post, with_shape,\n    PoStType,\n};\nuse log::info;\nuse serde::Serialize;\nuse storage_proofs_core::api_version::ApiVersion;\nuse storage_proofs_core::merkle::MerkleTreeTrait;\n\n#[derive(Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Inputs {\n    sector_size: u64,\n}\n\n#[derive(Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Outputs {\n    gen_winning_post_cpu_time_ms: u64,\n    gen_winning_post_wall_time_ms: u64,\n    verify_winning_post_cpu_time_ms: u64,\n    verify_winning_post_wall_time_ms: u64,\n    gen_winning_post_sector_challenge_cpu_time_ms: u64,\n    gen_winning_post_sector_challenge_wall_time_ms: u64,\n}\n\n#[derive(Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Report {\n    inputs: Inputs,\n    outputs: Outputs,\n}\n\nimpl Report {\n    /// Print all results to stdout\n    pub fn print(&self) {\n        let wrapped = Metadata::wrap(&self).expect(\"failed to retrieve metadata\");\n        serde_json::to_writer(stdout(), &wrapped).expect(\"cannot write report JSON to stdout\");\n    }\n}\n\npub fn run_fallback_post_bench<Tree: 'static + MerkleTreeTrait>(\n    sector_size: u64,\n    api_version: ApiVersion,\n) -> anyhow::Result<()> {\n    if WINNING_POST_SECTOR_COUNT != 1 {\n        return Err(anyhow!(\n            \"This benchmark only works with WINNING_POST_SECTOR_COUNT == 1\"\n        ));\n    }\n    let arbitrary_porep_id = [66; 32];\n    let (sector_id, replica_output) =\n        create_replica::<Tree>(sector_size, arbitrary_porep_id, api_version);\n\n    // Store the replica's private and publicly facing info for proving and verifying respectively.\n    let pub_replica_info = vec![(sector_id, replica_output.public_replica_info)];\n    let priv_replica_info = vec![(sector_id, replica_output.private_replica_info)];\n\n    let post_config = PoStConfig {\n        sector_size: sector_size.into(),\n        sector_count: WINNING_POST_SECTOR_COUNT,\n        challenge_count: WINNING_POST_CHALLENGE_COUNT,\n        typ: PoStType::Winning,\n        priority: true,\n        api_version,\n    };\n\n    let gen_winning_post_sector_challenge_measurement = measure(|| {\n        generate_winning_post_sector_challenge::<Tree>(\n            &post_config,\n            &RANDOMNESS,\n            WINNING_POST_SECTOR_COUNT as u64,\n            PROVER_ID,\n        )\n    })\n    .expect(\"failed to generate winning post sector challenge\");\n\n    let gen_winning_post_measurement = measure(|| {\n        generate_winning_post::<Tree>(&post_config, &RANDOMNESS, &priv_replica_info[..], PROVER_ID)\n    })\n    .expect(\"failed to generate winning post\");\n\n    let proof = &gen_winning_post_measurement.return_value;\n\n    let verify_winning_post_measurement = measure(|| {\n        verify_winning_post::<Tree>(\n            &post_config,\n            &RANDOMNESS,\n            &pub_replica_info[..],\n            PROVER_ID,\n            &proof,\n        )\n    })\n    .expect(\"failed to verify winning post proof\");\n\n    // Create a JSON serializable report that we print to stdout (that will later be parsed using\n    // the CLI JSON parser `jq`).\n  
  let report = Report {\n        inputs: Inputs { sector_size },\n        outputs: Outputs {\n            gen_winning_post_cpu_time_ms: gen_winning_post_measurement.cpu_time.as_millis() as u64,\n            gen_winning_post_wall_time_ms: gen_winning_post_measurement.wall_time.as_millis()\n                as u64,\n            verify_winning_post_cpu_time_ms: verify_winning_post_measurement.cpu_time.as_millis()\n                as u64,\n            verify_winning_post_wall_time_ms: verify_winning_post_measurement.wall_time.as_millis()\n                as u64,\n            gen_winning_post_sector_challenge_cpu_time_ms:\n                gen_winning_post_sector_challenge_measurement\n                    .cpu_time\n                    .as_millis() as u64,\n            gen_winning_post_sector_challenge_wall_time_ms:\n                gen_winning_post_sector_challenge_measurement\n                    .wall_time\n                    .as_millis() as u64,\n        },\n    };\n    report.print();\n    Ok(())\n}\n\npub fn run(sector_size: usize, api_version: ApiVersion) -> anyhow::Result<()> {\n    info!(\n        \"Benchy Winning PoSt: sector-size={}, api_version={}\",\n        sector_size, api_version\n    );\n\n    with_shape!(\n        sector_size as u64,\n        run_fallback_post_bench,\n        sector_size as u64,\n        api_version,\n    )\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/check_parameters/main.rs",
    "content": "use std::path::PathBuf;\n\nuse anyhow::Result;\nuse bellperson::bls::Bls12;\nuse bellperson::groth16::MappedParameters;\nuse clap::{value_t, App, Arg, SubCommand};\n\nuse storage_proofs_core::parameter_cache::read_cached_params;\n\nfn run_map(parameter_file: &PathBuf) -> Result<MappedParameters<Bls12>> {\n    read_cached_params(parameter_file)\n}\n\nfn main() -> Result<()> {\n    fil_logger::init();\n\n    let map_cmd = SubCommand::with_name(\"map\")\n        .about(\"build mapped parameters\")\n        .arg(\n            Arg::with_name(\"param\")\n                .long(\"parameter-file\")\n                .help(\"The parameter file to map\")\n                .required(true)\n                .takes_value(true),\n        );\n\n    let matches = App::new(\"check_parameters\")\n        .version(\"0.1\")\n        .subcommand(map_cmd)\n        .get_matches();\n\n    match matches.subcommand() {\n        (\"map\", Some(m)) => {\n            let parameter_file = value_t!(m, \"param\", PathBuf)?;\n            run_map(&parameter_file)?;\n        }\n        _ => panic!(\"Unrecognized subcommand\"),\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/circuitinfo/main.rs",
    "content": "use std::str::FromStr;\n\nuse bellperson::{bls::Bls12, util_cs::bench_cs::BenchCS, Circuit};\nuse dialoguer::{theme::ColorfulTheme, MultiSelect};\nuse filecoin_proofs::{\n    parameters::{public_params, window_post_public_params, winning_post_public_params},\n    with_shape, DefaultPieceHasher, PaddedBytesAmount, PoRepConfig, PoRepProofPartitions,\n    PoStConfig, PoStType, SectorSize, POREP_PARTITIONS, PUBLISHED_SECTOR_SIZES,\n    WINDOW_POST_CHALLENGE_COUNT, WINDOW_POST_SECTOR_COUNT, WINNING_POST_CHALLENGE_COUNT,\n    WINNING_POST_SECTOR_COUNT,\n};\nuse humansize::{file_size_opts, FileSize};\nuse log::{info, warn};\nuse storage_proofs_core::{\n    api_version::ApiVersion, compound_proof::CompoundProof, merkle::MerkleTreeTrait,\n};\nuse storage_proofs_porep::stacked::{StackedCompound, StackedDrg};\nuse storage_proofs_post::fallback::{FallbackPoSt, FallbackPoStCircuit, FallbackPoStCompound};\nuse structopt::StructOpt;\n\nstruct CircuitInfo {\n    constraints: usize,\n    inputs: usize,\n}\n\nfn circuit_info<C: Circuit<Bls12>>(circuit: C) -> CircuitInfo {\n    let mut cs_blank = BenchCS::new();\n    circuit\n        .synthesize(&mut cs_blank)\n        .expect(\"failed to synthesize\");\n\n    CircuitInfo {\n        constraints: cs_blank.num_constraints(),\n        inputs: cs_blank.num_inputs(),\n    }\n}\n\nfn get_porep_info<Tree: 'static + MerkleTreeTrait>(porep_config: PoRepConfig) -> CircuitInfo {\n    info!(\"PoRep info\");\n\n    let public_params = public_params(\n        PaddedBytesAmount::from(porep_config),\n        usize::from(PoRepProofPartitions::from(porep_config)),\n        porep_config.porep_id,\n        porep_config.api_version,\n    )\n    .expect(\"failed to get public params from config\");\n\n    let circuit = <StackedCompound<Tree, DefaultPieceHasher> as CompoundProof<\n        StackedDrg<Tree, DefaultPieceHasher>,\n        _,\n    >>::blank_circuit(&public_params);\n\n    circuit_info(circuit)\n}\n\nfn get_winning_post_info<Tree: 'static + MerkleTreeTrait>(post_config: &PoStConfig) -> CircuitInfo {\n    info!(\"Winning PoSt info\");\n\n    let post_public_params = winning_post_public_params::<Tree>(post_config)\n        .expect(\"failed to get public params from config\");\n\n    let circuit: FallbackPoStCircuit<Tree> = <FallbackPoStCompound<Tree> as CompoundProof<\n        FallbackPoSt<Tree>,\n        FallbackPoStCircuit<Tree>,\n    >>::blank_circuit(&post_public_params);\n\n    circuit_info(circuit)\n}\n\nfn get_window_post_info<Tree: 'static + MerkleTreeTrait>(post_config: &PoStConfig) -> CircuitInfo {\n    info!(\"Window PoSt info\");\n\n    let post_public_params = window_post_public_params::<Tree>(post_config)\n        .expect(\"failed to get public params from config\");\n\n    let circuit: FallbackPoStCircuit<Tree> = <FallbackPoStCompound<Tree> as CompoundProof<\n        FallbackPoSt<Tree>,\n        FallbackPoStCircuit<Tree>,\n    >>::blank_circuit(&post_public_params);\n\n    circuit_info(circuit)\n}\n\n#[derive(Debug, StructOpt)]\n#[structopt(name = \"circuitinfo\")]\nstruct Opt {\n    #[structopt(long)]\n    winning: bool,\n    #[structopt(long)]\n    window: bool,\n    #[structopt(long)]\n    porep: bool,\n    #[structopt(short = \"z\", long, use_delimiter = true)]\n    constraints_for_sector_sizes: Vec<u64>,\n    #[structopt(default_value = \"1.0.0\", long)]\n    api_version: String,\n}\n\nfn winning_post_info(sector_size: u64, api_version: ApiVersion) -> CircuitInfo {\n    with_shape!(\n        sector_size,\n        
get_winning_post_info,\n        &PoStConfig {\n            sector_size: SectorSize(sector_size),\n            challenge_count: WINNING_POST_CHALLENGE_COUNT,\n            sector_count: WINNING_POST_SECTOR_COUNT,\n            typ: PoStType::Winning,\n            priority: true,\n            api_version,\n        }\n    )\n}\n\nfn window_post_info(sector_size: u64, api_version: ApiVersion) -> CircuitInfo {\n    with_shape!(\n        sector_size,\n        get_window_post_info,\n        &PoStConfig {\n            sector_size: SectorSize(sector_size),\n            challenge_count: WINDOW_POST_CHALLENGE_COUNT,\n            sector_count: *WINDOW_POST_SECTOR_COUNT\n                .read()\n                .expect(\"WINDOW_POST_SECTOR_COUNT poisoned\")\n                .get(&sector_size)\n                .expect(\"unknown sector size\"),\n            typ: PoStType::Window,\n            priority: true,\n            api_version,\n        }\n    )\n}\n\nfn porep_info(sector_size: u64, api_version: ApiVersion) -> (CircuitInfo, usize) {\n    let partitions = PoRepProofPartitions(\n        *POREP_PARTITIONS\n            .read()\n            .expect(\"POREP_PARTITIONS poisoned\")\n            .get(&sector_size)\n            .expect(\"unknown sector size\"),\n    );\n    let info = with_shape!(\n        sector_size,\n        get_porep_info,\n        PoRepConfig {\n            sector_size: SectorSize(sector_size),\n            partitions,\n            porep_id: [0; 32],\n            api_version,\n        }\n    );\n    (info, partitions.into())\n}\n\n// Run this from the command-line to get info about circuits.\npub fn main() {\n    // The logger is initialized so that every message from this tool also ends up in the logs.\n    // The information is additionally printed to stdout, so that users who haven't set the\n    // `RUST_LOG` environment variable still see warnings/progress.\n    fil_logger::init();\n\n    let opts = Opt::from_args();\n\n    // Display interactive menu if no sizes are given\n    let sizes: Vec<u64> = if opts.constraints_for_sector_sizes.is_empty() {\n        let sector_sizes = PUBLISHED_SECTOR_SIZES\n            .iter()\n            .map(|sector_size| {\n                // Right aligning the numbers makes them easier to read\n                format!(\n                    \"{: >7}\",\n                    sector_size\n                        .file_size(file_size_opts::BINARY)\n                        .expect(\"failed to format sector size\"),\n                )\n            })\n            .collect::<Vec<_>>();\n\n        let selected_sector_sizes = MultiSelect::with_theme(&ColorfulTheme::default())\n            .with_prompt(\"Select the sizes for which constraints should be counted [use space key to select]\")\n            .items(&sector_sizes[..])\n            .interact()\n            .expect(\"interaction failed\");\n\n        // Extract the selected sizes\n        PUBLISHED_SECTOR_SIZES\n            .iter()\n            .enumerate()\n            .filter_map(|(index, size)| {\n                if selected_sector_sizes.contains(&index) {\n                    Some(*size)\n                } else {\n                    None\n                }\n            })\n            .collect()\n    } else {\n        opts.constraints_for_sector_sizes\n            .into_iter()\n            .filter(|size| {\n                if PUBLISHED_SECTOR_SIZES.contains(size) {\n                    return true;\n                }\n\n                warn!(\"ignoring invalid sector size: {}\", size);\n                
println!(\"ignoring invalid sector size: {}\", size);\n                false\n            })\n            .collect()\n    };\n\n    if sizes.is_empty() {\n        info!(\"No valid sector sizes given. Abort.\");\n        println!(\"No valid sector sizes given. Abort.\");\n    }\n\n    let count_winning = opts.winning;\n    let count_window = opts.window;\n    let count_porep = opts.porep;\n    let api_version = ApiVersion::from_str(&opts.api_version)\n        .expect(\"Failed to parse api_version from semver string\");\n\n    for sector_size in sizes {\n        let human_size = sector_size\n            .file_size(file_size_opts::BINARY)\n            .expect(\"failed to format sector size\");\n        println!(\"Getting circuit info for sector size: {}\", human_size);\n\n        if count_winning {\n            let info = winning_post_info(sector_size, api_version);\n            println!(\n                \"{} Winning PoSt constraints: {}, public inputs: {}, partitions: 1\",\n                human_size, info.constraints, info.inputs\n            );\n        }\n\n        if count_window {\n            let info = window_post_info(sector_size, api_version);\n            println!(\n                \"{} Window PoSt constraints (per partition): {}, public inputs (per partition): {}, partitions: <depends on input size>\",\n                human_size, info.constraints, info.inputs\n            );\n        }\n\n        if count_porep {\n            let (info, partitions) = porep_info(sector_size, api_version);\n            println!(\n                \"{} PoRep constraints: {}, public inputs: {}, partitions: {}\",\n                human_size, info.constraints, info.inputs, partitions\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/fdlimit/main.rs",
    "content": "use anyhow::Result;\n\nfn main() -> Result<()> {\n    fil_logger::init();\n\n    let res = fdlimit::raise_fd_limit().expect(\"failed to raise fd limit\");\n    println!(\"File descriptor limit was raised to {}\", res);\n\n    Ok(())\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/gen_graph_cache/main.rs",
    "content": "use std::collections::BTreeMap;\nuse std::fs::File;\nuse std::io::BufWriter;\nuse std::path::Path;\n\nuse anyhow::Result;\nuse clap::{value_t, App, Arg};\nuse filecoin_hashers::sha256::Sha256Hasher;\nuse filecoin_proofs::{\n    with_shape, DRG_DEGREE, EXP_DEGREE, SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_GIB, SECTOR_SIZE_512_MIB,\n    SECTOR_SIZE_64_GIB, SECTOR_SIZE_8_MIB,\n};\nuse serde::{Deserialize, Serialize};\nuse storage_proofs_core::{api_version::ApiVersion, merkle::MerkleTreeTrait, proof::ProofScheme};\nuse storage_proofs_porep::stacked::{LayerChallenges, SetupParams, StackedDrg};\n\nconst PARENT_CACHE_JSON_OUTPUT: &str = \"./parent_cache.json\";\n\npub type ParentCacheSummaryMap = BTreeMap<String, ParentCacheSummary>;\n\n#[derive(Debug, Deserialize, Serialize)]\npub struct ParentCacheSummary {\n    pub sector_size: usize,\n    pub digest: String,\n}\n\nfn gen_graph_cache<Tree: 'static + MerkleTreeTrait>(\n    sector_size: usize,\n    porep_id: [u8; 32],\n    api_version: ApiVersion,\n    parent_cache_summary_map: &mut ParentCacheSummaryMap,\n) -> Result<()> {\n    let nodes = (sector_size / 32) as usize;\n\n    // Note that layers and challenge_count don't affect the graph, so\n    // we just use dummy values of 1 for the setup params.\n    let layers = 1;\n    let challenge_count = 1;\n    let layer_challenges = LayerChallenges::new(layers, challenge_count);\n\n    let sp = SetupParams {\n        nodes,\n        degree: DRG_DEGREE,\n        expansion_degree: EXP_DEGREE,\n        porep_id,\n        layer_challenges,\n        api_version,\n    };\n\n    let pp = StackedDrg::<Tree, Sha256Hasher>::setup(&sp).expect(\"failed to setup DRG\");\n    let parent_cache = pp.graph.parent_cache()?;\n\n    let data = ParentCacheSummary {\n        digest: parent_cache.digest,\n        sector_size: parent_cache.sector_size,\n    };\n    parent_cache_summary_map.insert(\n        parent_cache\n            .path\n            .file_stem()\n            .expect(\"file_stem failure\")\n            .to_str()\n            .expect(\"file stem to_str failure\")\n            .to_string(),\n        data,\n    );\n\n    Ok(())\n}\n\nfn main() -> Result<()> {\n    fil_logger::init();\n\n    let matches = App::new(\"gen_graph_cache\")\n        .version(\"0.1\")\n        .about(\"Generates and/or verifies parent graph cache files\")\n        .arg(\n            Arg::with_name(\"json\")\n                .long(\"json\")\n                .help(\"Creates a new json output file.\")\n                .default_value(\"false\"),\n        )\n        .arg(\n            Arg::with_name(\"size\")\n                .long(\"size\")\n                .help(\"Generate and/or verify the graph cache files for a single sector size\")\n                .default_value(\"0\"),\n        )\n        .get_matches();\n\n    // NOTE: The porep_ids below are tied to the versioned values provided in\n    // filecoin-proofs-api:src/registry [porep_id()] that matches the specified\n    // sector size and must be updated when that value is updated for the proper\n    // graph cache generation/validation.\n    //\n    // If this value changes, previously existing cache files will no longer be\n    // used and new cache files will be generated.\n    let sector_sizes_and_porep_ids: Vec<(u64, [u8; 32], ApiVersion)> = vec![\n        (\n            SECTOR_SIZE_2_KIB,\n            [\n                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                0, 0, 0, 0,\n            ],\n            
ApiVersion::V1_0_0,\n        ),\n        (\n            SECTOR_SIZE_8_MIB,\n            [\n                1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                0, 0, 0, 0,\n            ],\n            ApiVersion::V1_0_0,\n        ),\n        (\n            SECTOR_SIZE_512_MIB,\n            [\n                2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                0, 0, 0, 0,\n            ],\n            ApiVersion::V1_0_0,\n        ),\n        (\n            SECTOR_SIZE_32_GIB,\n            [\n                3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                0, 0, 0, 0,\n            ],\n            ApiVersion::V1_0_0,\n        ),\n        (\n            SECTOR_SIZE_64_GIB,\n            [\n                4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                0, 0, 0, 0,\n            ],\n            ApiVersion::V1_0_0,\n        ),\n        (\n            SECTOR_SIZE_2_KIB, // v1.1.0\n            [\n                5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                0, 0, 0, 0,\n            ],\n            ApiVersion::V1_1_0,\n        ),\n        (\n            SECTOR_SIZE_8_MIB, // v1.1.0\n            [\n                6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                0, 0, 0, 0,\n            ],\n            ApiVersion::V1_1_0,\n        ),\n        (\n            SECTOR_SIZE_512_MIB, // v1.1.0\n            [\n                7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                0, 0, 0, 0,\n            ],\n            ApiVersion::V1_1_0,\n        ),\n        (\n            SECTOR_SIZE_32_GIB, // v1.1.0\n            [\n                8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                0, 0, 0, 0,\n            ],\n            ApiVersion::V1_1_0,\n        ),\n        (\n            SECTOR_SIZE_64_GIB, // v1.1.0\n            [\n                9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                0, 0, 0, 0,\n            ],\n            ApiVersion::V1_1_0,\n        ),\n    ];\n\n    let supported_sector_sizes = sector_sizes_and_porep_ids\n        .iter()\n        .map(|vals| vals.0)\n        .collect::<Vec<u64>>();\n    let mut parent_cache_summary_map: ParentCacheSummaryMap = BTreeMap::new();\n\n    let size = value_t!(matches, \"size\", u64).expect(\"failed to get size\");\n    let json = value_t!(matches, \"json\", bool).expect(\"failed to get json\");\n\n    if size == 0 {\n        println!(\n            \"gen_graph_cache: sizes {:?}, output json {}\",\n            supported_sector_sizes, json\n        );\n    } else if supported_sector_sizes.contains(&size) {\n        println!(\"gen_graph_cache: size {}, output json {}\", size, json);\n    } else {\n        println!(\n            \"Unsupported sector size {} (must be one of {:?})\",\n            size, supported_sector_sizes\n        );\n        return Ok(());\n    }\n\n    for (sector_size, porep_id, api_version) in sector_sizes_and_porep_ids {\n        // 'size' 0 indicates no size was specified, so we run all sizes.\n        if size != 0 && size != sector_size {\n            continue;\n        }\n\n        with_shape!(\n            sector_size as u64,\n            gen_graph_cache,\n            sector_size as 
usize,\n            porep_id,\n            api_version,\n            &mut parent_cache_summary_map,\n        )?;\n    }\n\n    // Output all json to PARENT_CACHE_JSON_OUTPUT in the current\n    // directory.\n    if json {\n        let json_output_path = Path::new(PARENT_CACHE_JSON_OUTPUT);\n        let json_file = File::create(&json_output_path)?;\n        let writer = BufWriter::new(json_file);\n        serde_json::to_writer_pretty(writer, &parent_cache_summary_map)?;\n        println!(\"Wrote {:?}\", json_output_path);\n    } else {\n        println!(\"{:?}\", parent_cache_summary_map);\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/gpu-cpu-test/README.md",
    "content": "GPU CPU Test\n============\n\nThis is a test utility to test whether it works to prioritize certain proofs. When a proof is prioritized, it will run on the GPU and all other proofs will be pushed to the CPU.\n\nThis utility is meant to be run manually. It spawns multiple threads/processes that run proofs. Those get killed after 5 minutes of running. The overall test runs longer as some input data needs to be generated. By default, one thread/process will always be prioritized to run on the GPU. The other one might be moved to the CPU.\n\nTo check whether the prioritization is working, run it first with default parameters:\n\n    $ RUST_LOG=debug cargo run --release --bin gpu-cpu-test\n\nOccasionally you should see log messaged like\n\n    2020-05-15T12:35:48.680 366073 low-02 WARN bellperson::gpu::locks > GPU acquired by a high priority process! Freeing up Multiexp kernels...\n\n\nwhich indicate that the high priority proof indeed pushes lower priority ones down from the GPU onto the CPU.\n\nOnce the test is completed there should be log messages that contain the results, the number of proofs run per thread:\n\n    Thread high info: RunInfo { elapsed: 301.714277787s, iterations: 51 }\n    Thread low-01 info: RunInfo { elapsed: 306.615414259s, iterations: 15 }\n    Thread low-02 info: RunInfo { elapsed: 303.641817512s, iterations: 17 }\n\nThe high priority proof clearly was able to run more proofs than the lower priority ones.\n\nTo double check the result, you can also run the test without special priorities. Then the number of proofs run should be similar across all the threads as you can see below (the first thread is always called `high` even if it doesn't run with high priority):\n\n    $ RUST_LOG=debug cargo run --release --bin gpu-cpu-test -- --gpu-stealing=false\n    Thread high info: RunInfo { elapsed: 307.515676843s, iterations: 34 }\n    Thread low-01 info: RunInfo { elapsed: 305.585567866s, iterations: 34 }\n    Thread low-02 info: RunInfo { elapsed: 302.7105106s, iterations: 34 }\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/gpu-cpu-test/main.rs",
    "content": "//requires nightly, or later stable version\n//#![warn(clippy::unwrap_used)]\n\nuse std::collections::HashMap;\nuse std::process::{self, Child, Command, Stdio};\nuse std::str;\nuse std::sync::mpsc::{self, Receiver, Sender, TryRecvError};\nuse std::thread;\nuse std::time::{Duration, Instant};\n\nuse clap::{arg_enum, value_t, App, Arg};\nuse fil_proofs_tooling::shared::{create_replica, PROVER_ID, RANDOMNESS};\nuse filecoin_proofs::constants::{SectorShape8MiB, SECTOR_SIZE_8_MIB};\nuse filecoin_proofs::types::{PoStConfig, SectorSize};\nuse filecoin_proofs::{\n    generate_winning_post, PoStType, PrivateReplicaInfo, WINNING_POST_CHALLENGE_COUNT,\n    WINNING_POST_SECTOR_COUNT,\n};\nuse log::{debug, info};\nuse storage_proofs_core::api_version::ApiVersion;\nuse storage_proofs_core::sector::SectorId;\n\nconst FIXED_API_VERSION: ApiVersion = ApiVersion::V1_0_0;\n\ntype MerkleTree = SectorShape8MiB;\nconst SECTOR_SIZE: u64 = SECTOR_SIZE_8_MIB;\nconst TIMEOUT: u64 = 5 * 60;\nconst POST_CONFIG: PoStConfig = PoStConfig {\n    sector_size: SectorSize(SECTOR_SIZE),\n    challenge_count: WINNING_POST_CHALLENGE_COUNT,\n    sector_count: WINNING_POST_SECTOR_COUNT,\n    typ: PoStType::Winning,\n    priority: false,\n    api_version: FIXED_API_VERSION,\n};\n\narg_enum! {\n    #[derive(Debug)]\n    pub enum Mode {\n        Threads,\n        Processes,\n    }\n}\n\n#[derive(Debug)]\npub struct RunInfo {\n    elapsed: Duration,\n    iterations: u8,\n}\n\npub fn colored_with_thread(\n    writer: &mut dyn std::io::Write,\n    now: &mut flexi_logger::DeferredNow,\n    record: &flexi_logger::Record,\n) -> Result<(), std::io::Error> {\n    let level = record.level();\n    write!(\n        writer,\n        \"{} {} {} {} {} > {}\",\n        now.now().format(\"%Y-%m-%dT%H:%M:%S%.3f\"),\n        process::id(),\n        thread::current()\n            .name()\n            .unwrap_or(&format!(\"{:?}\", thread::current().id())),\n        flexi_logger::style(level, level),\n        record.module_path().unwrap_or(\"<unnamed>\"),\n        record.args(),\n    )\n}\n\nfn generate_post(priv_replica_info: &[(SectorId, PrivateReplicaInfo<MerkleTree>)]) {\n    generate_winning_post::<MerkleTree>(&POST_CONFIG, &RANDOMNESS, priv_replica_info, PROVER_ID)\n        .expect(\"failed to generate PoSt\");\n}\n\nfn generate_post_in_priority(priv_replica_info: &[(SectorId, PrivateReplicaInfo<MerkleTree>)]) {\n    let mut post_config = POST_CONFIG;\n    post_config.priority = true;\n    generate_winning_post::<MerkleTree>(&post_config, &RANDOMNESS, priv_replica_info, PROVER_ID)\n        .expect(\"failed to generate PoSt with high priority\");\n}\n\nfn thread_fun(\n    rx: Receiver<()>,\n    gpu_stealing: bool,\n    priv_replica_infos: &[(SectorId, PrivateReplicaInfo<MerkleTree>)],\n) -> RunInfo {\n    let timing = Instant::now();\n    let mut iteration = 0;\n    while iteration < std::u8::MAX {\n        info!(\"iter {}\", iteration);\n\n        // This is the higher priority proof, get it on the GPU even if there is one running\n        // already there\n        if gpu_stealing {\n            // Run the actual proof\n            generate_post_in_priority(&priv_replica_infos);\n        } else {\n            // Run the actual proof\n            generate_post(&priv_replica_infos);\n        }\n\n        // Waiting for this thread to be killed\n        match rx.try_recv() {\n            Ok(_) | Err(TryRecvError::Disconnected) => {\n                debug!(\"High priority proofs received kill message\");\n                break;\n      
      }\n            Err(TryRecvError::Empty) => (),\n        }\n        iteration += 1;\n    }\n    RunInfo {\n        elapsed: timing.elapsed(),\n        iterations: iteration,\n    }\n}\n\nfn spawn_thread(\n    name: &str,\n    gpu_stealing: bool,\n    priv_replica_info: (SectorId, PrivateReplicaInfo<MerkleTree>),\n) -> (Sender<()>, thread::JoinHandle<RunInfo>) {\n    let (tx, rx) = mpsc::channel();\n\n    let thread_config = thread::Builder::new().name(name.to_string());\n    let handler = thread_config\n        .spawn(move || -> RunInfo { thread_fun(rx, gpu_stealing, &[priv_replica_info]) })\n        .expect(\"Could not spawn thread\");\n\n    (tx, handler)\n}\n\nfn threads_mode(parallel: u8, gpu_stealing: bool) {\n    // All channels we send a termination message to\n    let mut senders = Vec::new();\n    // All thread handles that get terminated\n    let mut threads: Vec<Option<thread::JoinHandle<_>>> = Vec::new();\n    let arbitrary_porep_id = [234; 32];\n\n    // Create fixtures only once for both threads\n    let (sector_id, replica_output) =\n        create_replica::<MerkleTree>(SECTOR_SIZE, arbitrary_porep_id, FIXED_API_VERSION);\n    let priv_replica_info = (sector_id, replica_output.private_replica_info);\n\n    // Put each proof into its own scope (the other one is due to the if statement)\n    {\n        let (tx, handler) = spawn_thread(\"high\", gpu_stealing, priv_replica_info.clone());\n        senders.push(tx);\n        threads.push(Some(handler));\n    }\n\n    (1..parallel).for_each(|ii| {\n        let name = format!(\"low-{:02}\", ii);\n        let (tx, handler) = spawn_thread(&name, false, priv_replica_info.clone());\n        senders.push(tx);\n        threads.push(Some(handler));\n    });\n\n    // Terminate all threads after that amount of time\n    let timeout = Duration::from_secs(TIMEOUT);\n    thread::sleep(timeout);\n    info!(\"Waited long enough to kill all threads\");\n    for tx in senders {\n        tx.send(()).expect(\"tx channel send failed\");\n    }\n\n    for thread in &mut threads {\n        if let Some(handler) = thread.take() {\n            let thread_name = handler\n                .thread()\n                .name()\n                .unwrap_or(&format!(\"{:?}\", handler.thread().id()))\n                .to_string();\n            let run_info = handler.join().expect(\"thread being joined has panicked\");\n            info!(\"Thread {} info: {:?}\", thread_name, run_info);\n            // Also print it, so that we can get that information in processes mode\n            println!(\"Thread {} info: {:?}\", thread_name, run_info);\n        }\n    }\n}\n\nfn processes_mode(parallel: u8, gpu_stealing: bool) {\n    let mut children = HashMap::new();\n\n    // Put each process into its own scope (the other one is due to the if statement)\n    {\n        let name = \"high\";\n        let child = spawn_process(&name, gpu_stealing);\n        children.insert(name.to_string(), child);\n    }\n\n    (1..parallel).for_each(|ii| {\n        let name = format!(\"low-{:02}\", ii);\n        let child = spawn_process(&name, false);\n        children.insert(name, child);\n    });\n\n    // Wait for all processes to finish and log their output\n    for (name, child) in children {\n        let output = child.wait_with_output().expect(\"failed to wait for child\");\n        info!(\n            \"Process {} info: {}\",\n            name,\n            str::from_utf8(&output.stdout).expect(\"failed to parse UTF-8\")\n        );\n    }\n}\n\nfn spawn_process(name: &str, 
gpu_stealing: bool) -> Child {\n    // Runs this program again in its own process, but this time spawning only a single\n    // thread to run the actual proof.\n    Command::new(\"cargo\")\n        .arg(\"run\")\n        .arg(\"--release\")\n        .args(&[\"--bin\", \"gpu-cpu-test\"])\n        .arg(\"--\")\n        .args(&[\"--gpu-stealing\", &gpu_stealing.to_string()])\n        .args(&[\"--parallel\", \"1\"])\n        .args(&[\"--mode\", \"threads\"])\n        // Print logging to the main process stderr\n        .stderr(Stdio::inherit())\n        // Use the stdout to return a result\n        .stdout(Stdio::piped())\n        .spawn()\n        .unwrap_or_else(|_| panic!(\"failed to execute process {}\", name))\n}\n\nfn main() {\n    flexi_logger::Logger::with_env()\n        .format(colored_with_thread)\n        .start()\n        .expect(\"Initializing logger failed. Was another logger already initialized?\");\n\n    let matches = App::new(\"gpu-cpu-test\")\n        .version(\"0.1\")\n        .about(\"Tests if moving proofs from GPU to CPU works\")\n        .arg(\n            Arg::with_name(\"parallel\")\n                .long(\"parallel\")\n                .help(\"Run multiple proofs in parallel.\")\n                .default_value(\"3\"),\n        )\n        .arg(\n            Arg::with_name(\"gpu-stealing\")\n                .long(\"gpu-stealing\")\n                .help(\"Force high priority proof on the GPU and let low priority one continue on CPU.\")\n                .default_value(\"true\"),\n        )\n        .arg(\n            Arg::with_name(\"mode\")\n                .long(\"mode\")\n                .help(\"Whether to run with threads or processes.\")\n                .possible_values(&[\"threads\", \"processes\"])\n                .case_insensitive(true)\n                .default_value(\"threads\"),\n        )\n        .get_matches();\n\n    let parallel = value_t!(matches, \"parallel\", u8).expect(\"failed to get parallel\");\n    if parallel == 1 {\n        info!(\"Running high priority proof only\")\n    } else {\n        info!(\"Running high and low priority proofs in parallel\")\n    }\n    let gpu_stealing = value_t!(matches, \"gpu-stealing\", bool).expect(\"failed to get gpu-stealing\");\n    if gpu_stealing {\n        info!(\"Force low priority proofs to CPU\")\n    } else {\n        info!(\"Let everyone queue up to run on GPU\")\n    }\n    let mode = value_t!(matches, \"mode\", Mode).unwrap_or_else(|e| e.exit());\n    match mode {\n        Mode::Threads => info!(\"Using threads\"),\n        Mode::Processes => info!(\"Using processes\"),\n    }\n\n    match mode {\n        Mode::Threads => {\n            threads_mode(parallel, gpu_stealing);\n        }\n        Mode::Processes => {\n            processes_mode(parallel, gpu_stealing);\n        }\n    }\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/micro.rs",
    "content": "use std::io::{self, BufRead};\n\nuse anyhow::{anyhow, Context, Result};\nuse commandspec::command;\nuse fil_proofs_tooling::metadata::Metadata;\nuse regex::Regex;\nuse serde::Serialize;\n\n#[derive(Debug, Default, Clone, PartialEq, Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Interval {\n    start: f64,\n    end: f64,\n    unit: Option<String>,\n}\n\n#[derive(Debug, Default, Clone, PartialEq, Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct Point {\n    value: f64,\n    unit: Option<String>,\n}\n\n#[derive(Debug, Default, Clone, PartialEq, Serialize)]\n#[serde(rename_all = \"kebab-case\")]\nstruct CriterionResult {\n    name: String,\n    samples: u32,\n    time_med: Point,\n    time: Interval,\n    throughput: Option<Interval>,\n    throughput_med: Option<Point>,\n    slope: Option<Interval>,\n    mean: Option<Interval>,\n    median: Option<Interval>,\n    r_2: Option<Interval>,\n    std_dev: Option<Interval>,\n    med_abs_dev: Option<Interval>,\n}\n\nfn make_detail_re(name: &str) -> Regex {\n    Regex::new(&format!(r\"{}\\s+\\[(\\d+\\.\\d+ \\w+) (\\d+\\.\\d+ \\w+)\\]\", name)).expect(\"invalid regex\")\n}\n\n/// Parses the output of `cargo bench -p storage-proofs --bench <benchmark> -- --verbose --colors never`.\nfn parse_criterion_out(s: impl AsRef<str>) -> Result<Vec<CriterionResult>> {\n    let mut res = Vec::new();\n\n    let start_re = Regex::new(r\"^Benchmarking ([^:]+)$\").expect(\"invalid regex\");\n    let sample_re = Regex::new(r\"Collecting (\\d+) samples\").expect(\"invalid regex\");\n    let time_re = Regex::new(r\"time:\\s+\\[(\\d+\\.\\d+ \\w+) (\\d+\\.\\d+ \\w+) (\\d+\\.\\d+ \\w+)]\")\n        .expect(\"invalid regex\");\n\n    let throughput_re =\n        Regex::new(r\"thrpt:\\s+\\[(\\d+\\.\\d+ \\w+/s) (\\d+\\.\\d+ \\w+/s) (\\d+\\.\\d+ \\w+/s)]\")\n            .expect(\"invalid regex\");\n\n    let slope_re = make_detail_re(\"slope\");\n    let r_2_re = Regex::new(r\"R\\^2\\s+\\[(\\d+\\.\\d+) (\\d+\\.\\d+)\\]\").expect(\"invalid regex\");\n    let mean_re = make_detail_re(\"mean\");\n    let std_dev_re = make_detail_re(r\"std\\. dev\\.\");\n    let median_re = make_detail_re(\"median\");\n    let med_abs_dev_re = make_detail_re(r\"med\\. abs\\. 
dev\\.\");\n\n    #[allow(clippy::type_complexity)]\n    let mut current: Option<(\n        String,\n        Option<u32>,\n        Option<Point>,\n        Option<Interval>,\n        Option<Interval>,\n        Option<Point>,\n        Option<Interval>,\n        Option<Interval>,\n        Option<Interval>,\n        Option<Interval>,\n        Option<Interval>,\n        Option<Interval>,\n    )> = None;\n\n    for line in s.as_ref().lines() {\n        if let Some(caps) = start_re.captures(line) {\n            if current.is_some() {\n                let r = current.take().unwrap();\n                res.push(CriterionResult {\n                    name: r.0,\n                    samples: r.1.unwrap_or_default(),\n                    time_med: r.2.unwrap_or_default(),\n                    time: r.3.unwrap_or_default(),\n                    throughput: r.4,\n                    throughput_med: r.5,\n                    slope: r.6,\n                    mean: r.7,\n                    median: r.8,\n                    r_2: r.9,\n                    std_dev: r.10,\n                    med_abs_dev: r.11,\n                });\n            }\n            current = Some((\n                caps[1].to_string(),\n                None,\n                None,\n                None,\n                None,\n                None,\n                None,\n                None,\n                None,\n                None,\n                None,\n                None,\n            ));\n        }\n\n        if let Some(ref mut current) = current {\n            // Samples\n            if let Some(caps) = sample_re.captures(line) {\n                current.1 = Some(caps[1].parse().unwrap_or_default());\n            }\n\n            // Time\n            if let Some(caps) = time_re.captures(line) {\n                current.2 = Some(Point {\n                    value: time_to_us(&caps[2]),\n                    unit: Some(\"us\".to_string()),\n                });\n                current.3 = Some(Interval {\n                    start: time_to_us(&caps[1]),\n                    end: time_to_us(&caps[3]),\n                    unit: Some(\"us\".to_string()),\n                });\n            }\n\n            // Throughput\n            if let Some(caps) = throughput_re.captures(line) {\n                current.4 = Some(Interval {\n                    start: throughput_val(&caps[1]),\n                    end: throughput_val(&caps[3]),\n                    unit: Some(throughput_to_uom(&caps[1])),\n                });\n                current.5 = Some(Point {\n                    value: throughput_val(&caps[2]),\n                    unit: Some(throughput_to_uom(&caps[2])),\n                });\n            }\n\n            // Slope\n            if let Some(caps) = slope_re.captures(line) {\n                current.6 = Some(Interval {\n                    start: time_to_us(&caps[1]),\n                    end: time_to_us(&caps[2]),\n                    unit: Some(\"us\".to_string()),\n                });\n            }\n\n            // Mean\n            if let Some(caps) = mean_re.captures(line) {\n                current.7 = Some(Interval {\n                    start: time_to_us(&caps[1]),\n                    end: time_to_us(&caps[2]),\n                    unit: Some(\"us\".to_string()),\n                });\n            }\n\n            // median\n            if let Some(caps) = median_re.captures(line) {\n                current.8 = Some(Interval {\n                    start: time_to_us(&caps[1]),\n                    end: 
time_to_us(&caps[2]),\n                    unit: Some(\"us\".to_string()),\n                });\n            }\n\n            // R^2\n            if let Some(caps) = r_2_re.captures(line) {\n                current.9 = Some(Interval {\n                    start: caps[1].parse().unwrap(),\n                    end: caps[2].parse().unwrap(),\n                    unit: None,\n                });\n            }\n\n            // std.dev\n            if let Some(caps) = std_dev_re.captures(line) {\n                current.10 = Some(Interval {\n                    start: time_to_us(&caps[1]),\n                    end: time_to_us(&caps[2]),\n                    unit: Some(\"us\".to_string()),\n                });\n            }\n\n            // med.abs.dev\n            if let Some(caps) = med_abs_dev_re.captures(line) {\n                current.11 = Some(Interval {\n                    start: time_to_us(&caps[1]),\n                    end: time_to_us(&caps[2]),\n                    unit: Some(\"us\".to_string()),\n                });\n            }\n        }\n    }\n\n    if current.is_some() {\n        let r = current.take().unwrap();\n        res.push(CriterionResult {\n            name: r.0,\n            samples: r.1.unwrap_or_default(),\n            time_med: r.2.unwrap_or_default(),\n            time: r.3.unwrap_or_default(),\n            throughput: r.4,\n            throughput_med: r.5,\n            slope: r.6,\n            mean: r.7,\n            median: r.8,\n            r_2: r.9,\n            std_dev: r.10,\n            med_abs_dev: r.11,\n        });\n    }\n    Ok(res)\n}\n\n/// parses a string of the form \"521.80 KiB/s\".\nfn throughput_to_uom(s: &str) -> String {\n    let parts = s.trim().split_whitespace().collect::<Vec<_>>();\n    assert_eq!(parts.len(), 2, \"invalid val: {:?}\", parts);\n    let _: f64 = parts[0].parse().expect(\"invalid number\");\n    parts[1].to_string()\n}\n\n/// parses a string of the form \"521.80 KiB/s\".\nfn throughput_val(s: &str) -> f64 {\n    let parts = s.trim().split_whitespace().collect::<Vec<_>>();\n    assert_eq!(parts.len(), 2, \"invalid val: {:?}\", parts);\n    let ts: f64 = parts[0].parse().expect(\"invalid number\");\n    ts\n}\n\n/// parses a string of the form \"123.12 us\".\nfn time_to_us(s: &str) -> f64 {\n    let parts = s.trim().split_whitespace().collect::<Vec<_>>();\n    assert_eq!(parts.len(), 2, \"invalid val: {:?}\", parts);\n    let ts: f64 = parts[0].parse().expect(\"invalid number\");\n    let normalized = match parts[1] {\n        \"ps\" => ts / 1_000_000.,\n        \"ns\" => ts / 1000.,\n        \"us\" => ts,\n        \"ms\" => ts * 1000.,\n        \"s\" => ts * 1000. 
* 1000.,\n        _ => panic!(\"unknown unit: {}\", parts[1]),\n    };\n\n    (normalized * 10000.0).round() / 10000.0\n}\n\nfn run_benches(mut args: Vec<String>) -> Result<()> {\n    let is_verbose = if let Some(index) = args.iter().position(|a| a.as_str() == \"--verbose\") {\n        args.remove(index);\n        true\n    } else {\n        false\n    };\n\n    let mut cmd = command!(\n        r\"\n        cargo bench -p storage-proofs {args} -- --verbose --color never\n    \",\n        args = args\n    )\n    .map_err(|err| anyhow!(\"{:?}\", err))?;\n\n    let process = cmd.stdout(std::process::Stdio::piped()).spawn()?;\n\n    let stdout = process.stdout.context(\"Failed to capture stdout\")?;\n\n    let reader = std::io::BufReader::new(stdout);\n    let mut stdout = String::new();\n    reader.lines().for_each(|line| {\n        let line = line.unwrap();\n        if is_verbose {\n            println!(\"{}\", &line);\n        }\n        stdout += &line;\n        stdout += \"\\n\";\n    });\n\n    let parsed_results = parse_criterion_out(stdout)?;\n\n    let wrapped = Metadata::wrap(parsed_results)?;\n\n    serde_json::to_writer(io::stdout(), &wrapped).context(\"cannot write report-JSON to stdout\")?;\n\n    Ok(())\n}\n\nfn main() {\n    let pass_through = std::env::args().skip(1).collect();\n\n    match run_benches(pass_through) {\n        Ok(()) => {}\n        Err(err) => {\n            eprintln!(\"{}\", err);\n            std::process::exit(1);\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_time_to_us() {\n        assert_eq!(time_to_us(\"123.12 us\"), 123.12);\n        assert_eq!(time_to_us(\"1.0 s\"), 1_000_000.);\n    }\n\n    #[test]\n    fn test_throughput_uom() {\n        assert_eq!(throughput_to_uom(\"521.80 KiB/s\"), \"KiB/s\");\n        assert_eq!(throughput_to_uom(\"521.80 MiB/hr\"), \"MiB/hr\");\n    }\n\n    #[test]\n    fn test_parse_criterion_no_throughput() {\n        let stdout = \"Benchmarking merkletree/blake2s/128\nBenchmarking merkletree/blake2s/128: Warming up for 3.0000 s\nBenchmarking merkletree/blake2s/128: Collecting 20 samples in estimated 5.0192 s (39060 iterations)\nBenchmarking merkletree/blake2s/128: Analyzing\nmerkletree/blake2s/128  time:   [141.11 us 151.42 us 159.66 us]\n                    change: [-25.163% -21.490% -17.475%] (p = 0.00 < 0.05)\n                    Performance has improved.\nFound 4 outliers among 20 measurements (20.00%)\n1 (5.00%) high mild\n3 (15.00%) high severe\nslope  [141.11 us 159.66 us] R^2            [0.8124914 0.8320154]\nmean   [140.55 us 150.62 us] std. dev.      [5.6028 us 15.213 us]\nmedian [138.33 us 143.23 us] med. abs. dev. 
[1.7507 ms 8.4109 ms]\";\n\n        let parsed = parse_criterion_out(stdout).unwrap();\n        assert_eq!(\n            parsed,\n            vec![CriterionResult {\n                name: \"merkletree/blake2s/128\".into(),\n                samples: 20,\n                time_med: Point {\n                    unit: Some(\"us\".to_string()),\n                    value: 151.42,\n                },\n                time: Interval {\n                    start: 141.11,\n                    end: 159.66,\n                    unit: Some(\"us\".to_string())\n                },\n                throughput: None,\n                throughput_med: None,\n                slope: Some(Interval {\n                    start: 141.11,\n                    end: 159.66,\n                    unit: Some(\"us\".to_string())\n                }),\n                mean: Some(Interval {\n                    start: 140.55,\n                    end: 150.62,\n                    unit: Some(\"us\".to_string())\n                }),\n                median: Some(Interval {\n                    start: 138.33,\n                    end: 143.23,\n                    unit: Some(\"us\".to_string())\n                }),\n                r_2: Some(Interval {\n                    start: 0.8124914,\n                    end: 0.8320154,\n                    unit: None\n                }),\n                std_dev: Some(Interval {\n                    start: 5.6028,\n                    end: 15.213,\n                    unit: Some(\"us\".to_string())\n                }),\n                med_abs_dev: Some(Interval {\n                    start: 1750.7,\n                    end: 8410.9,\n                    unit: Some(\"us\".to_string())\n                }),\n            }]\n        );\n    }\n\n    #[test]\n    fn test_parse_criterion_with_throughput() {\n        let with_throughput = \"Benchmarking merkletree/blake2s/128\nBenchmarking merkletree/blake2s/128: Warming up for 3.0000 s\nBenchmarking merkletree/blake2s/128: Collecting 20 samples in estimated 5.0192 s (39060 iterations)\nBenchmarking merkletree/blake2s/128: Analyzing\nmerkletree/blake2s/128\n                    time:   [141.11 us 151.42 us 159.66 us]\n                    thrpt:  [68.055 MiB/s 68.172 MiB/s 68.644 MiB/s]\n             change:\n                    time:   [-25.163% -21.490% -17.475%] (p = 0.00 < 0.05)\n                    thrpt:  [-25.163% -21.490% -17.475%] (p = 0.00 < 0.05)\n                    Performance has improved.\nFound 4 outliers among 20 measurements (20.00%)\n1 (5.00%) high mild\n3 (15.00%) high severe\nslope  [141.11 us 159.66 us] R^2            [0.8124914 0.8320154]\nmean   [140.55 us 150.62 us] std. dev.      [5.6028 us 15.213 us]\nmedian [138.33 us 143.23 us] med. abs. dev. 
[1.7507 ms 8.4109 ms]\";\n\n        let parsed = parse_criterion_out(with_throughput).unwrap();\n        assert_eq!(\n            parsed,\n            vec![CriterionResult {\n                name: \"merkletree/blake2s/128\".into(),\n                samples: 20,\n                time_med: Point {\n                    unit: Some(\"us\".to_string()),\n                    value: 151.42,\n                },\n                time: Interval {\n                    start: 141.11,\n                    end: 159.66,\n                    unit: Some(\"us\".to_string())\n                },\n                throughput: Some(Interval {\n                    start: 68.055,\n                    end: 68.644,\n                    unit: Some(\"MiB/s\".to_string())\n                }),\n                throughput_med: Some(Point {\n                    value: 68.172,\n                    unit: Some(\"MiB/s\".to_string())\n                }),\n                slope: Some(Interval {\n                    start: 141.11,\n                    end: 159.66,\n                    unit: Some(\"us\".to_string())\n                }),\n                mean: Some(Interval {\n                    start: 140.55,\n                    end: 150.62,\n                    unit: Some(\"us\".to_string())\n                }),\n                median: Some(Interval {\n                    start: 138.33,\n                    end: 143.23,\n                    unit: Some(\"us\".to_string())\n                }),\n                r_2: Some(Interval {\n                    start: 0.8124914,\n                    end: 0.8320154,\n                    unit: None\n                }),\n                std_dev: Some(Interval {\n                    start: 5.6028,\n                    end: 15.213,\n                    unit: Some(\"us\".to_string())\n                }),\n                med_abs_dev: Some(Interval {\n                    start: 1750.7,\n                    end: 8410.9,\n                    unit: Some(\"us\".to_string())\n                }),\n            }]\n        );\n    }\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/settings/main.rs",
    "content": "use anyhow::Result;\n\nuse storage_proofs_core::settings::SETTINGS;\n\nfn main() -> Result<()> {\n    println!(\"{:#?}\", *SETTINGS);\n    Ok(())\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/bin/update_tree_r_cache/main.rs",
    "content": "use std::fs::{self, create_dir_all, remove_dir_all, OpenOptions};\nuse std::path::{Path, PathBuf};\n\nuse anyhow::{ensure, Context, Result};\nuse bincode::deserialize;\nuse clap::{value_t, App, Arg, SubCommand};\nuse filecoin_hashers::Hasher;\nuse filecoin_proofs::{\n    is_sector_shape_base, is_sector_shape_sub2, is_sector_shape_sub8, is_sector_shape_top2,\n    with_shape, DefaultTreeDomain, PersistentAux, SectorShapeBase, SectorShapeSub2,\n    SectorShapeSub8, SectorShapeTop2, OCT_ARITY,\n};\nuse generic_array::typenum::Unsigned;\nuse memmap::MmapOptions;\nuse merkletree::{\n    merkle::get_merkle_tree_len,\n    store::{ExternalReader, ReplicaConfig, Store, StoreConfig},\n};\nuse storage_proofs_core::{\n    cache_key::CacheKey,\n    merkle::{\n        create_lc_tree, get_base_tree_count, split_config_and_replica, LCStore, LCTree,\n        MerkleTreeTrait,\n    },\n    util::{default_rows_to_discard, NODE_SIZE},\n};\nuse tempfile::tempdir;\n\nfn get_tree_r_info(\n    sector_size: usize,\n    cache: &PathBuf,\n    replica_path: &PathBuf,\n) -> Result<(usize, usize, Vec<StoreConfig>, ReplicaConfig)> {\n    let tree_count = with_shape!(sector_size as u64, get_base_tree_count);\n\n    // Number of nodes per base tree\n    let base_tree_leafs = sector_size / NODE_SIZE / tree_count;\n\n    // If the cache dir doesn't exist, create it\n    if !Path::new(&cache).exists() {\n        create_dir_all(&cache)?;\n    }\n\n    // Create a StoreConfig from the provided cache path\n    let tree_r_last_config = StoreConfig::new(\n        &cache,\n        CacheKey::CommRLastTree.to_string(),\n        default_rows_to_discard(base_tree_leafs, OCT_ARITY),\n    );\n\n    // Split the config based on the number of nodes required\n    let (configs, replica_config) = split_config_and_replica(\n        tree_r_last_config,\n        replica_path.clone(),\n        base_tree_leafs,\n        tree_count,\n    )?;\n\n    Ok((tree_count, base_tree_leafs, configs, replica_config))\n}\n\nfn get_tree_r_last_root(\n    base_tree_leafs: usize,\n    sector_size: u64,\n    configs: &[StoreConfig],\n    replica_config: &ReplicaConfig,\n) -> Result<DefaultTreeDomain> {\n    let base_tree_len = get_merkle_tree_len(base_tree_leafs, OCT_ARITY)?;\n    let tree_r_last_root = if is_sector_shape_base(sector_size) {\n        ensure!(configs.len() == 1, \"Invalid tree-shape specified\");\n        let store = LCStore::<DefaultTreeDomain>::new_from_disk_with_reader(\n            base_tree_len,\n            OCT_ARITY,\n            &configs[0],\n            ExternalReader::new_from_path(&replica_config.path)?,\n        )?;\n\n        let tree_r_last = SectorShapeBase::from_data_store(store, base_tree_leafs)?;\n        tree_r_last.root()\n    } else if is_sector_shape_sub2(sector_size) {\n        let tree_r_last = SectorShapeSub2::from_store_configs_and_replica(\n            base_tree_leafs,\n            &configs,\n            &replica_config,\n        )?;\n        tree_r_last.root()\n    } else if is_sector_shape_sub8(sector_size) {\n        let tree_r_last = SectorShapeSub8::from_store_configs_and_replica(\n            base_tree_leafs,\n            &configs,\n            &replica_config,\n        )?;\n        tree_r_last.root()\n    } else if is_sector_shape_top2(sector_size) {\n        let tree_r_last = SectorShapeTop2::from_sub_tree_store_configs_and_replica(\n            base_tree_leafs,\n            &configs,\n            &replica_config,\n        )?;\n        tree_r_last.root()\n    } else {\n        panic!(\"Unsupported 
sector size\");\n    };\n\n    Ok(tree_r_last_root)\n}\n\nfn get_persistent_aux(cache: &PathBuf) -> Result<PersistentAux<DefaultTreeDomain>> {\n    let p_aux: PersistentAux<DefaultTreeDomain> = {\n        let p_aux_path = cache.join(CacheKey::PAux.to_string());\n        let p_aux_bytes = fs::read(&p_aux_path)\n            .with_context(|| format!(\"could not read file p_aux={:?}\", p_aux_path))?;\n\n        deserialize(&p_aux_bytes)\n    }?;\n\n    Ok(p_aux)\n}\n\nfn build_tree_r_last<Tree: MerkleTreeTrait>(\n    sector_size: usize,\n    cache: &PathBuf,\n    replica_path: &PathBuf,\n) -> Result<(<Tree::Hasher as Hasher>::Domain, Vec<DefaultTreeDomain>)> {\n    let (tree_count, base_tree_leafs, configs, replica_config) =\n        get_tree_r_info(sector_size, &cache, &replica_path)?;\n\n    let f_data = OpenOptions::new()\n        .read(true)\n        .write(true)\n        .open(&replica_path)\n        .with_context(|| format!(\"could not open replica_path={:?}\", replica_path))?;\n    let input_mmap = unsafe {\n        MmapOptions::new()\n            .map(&f_data)\n            .with_context(|| format!(\"could not mmap replica_path={:?}\", replica_path))?\n    };\n\n    let mut base_tree_roots: Vec<DefaultTreeDomain> = Vec::with_capacity(tree_count);\n    for (i, config) in configs.iter().enumerate().take(tree_count) {\n        let offset = replica_config.offsets[i];\n\n        let slice = &input_mmap[offset..(offset + (sector_size / tree_count))];\n        let store_path = StoreConfig::data_path(&config.path, &config.id);\n        println!(\n            \"Building tree_r_last {}/{}, [nodes={}, rows_to_discard={}, offsets={}-{}] in {:?}\",\n            i + 1,\n            tree_count,\n            base_tree_leafs,\n            config.rows_to_discard,\n            offset,\n            (offset + (sector_size / tree_count)),\n            &store_path\n        );\n        let tree = SectorShapeBase::from_byte_slice_with_config(slice, config.clone())?;\n        base_tree_roots.push(tree.root());\n    }\n\n    let tree_r_last = create_lc_tree::<\n        LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n    >(\n        get_merkle_tree_len(base_tree_leafs, Tree::Arity::to_usize())?,\n        &configs,\n        &replica_config,\n    )?;\n\n    Ok((tree_r_last.root(), base_tree_roots))\n}\n\nfn run_rebuild(\n    sector_size: usize,\n    cache: PathBuf,\n    replica_path: PathBuf,\n) -> Result<(DefaultTreeDomain, Vec<DefaultTreeDomain>)> {\n    with_shape!(\n        sector_size as u64,\n        build_tree_r_last,\n        sector_size,\n        &cache,\n        &replica_path\n    )\n}\n\nfn run_inspect(sector_size: usize, cache: PathBuf, replica_path: PathBuf) -> Result<()> {\n    let (_tree_count, base_tree_leafs, configs, replica_config) =\n        get_tree_r_info(sector_size, &cache, &replica_path)?;\n    let tree_r_last_root = get_tree_r_last_root(\n        base_tree_leafs,\n        sector_size as u64,\n        &configs,\n        &replica_config,\n    )?;\n    let p_aux = get_persistent_aux(&cache)?;\n\n    println!(\"CommRLast from p_aux: {:?}\", p_aux.comm_r_last);\n    println!(\n        \"CommRLast [cached tree_r_last root]: {:?}\",\n        tree_r_last_root\n    );\n    let status = if tree_r_last_root == p_aux.comm_r_last {\n        \"MATCH\"\n    } else {\n        \"MISMATCH\"\n    };\n    println!(\"Cached inspection shows a {} of CommRLast\", status);\n\n    Ok(())\n}\n\nfn run_verify(sector_size: usize, cache: PathBuf, replica_path: PathBuf) -> Result<()> {\n    
let (tree_count, base_tree_leafs, configs, replica_config) =\n        get_tree_r_info(sector_size, &cache, &replica_path)?;\n    let base_tree_len = get_merkle_tree_len(base_tree_leafs, OCT_ARITY)?;\n\n    let match_str = |a, b| -> &str {\n        if a == b {\n            \"MATCH\"\n        } else {\n            \"MISMATCH\"\n        }\n    };\n\n    // First, read the roots from the cached trees on disk\n    let mut cached_base_tree_roots: Vec<DefaultTreeDomain> = Vec::with_capacity(tree_count);\n    for (i, config) in configs.iter().enumerate().take(tree_count) {\n        let store = LCStore::new_from_disk_with_reader(\n            base_tree_len,\n            OCT_ARITY,\n            &config,\n            ExternalReader::new_from_config(&replica_config, i)?,\n        )?;\n        cached_base_tree_roots.push(store.last()?);\n    }\n\n    // Retrieve the tree_r_last root from the cached trees on disk.\n    let tree_r_last_root = get_tree_r_last_root(\n        base_tree_leafs,\n        sector_size as u64,\n        &configs,\n        &replica_config,\n    )?;\n\n    // Read comm_r_last from the persistent aux in the cache dir\n    let p_aux: PersistentAux<DefaultTreeDomain> = {\n        let p_aux_path = cache.join(CacheKey::PAux.to_string());\n        let p_aux_bytes = fs::read(&p_aux_path)\n            .with_context(|| format!(\"could not read file p_aux={:?}\", p_aux_path))?;\n\n        deserialize(&p_aux_bytes)\n    }?;\n\n    // Rebuild each of the tree_r_last base trees (in a new temp dir so as not to interfere\n    // with any existing ones on disk) and check if the roots match what's cached on disk\n    let tmp_dir = tempdir().unwrap();\n    let tmp_path = tmp_dir.path();\n    create_dir_all(&tmp_path)?;\n\n    let (rebuilt_tree_r_last_root, rebuilt_base_tree_roots) =\n        run_rebuild(sector_size, tmp_path.to_path_buf(), replica_path)?;\n\n    remove_dir_all(&tmp_path)?;\n\n    let status = match_str(tree_r_last_root, p_aux.comm_r_last);\n    let rebuilt_status = match_str(rebuilt_tree_r_last_root, p_aux.comm_r_last);\n\n    println!();\n    for (i, (cached_root, rebuilt_root)) in cached_base_tree_roots\n        .iter()\n        .zip(rebuilt_base_tree_roots)\n        .enumerate()\n    {\n        println!(\n            \"tree_r_last {}/{} inspection shows a {} of base tree root {:?}\",\n            i + 1,\n            tree_count,\n            match_str(*cached_root, rebuilt_root),\n            rebuilt_root\n        );\n        if *cached_root != rebuilt_root {\n            println!(\n                \"Cached root {:?}, Rebuilt root {:?}\",\n                cached_root, rebuilt_root\n            );\n        }\n    }\n\n    println!();\n    println!(\n        \"CommRLast from p_aux                : {:?}\",\n        p_aux.comm_r_last\n    );\n    println!(\n        \"CommRLast [cached tree_r_last root] : {:?}\",\n        tree_r_last_root\n    );\n    println!(\n        \"CommRLast [rebuilt tree_r_last root]: {:?}\",\n        rebuilt_tree_r_last_root\n    );\n    println!();\n    println!(\n        \" Cached inspection shows a {} of CommRLast {:?}\",\n        status, tree_r_last_root\n    );\n    println!(\n        \"Rebuilt inspection shows a {} of CommRLast {:?}\",\n        rebuilt_status, rebuilt_tree_r_last_root\n    );\n\n    Ok(())\n}\n\nfn main() -> Result<()> {\n    fil_logger::init();\n\n    let rebuild_cmd = SubCommand::with_name(\"rebuild\")\n        .about(\"Rebuild tree_r_last trees from replica\")\n        .arg(\n            Arg::with_name(\"size\")\n                
.required(true)\n                .long(\"size\")\n                .default_value(\"34359738368\")\n                .help(\"The data size in bytes\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"replica\")\n                .long(\"replica\")\n                .help(\"The replica file\")\n                .required(true)\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"cache\")\n                .long(\"cache\")\n                .help(\"The cache directory for the output trees\")\n                .required(true)\n                .takes_value(true),\n        );\n\n    let inspect_cmd = SubCommand::with_name(\"inspect\")\n        .about(\"Inspect tree_r_last trees and match with p_aux in cache\")\n        .arg(\n            Arg::with_name(\"size\")\n                .required(true)\n                .long(\"size\")\n                .default_value(\"34359738368\")\n                .help(\"The data size in bytes\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"replica\")\n                .long(\"replica\")\n                .help(\"The replica file\")\n                .required(true)\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"cache\")\n                .long(\"cache\")\n                .help(\"The cache directory for the output trees\")\n                .required(true)\n                .takes_value(true),\n        );\n\n    let verify_cmd = SubCommand::with_name(\"verify\")\n        .about(\"Verify tree_r_last trees and check for cache mis-match\")\n        .arg(\n            Arg::with_name(\"size\")\n                .required(true)\n                .long(\"size\")\n                .default_value(\"34359738368\")\n                .help(\"The data size in bytes\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"replica\")\n                .long(\"replica\")\n                .help(\"The replica file\")\n                .required(true)\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"cache\")\n                .long(\"cache\")\n                .help(\"The cache directory for the output trees\")\n                .required(true)\n                .takes_value(true),\n        );\n\n    let matches = App::new(\"update_tree_r_cache\")\n        .version(\"0.1\")\n        .subcommand(rebuild_cmd)\n        .subcommand(inspect_cmd)\n        .subcommand(verify_cmd)\n        .get_matches();\n\n    match matches.subcommand() {\n        (\"rebuild\", Some(m)) => {\n            let cache = value_t!(m, \"cache\", PathBuf)?;\n            let replica = value_t!(m, \"replica\", PathBuf)?;\n            let size = value_t!(m, \"size\", usize)\n                .expect(\"could not convert `size` CLI argument to `usize`\");\n            run_rebuild(size, cache, replica)?;\n        }\n        (\"inspect\", Some(m)) => {\n            let cache = value_t!(m, \"cache\", PathBuf)?;\n            let replica = value_t!(m, \"replica\", PathBuf)?;\n            let size = value_t!(m, \"size\", usize)\n                .expect(\"could not convert `size` CLI argument to `usize`\");\n            run_inspect(size, cache, replica)?;\n        }\n        (\"verify\", Some(m)) => {\n            let cache = value_t!(m, \"cache\", PathBuf)?;\n            let replica = value_t!(m, \"replica\", PathBuf)?;\n            let size = value_t!(m, \"size\", usize)\n                
.expect(\"could not convert `size` CLI argument to `usize`\");\n            run_verify(size, cache, replica)?;\n        }\n        _ => panic!(\"Unrecognized subcommand\"),\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/lib.rs",
    "content": "#![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)]\n#![warn(clippy::unwrap_used)]\n#![warn(clippy::needless_collect)]\n\npub mod measure;\npub mod metadata;\npub mod shared;\npub use measure::{measure, FuncMeasurement};\npub use metadata::Metadata;\npub use shared::{create_replica, create_replicas};\n"
  },
  {
    "path": "fil-proofs-tooling/src/measure.rs",
    "content": "use std::time::{Duration, Instant};\n\nuse anyhow::Result;\nuse cpu_time::ProcessTime;\n\npub struct FuncMeasurement<T> {\n    pub cpu_time: Duration,\n    pub wall_time: Duration,\n    pub return_value: T,\n}\n\npub fn measure<T, F>(f: F) -> Result<FuncMeasurement<T>>\nwhere\n    F: FnOnce() -> Result<T>,\n{\n    let cpu_time_start = ProcessTime::now();\n    let wall_start_time = Instant::now();\n\n    let x = f()?;\n\n    Ok(FuncMeasurement {\n        cpu_time: cpu_time_start.elapsed(),\n        wall_time: wall_start_time.elapsed(),\n        return_value: x,\n    })\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/metadata.rs",
    "content": "use anyhow::{anyhow, Result};\nuse chrono::{DateTime, TimeZone, Utc};\nuse git2::Repository;\nuse serde::Serialize;\n\n/// Captures metadata about the current setup.\n#[derive(Debug, Serialize)]\n#[serde(rename_all = \"kebab-case\")]\npub struct Metadata<T> {\n    git: GitMetadata,\n    system: SystemMetadata,\n    benchmarks: T,\n}\n\nimpl<T> Metadata<T> {\n    pub fn wrap(benchmarks: T) -> Result<Self> {\n        Ok(Metadata {\n            git: GitMetadata::new()?,\n            system: SystemMetadata::new()?,\n            benchmarks,\n        })\n    }\n}\n\n/// Captures git specific metadata about the current repo.\n#[derive(Debug, Serialize)]\n#[serde(rename_all = \"kebab-case\")]\npub struct GitMetadata {\n    hash: String,\n    date: DateTime<Utc>,\n}\n\nimpl GitMetadata {\n    pub fn new() -> Result<Self> {\n        let repo_path = if let Ok(mdir) = std::env::var(\"CARGO_MANIFEST_DIR\") {\n            std::path::Path::new(&mdir).into()\n        } else {\n            std::env::current_dir()?\n        };\n        let repo = Repository::discover(&repo_path)?;\n        let head = repo.head()?;\n        let commit = head.peel_to_commit()?;\n        let date = Utc.timestamp(commit.time().seconds(), 0);\n\n        Ok(GitMetadata {\n            hash: commit.id().to_string(),\n            date,\n        })\n    }\n}\n\n#[derive(Debug, Serialize)]\n#[serde(rename_all = \"kebab-case\")]\npub struct SystemMetadata {\n    system: String,\n    release: String,\n    version: String,\n    architecture: String,\n    processor: String,\n    processor_base_frequency_hz: u16,\n    processor_max_frequency_hz: u16,\n    processor_features: String,\n    processor_cores_logical: u64,\n    processor_cores_physical: u64,\n    memory_total_bytes: u64,\n}\n\nimpl SystemMetadata {\n    pub fn new() -> Result<Self> {\n        use async_std::task::block_on;\n        let host = block_on(async { heim::host::platform().await })\n            .map_err(|_| anyhow!(\"Failed to retrieve host information\"))?;\n        let memory = block_on(async { heim::memory::memory().await })\n            .map_err(|_| anyhow!(\"Failed to retrieve memory information\"))?;\n        let cpu_logical = block_on(async { heim::cpu::logical_count().await })\n            .map_err(|_| anyhow!(\"Failed to retrieve cpu logical count information\"))?;\n        let cpu_physical = block_on(async { heim::cpu::physical_count().await })\n            .map_err(|_| anyhow!(\"Failed to retrieve cpu physical count information\"))?;\n\n        let (processor, base, max, features) = {\n            #[cfg(target_arch = \"x86_64\")]\n            {\n                let cpuid = raw_cpuid::CpuId::new();\n                let processor = cpuid\n                    .get_extended_function_info()\n                    .and_then(|info| info.processor_brand_string().map(|s| s.to_string()))\n                    .unwrap_or_default();\n                let (base, max) = cpuid\n                    .get_processor_frequency_info()\n                    .map(|info| {\n                        (\n                            info.processor_base_frequency(),\n                            info.processor_max_frequency(),\n                        )\n                    })\n                    .unwrap_or_default();\n                (\n                    processor,\n                    base,\n                    max,\n                    cpuid\n                        .get_feature_info()\n                        .map(|info| format!(\"{:?}\", info))\n                        
.unwrap_or_default(),\n                )\n            }\n            #[cfg(not(target_arch = \"x86_64\"))]\n            {\n                (\"unknown\".into(), 0, 0, \"unknown\".into())\n            }\n        };\n\n        Ok(SystemMetadata {\n            system: host.system().into(),\n            release: host.release().into(),\n            version: host.version().into(),\n            architecture: host.architecture().as_str().into(),\n            processor,\n            processor_base_frequency_hz: base,\n            processor_max_frequency_hz: max,\n            processor_features: features,\n            processor_cores_logical: cpu_logical,\n            processor_cores_physical: cpu_physical.unwrap_or_default(),\n            memory_total_bytes: memory.total().get::<uom::si::information::byte>(),\n        })\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_metadata() {\n        let m = Metadata::wrap(()).expect(\"failed to create metadata\");\n        println!(\"{:#?}\", m);\n\n        assert!(m.system.memory_total_bytes > 0);\n    }\n}\n"
  },
  {
    "path": "fil-proofs-tooling/src/shared.rs",
    "content": "use std::cmp::min;\nuse std::io::{BufWriter, Seek, SeekFrom, Write};\n\nuse filecoin_proofs::{\n    add_piece, seal_pre_commit_phase1, seal_pre_commit_phase2, validate_cache_for_precommit_phase2,\n    MerkleTreeTrait, PaddedBytesAmount, PieceInfo, PoRepConfig, PoRepProofPartitions,\n    PrivateReplicaInfo, PublicReplicaInfo, SealPreCommitOutput, SectorSize, UnpaddedBytesAmount,\n    POREP_PARTITIONS,\n};\nuse log::info;\nuse rand::{random, thread_rng, RngCore};\nuse rayon::prelude::{\n    IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator,\n};\nuse storage_proofs_core::{api_version::ApiVersion, sector::SectorId};\nuse tempfile::{tempdir, NamedTempFile};\n\nuse crate::{measure, FuncMeasurement};\n\npub const PROVER_ID: [u8; 32] = [9; 32];\npub const RANDOMNESS: [u8; 32] = [44; 32];\npub const TICKET_BYTES: [u8; 32] = [1; 32];\n\npub struct PreCommitReplicaOutput<Tree: 'static + MerkleTreeTrait> {\n    pub piece_info: Vec<PieceInfo>,\n    pub private_replica_info: PrivateReplicaInfo<Tree>,\n    pub public_replica_info: PublicReplicaInfo,\n}\n\npub fn create_piece(piece_bytes: UnpaddedBytesAmount) -> NamedTempFile {\n    info!(\"create_piece\");\n    let mut file = NamedTempFile::new().expect(\"failed to create piece file\");\n    {\n        let mut writer = BufWriter::new(&mut file);\n        let mut len = u64::from(piece_bytes) as usize;\n        let chunk_size = 8 * 1024 * 1024;\n        let mut buffer = vec![0u8; chunk_size];\n        thread_rng().fill_bytes(&mut buffer);\n\n        while len > 0 {\n            let to_write = min(len, chunk_size);\n            writer\n                .write_all(&buffer[..to_write])\n                .expect(\"failed to write buffer\");\n            len -= to_write;\n        }\n    }\n    assert_eq!(\n        u64::from(piece_bytes),\n        file.as_file()\n            .metadata()\n            .expect(\"failed to get file metadata\")\n            .len()\n    );\n\n    file.as_file_mut()\n        .sync_all()\n        .expect(\"failed to sync piece file\");\n\n    file.as_file_mut()\n        .seek(SeekFrom::Start(0))\n        .expect(\"failed to seek to beginning of piece file\");\n\n    file\n}\n\n/// Create a replica for a single sector\npub fn create_replica<Tree: 'static + MerkleTreeTrait>(\n    sector_size: u64,\n    porep_id: [u8; 32],\n    api_version: ApiVersion,\n) -> (SectorId, PreCommitReplicaOutput<Tree>) {\n    let (_porep_config, result) =\n        create_replicas::<Tree>(SectorSize(sector_size), 1, false, porep_id, api_version);\n    // Extract the sector ID and replica output out of the result\n    result\n        .expect(\"create_replicas() failed when called with only_add==false\")\n        .0\n        .pop()\n        .expect(\"failed to create replica outputs\")\n}\n\n#[allow(clippy::type_complexity)]\npub fn create_replicas<Tree: 'static + MerkleTreeTrait>(\n    sector_size: SectorSize,\n    qty_sectors: usize,\n    only_add: bool,\n    porep_id: [u8; 32],\n    api_version: ApiVersion,\n) -> (\n    PoRepConfig,\n    Option<(\n        Vec<(SectorId, PreCommitReplicaOutput<Tree>)>,\n        FuncMeasurement<Vec<SealPreCommitOutput>>,\n    )>,\n) {\n    info!(\"creating replicas: {:?} - {}\", sector_size, qty_sectors);\n    let sector_size_unpadded_bytes_ammount =\n        UnpaddedBytesAmount::from(PaddedBytesAmount::from(sector_size));\n\n    let porep_config = PoRepConfig {\n        sector_size,\n        partitions: PoRepProofPartitions(\n            *POREP_PARTITIONS\n                
\n                .read()\n                .expect(\"poisoned read access\")\n                .get(&u64::from(sector_size))\n                .expect(\"unknown sector size\"),\n        ),\n        porep_id,\n        api_version,\n    };\n\n    let mut out: Vec<(SectorId, PreCommitReplicaOutput<Tree>)> = Default::default();\n    let mut sector_ids = Vec::new();\n    let mut cache_dirs = Vec::new();\n    let mut staged_files = Vec::new();\n    let mut sealed_files = Vec::new();\n\n    for i in 0..qty_sectors {\n        info!(\"creating sector {}/{}\", i, qty_sectors);\n\n        sector_ids.push(SectorId::from(random::<u64>()));\n        cache_dirs.push(tempdir().expect(\"failed to create cache dir\"));\n\n        let staged_file =\n            NamedTempFile::new().expect(\"could not create temp file for staged sector\");\n\n        let sealed_file =\n            NamedTempFile::new().expect(\"could not create temp file for sealed sector\");\n        // Prevent the sealed sector file from being deleted when `sealed_file` goes out of scope\n        let (_, sealed_path) = sealed_file\n            .keep()\n            .expect(\"failed to keep sealed sector file around\");\n\n        sealed_files.push(sealed_path);\n        staged_files.push(staged_file);\n    }\n\n    let piece_files: Vec<_> = (0..qty_sectors)\n        .into_par_iter()\n        .map(|_i| {\n            create_piece(UnpaddedBytesAmount::from(PaddedBytesAmount::from(\n                sector_size,\n            )))\n        })\n        .collect();\n\n    info!(\"adding pieces\");\n    let mut piece_infos = Vec::new();\n    for (i, (mut piece_file, mut staged_file)) in piece_files\n        .into_iter()\n        .zip(staged_files.iter_mut())\n        .enumerate()\n    {\n        info!(\"add piece {}\", i);\n        let (info, _) = add_piece(\n            &mut piece_file,\n            &mut staged_file,\n            sector_size_unpadded_bytes_amount,\n            &[],\n        )\n        .expect(\"failed to add piece\");\n        piece_infos.push(vec![info]);\n    }\n\n    if only_add {\n        return (porep_config, None);\n    }\n\n    // Seal all sectors: run pre-commit phase 1 in parallel, then validate each\n    // cache and run phase 2, timing the whole batch.\n    let seal_pre_commit_outputs = measure(|| {\n        let phase1s = cache_dirs\n            .par_iter()\n            .zip(staged_files.par_iter())\n            .zip(sealed_files.par_iter())\n            .zip(sector_ids.par_iter())\n            .zip(piece_infos.par_iter())\n            .map(\n                |((((cache_dir, staged_file), sealed_file), sector_id), piece_infos)| {\n                    seal_pre_commit_phase1(\n                        porep_config,\n                        cache_dir,\n                        staged_file,\n                        sealed_file,\n                        PROVER_ID,\n                        *sector_id,\n                        TICKET_BYTES,\n                        piece_infos,\n                    )\n                },\n            )\n            .collect::<Result<Vec<_>, _>>()?;\n\n        phase1s\n            .into_iter()\n            .enumerate()\n            .map(|(i, phase1)| {\n                validate_cache_for_precommit_phase2::<_, _, Tree>(\n                    &cache_dirs[i],\n                    &sealed_files[i],\n                    &phase1,\n                )?;\n                seal_pre_commit_phase2(porep_config, phase1, &cache_dirs[i], &sealed_files[i])\n            })\n            .collect::<Result<Vec<_>, _>>()\n    })\n    .expect(\"seal_pre_commit produced an error\");\n\n    info!(\"collecting infos\");\n\n    let priv_infos = sealed_files\n        .iter()\n        
.zip(seal_pre_commit_outputs.return_value.iter())\n        .zip(cache_dirs.into_iter())\n        .map(|((sealed_file, seal_pre_commit_output), cache_dir)| {\n            PrivateReplicaInfo::new(\n                sealed_file.to_path_buf(),\n                seal_pre_commit_output.comm_r,\n                cache_dir.into_path(),\n            )\n            .expect(\"failed to create PrivateReplicaInfo\")\n        })\n        .collect::<Vec<_>>();\n\n    let pub_infos = seal_pre_commit_outputs\n        .return_value\n        .iter()\n        .map(|sp| PublicReplicaInfo::new(sp.comm_r).expect(\"failed to create PublicReplicaInfo\"))\n        .collect::<Vec<_>>();\n\n    for (((sector_id, piece_info), priv_info), pub_info) in sector_ids\n        .into_iter()\n        .zip(piece_infos.into_iter())\n        .zip(priv_infos.into_iter())\n        .zip(pub_infos.into_iter())\n    {\n        out.push((\n            sector_id,\n            PreCommitReplicaOutput {\n                piece_info,\n                private_replica_info: priv_info,\n                public_replica_info: pub_info,\n            },\n        ));\n    }\n\n    (porep_config, Some((out, seal_pre_commit_outputs)))\n}\n"
  },
  {
    "path": "filecoin-hashers/Cargo.toml",
    "content": "[package]\nname = \"filecoin-hashers\"\ndescription = \"Hashers used in filecoin and their abstractions.\"\nversion = \"2.0.1\"\nauthors = [\"dignifiedquire <dignifiedquire@gmail.com>\", \"porcuquine <porcuquine@users.noreply.github.com>\"]\nlicense = \"MIT OR Apache-2.0\"\nedition = \"2018\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\nreadme = \"README.md\"\n\n[dependencies]\nbellperson = { version = \"0.13\", default-features = false }\ngeneric-array = \"0.14.4\"\nmerkletree = \"0.21.0\"\nff = { version = \"0.2.3\", package = \"fff\" }\nanyhow = \"1.0.34\"\nserde = \"1.0.117\"\nrand = \"0.7.3\"\n\nneptune = { version = \"2.2.0\", default-features = false, optional = true }\nlazy_static = { version = \"1.4.0\", optional = true }\nblake2s_simd = { version = \"0.5.11\", optional = true }\nsha2 = { version = \"0.9.2\", optional = true }\nhex = \"0.4.2\"\n\n[features]\ndefault = [\"gpu\", \"pairing\", \"blake2s\", \"poseidon\", \"sha256\"]\n\ngpu = [\"bellperson/gpu\", \"neptune/opencl\"]\n\npairing = [\"bellperson/pairing\", \"neptune/pairing\", \"bellperson/pairing-serde\"]\nblst = [\"bellperson/blst\", \"neptune/blst\", \"bellperson/blst-serde\"]\n\n# available hashers\nblake2s = [\"blake2s_simd\"]\nposeidon = [\"neptune\", \"lazy_static\"]\nsha256 = [\"sha2\"]\n\n[dev-dependencies]\nrand_xorshift = \"0.2.0\"\nserde_json = \"1.0.59\"\n"
  },
  {
    "path": "filecoin-hashers/LICENSE-APACHE",
    "content": "                              Apache License\n                        Version 2.0, January 2004\n                     http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n   \"License\" shall mean the terms and conditions for use, reproduction,\n   and distribution as defined by Sections 1 through 9 of this document.\n\n   \"Licensor\" shall mean the copyright owner or entity authorized by\n   the copyright owner that is granting the License.\n\n   \"Legal Entity\" shall mean the union of the acting entity and all\n   other entities that control, are controlled by, or are under common\n   control with that entity. For the purposes of this definition,\n   \"control\" means (i) the power, direct or indirect, to cause the\n   direction or management of such entity, whether by contract or\n   otherwise, or (ii) ownership of fifty percent (50%) or more of the\n   outstanding shares, or (iii) beneficial ownership of such entity.\n\n   \"You\" (or \"Your\") shall mean an individual or Legal Entity\n   exercising permissions granted by this License.\n\n   \"Source\" form shall mean the preferred form for making modifications,\n   including but not limited to software source code, documentation\n   source, and configuration files.\n\n   \"Object\" form shall mean any form resulting from mechanical\n   transformation or translation of a Source form, including but\n   not limited to compiled object code, generated documentation,\n   and conversions to other media types.\n\n   \"Work\" shall mean the work of authorship, whether in Source or\n   Object form, made available under the License, as indicated by a\n   copyright notice that is included in or attached to the work\n   (an example is provided in the Appendix below).\n\n   \"Derivative Works\" shall mean any work, whether in Source or Object\n   form, that is based on (or derived from) the Work and for which the\n   editorial revisions, annotations, elaborations, or other modifications\n   represent, as a whole, an original work of authorship. For the purposes\n   of this License, Derivative Works shall not include works that remain\n   separable from, or merely link (or bind by name) to the interfaces of,\n   the Work and Derivative Works thereof.\n\n   \"Contribution\" shall mean any work of authorship, including\n   the original version of the Work and any modifications or additions\n   to that Work or Derivative Works thereof, that is intentionally\n   submitted to Licensor for inclusion in the Work by the copyright owner\n   or by an individual or Legal Entity authorized to submit on behalf of\n   the copyright owner. For the purposes of this definition, \"submitted\"\n   means any form of electronic, verbal, or written communication sent\n   to the Licensor or its representatives, including but not limited to\n   communication on electronic mailing lists, source code control systems,\n   and issue tracking systems that are managed by, or on behalf of, the\n   Licensor for the purpose of discussing and improving the Work, but\n   excluding communication that is conspicuously marked or otherwise\n   designated in writing by the copyright owner as \"Not a Contribution.\"\n\n   \"Contributor\" shall mean Licensor and any individual or Legal Entity\n   on behalf of whom a Contribution has been received by Licensor and\n   subsequently incorporated within the Work.\n\n2. Grant of Copyright License. 
Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   copyright license to reproduce, prepare Derivative Works of,\n   publicly display, publicly perform, sublicense, and distribute the\n   Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   (except as stated in this section) patent license to make, have made,\n   use, offer to sell, sell, import, and otherwise transfer the Work,\n   where such license applies only to those patent claims licensable\n   by such Contributor that are necessarily infringed by their\n   Contribution(s) alone or by combination of their Contribution(s)\n   with the Work to which such Contribution(s) was submitted. If You\n   institute patent litigation against any entity (including a\n   cross-claim or counterclaim in a lawsuit) alleging that the Work\n   or a Contribution incorporated within the Work constitutes direct\n   or contributory patent infringement, then any patent licenses\n   granted to You under this License for that Work shall terminate\n   as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n   Work or Derivative Works thereof in any medium, with or without\n   modifications, and in Source or Object form, provided that You\n   meet the following conditions:\n\n   (a) You must give any other recipients of the Work or\n       Derivative Works a copy of this License; and\n\n   (b) You must cause any modified files to carry prominent notices\n       stating that You changed the files; and\n\n   (c) You must retain, in the Source form of any Derivative Works\n       that You distribute, all copyright, patent, trademark, and\n       attribution notices from the Source form of the Work,\n       excluding those notices that do not pertain to any part of\n       the Derivative Works; and\n\n   (d) If the Work includes a \"NOTICE\" text file as part of its\n       distribution, then any Derivative Works that You distribute must\n       include a readable copy of the attribution notices contained\n       within such NOTICE file, excluding those notices that do not\n       pertain to any part of the Derivative Works, in at least one\n       of the following places: within a NOTICE text file distributed\n       as part of the Derivative Works; within the Source form or\n       documentation, if provided along with the Derivative Works; or,\n       within a display generated by the Derivative Works, if and\n       wherever such third-party notices normally appear. The contents\n       of the NOTICE file are for informational purposes only and\n       do not modify the License. 
You may add Your own attribution\n       notices within Derivative Works that You distribute, alongside\n       or as an addendum to the NOTICE text from the Work, provided\n       that such additional attribution notices cannot be construed\n       as modifying the License.\n\n   You may add Your own copyright statement to Your modifications and\n   may provide additional or different license terms and conditions\n   for use, reproduction, or distribution of Your modifications, or\n   for any such Derivative Works as a whole, provided Your use,\n   reproduction, and distribution of the Work otherwise complies with\n   the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n   any Contribution intentionally submitted for inclusion in the Work\n   by You to the Licensor shall be under the terms and conditions of\n   this License, without any additional terms or conditions.\n   Notwithstanding the above, nothing herein shall supersede or modify\n   the terms of any separate license agreement you may have executed\n   with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n   names, trademarks, service marks, or product names of the Licensor,\n   except as required for reasonable and customary use in describing the\n   origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n   agreed to in writing, Licensor provides the Work (and each\n   Contributor provides its Contributions) on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n   implied, including, without limitation, any warranties or conditions\n   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n   PARTICULAR PURPOSE. You are solely responsible for determining the\n   appropriateness of using or redistributing the Work and assume any\n   risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n   whether in tort (including negligence), contract, or otherwise,\n   unless required by applicable law (such as deliberate and grossly\n   negligent acts) or agreed to in writing, shall any Contributor be\n   liable to You for damages, including any direct, indirect, special,\n   incidental, or consequential damages of any character arising as a\n   result of this License or out of the use or inability to use the\n   Work (including but not limited to damages for loss of goodwill,\n   work stoppage, computer failure or malfunction, or any and all\n   other commercial damages or losses), even if such Contributor\n   has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n   the Work or Derivative Works thereof, You may choose to offer,\n   and charge a fee for, acceptance of support, warranty, indemnity,\n   or other liability obligations and/or rights consistent with this\n   License. 
However, in accepting such obligations, You may act only\n   on Your own behalf and on Your sole responsibility, not on behalf\n   of any other Contributor, and only if You agree to indemnify,\n   defend, and hold each Contributor harmless for any liability\n   incurred by, or claims asserted against, such Contributor by reason\n   of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n   To apply the Apache License to your work, attach the following\n   boilerplate notice, with the fields enclosed by brackets \"[]\"\n   replaced with your own identifying information. (Don't include\n   the brackets!)  The text should be enclosed in the appropriate\n   comment syntax for the file format. We also recommend that a\n   file or class name and description of purpose be included on the\n   same \"printed page\" as the copyright notice for easier\n   identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License."
  },
  {
    "path": "filecoin-hashers/LICENSE-MIT",
    "content": "Permission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE."
  },
  {
    "path": "filecoin-hashers/README.md",
    "content": "# worlddatabase\n\n> Different hash functions and abstraction over them.\n\n\nAvailable hashers are\n\n- `blake2s`\n- `poseidon`\n- `sha2 256`\n\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "filecoin-hashers/src/blake2s.rs",
    "content": "use std::fmt::{self, Debug, Formatter};\nuse std::hash::Hasher as StdHasher;\nuse std::panic::panic_any;\n\nuse anyhow::ensure;\nuse bellperson::{\n    bls::{Bls12, Fr, FrRepr},\n    gadgets::{\n        blake2s::blake2s as blake2s_circuit, boolean::Boolean, multipack, num::AllocatedNum,\n    },\n    ConstraintSystem, SynthesisError,\n};\nuse blake2s_simd::{Hash as Blake2sHash, Params as Blake2s, State};\nuse ff::{Field, PrimeField, PrimeFieldRepr};\nuse merkletree::{\n    hash::{Algorithm, Hashable},\n    merkle::Element,\n};\nuse rand::RngCore;\nuse serde::{Deserialize, Serialize};\n\nuse crate::types::{Domain, HashFunction, Hasher};\n\n#[derive(Default, Copy, Clone, PartialEq, Eq, Debug)]\npub struct Blake2sHasher {}\n\nimpl Hasher for Blake2sHasher {\n    type Domain = Blake2sDomain;\n    type Function = Blake2sFunction;\n\n    fn name() -> String {\n        \"Blake2sHasher\".into()\n    }\n}\n\n#[derive(Clone)]\npub struct Blake2sFunction(State);\n\nimpl Default for Blake2sFunction {\n    fn default() -> Self {\n        Blake2sFunction(Blake2s::new().hash_length(32).to_state())\n    }\n}\n\nimpl PartialEq for Blake2sFunction {\n    fn eq(&self, other: &Self) -> bool {\n        format!(\"{:?}\", self) == format!(\"{:?}\", other)\n    }\n}\n\nimpl Eq for Blake2sFunction {}\n\nimpl Debug for Blake2sFunction {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"Blake2sFunction({:?})\", self.0)\n    }\n}\n\nimpl StdHasher for Blake2sFunction {\n    #[inline]\n    fn write(&mut self, msg: &[u8]) {\n        self.0.update(msg);\n    }\n\n    #[inline]\n    fn finish(&self) -> u64 {\n        unreachable!(\"unused by Function -- should never be called\")\n    }\n}\n\n#[derive(\n    Copy, Clone, PartialEq, Eq, Debug, PartialOrd, Ord, Default, Serialize, Deserialize, Hash,\n)]\npub struct Blake2sDomain(pub [u8; 32]);\n\nimpl AsRef<Blake2sDomain> for Blake2sDomain {\n    fn as_ref(&self) -> &Self {\n        self\n    }\n}\n\nimpl Blake2sDomain {\n    pub fn trim_to_fr32(&mut self) {\n        // strip last two bits, to ensure result is in Fr.\n        self.0[31] &= 0b0011_1111;\n    }\n}\n\nimpl AsRef<[u8]> for Blake2sDomain {\n    fn as_ref(&self) -> &[u8] {\n        &self.0[..]\n    }\n}\n\nimpl Hashable<Blake2sFunction> for Blake2sDomain {\n    fn hash(&self, state: &mut Blake2sFunction) {\n        state.write(self.as_ref())\n    }\n}\n\nimpl From<Fr> for Blake2sDomain {\n    fn from(val: Fr) -> Self {\n        let mut res = Self::default();\n        val.into_repr()\n            .write_le(&mut res.0[0..32])\n            .expect(\"write_le failure\");\n\n        res\n    }\n}\n\nimpl From<FrRepr> for Blake2sDomain {\n    fn from(val: FrRepr) -> Self {\n        let mut res = Self::default();\n        val.write_le(&mut res.0[0..32]).expect(\"write_le failure\");\n\n        res\n    }\n}\n\nimpl Element for Blake2sDomain {\n    fn byte_len() -> usize {\n        32\n    }\n\n    fn from_slice(bytes: &[u8]) -> Self {\n        match Blake2sDomain::try_from_bytes(bytes) {\n            Ok(res) => res,\n            Err(err) => panic_any(err),\n        }\n    }\n\n    fn copy_to_slice(&self, bytes: &mut [u8]) {\n        bytes.copy_from_slice(&self.0);\n    }\n}\n\nimpl From<Blake2sDomain> for Fr {\n    fn from(val: Blake2sDomain) -> Self {\n        let mut res = FrRepr::default();\n        res.read_le(&val.0[0..32]).expect(\"read_le failure\");\n\n        Fr::from_repr(res).expect(\"from_repr failure\")\n    }\n}\n\nimpl Domain for Blake2sDomain {\n    fn 
into_bytes(&self) -> Vec<u8> {\n        self.0.to_vec()\n    }\n\n    fn try_from_bytes(raw: &[u8]) -> anyhow::Result<Self> {\n        ensure!(\n            raw.len() == 32 && u32::from(raw[31]) <= Fr::NUM_BITS,\n            \"invalid amount of bytes\"\n        );\n\n        let mut res = Blake2sDomain::default();\n        res.0.copy_from_slice(&raw[0..32]);\n        Ok(res)\n    }\n\n    fn write_bytes(&self, dest: &mut [u8]) -> anyhow::Result<()> {\n        ensure!(dest.len() >= 32, \"too many bytes\");\n        dest[0..32].copy_from_slice(&self.0[..]);\n        Ok(())\n    }\n\n    fn random<R: RngCore>(rng: &mut R) -> Self {\n        // generating an Fr and converting it, to ensure we stay in the field\n        Fr::random(rng).into()\n    }\n}\n\nimpl Into<Blake2sDomain> for Blake2sHash {\n    fn into(self) -> Blake2sDomain {\n        let mut res = Blake2sDomain::default();\n        res.0[..].copy_from_slice(self.as_ref());\n        res.trim_to_fr32();\n\n        res\n    }\n}\n\nimpl HashFunction<Blake2sDomain> for Blake2sFunction {\n    fn hash(data: &[u8]) -> Blake2sDomain {\n        Blake2s::new()\n            .hash_length(32)\n            .to_state()\n            .update(data)\n            .finalize()\n            .into()\n    }\n\n    fn hash2(a: &Blake2sDomain, b: &Blake2sDomain) -> Blake2sDomain {\n        Blake2s::new()\n            .hash_length(32)\n            .to_state()\n            .update(a.as_ref())\n            .update(b.as_ref())\n            .finalize()\n            .into()\n    }\n\n    fn hash_multi_leaf_circuit<Arity, CS: ConstraintSystem<Bls12>>(\n        mut cs: CS,\n        leaves: &[AllocatedNum<Bls12>],\n        _height: usize,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        let mut bits = Vec::with_capacity(leaves.len() * Fr::CAPACITY as usize);\n        for (i, leaf) in leaves.iter().enumerate() {\n            bits.extend_from_slice(\n                &leaf.to_bits_le(cs.namespace(|| format!(\"{}_num_into_bits\", i)))?,\n            );\n            while bits.len() % 8 != 0 {\n                bits.push(Boolean::Constant(false));\n            }\n        }\n        Self::hash_circuit(cs, &bits)\n    }\n\n    fn hash_leaf_bits_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        left: &[Boolean],\n        right: &[Boolean],\n        _height: usize,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        let mut preimage: Vec<Boolean> = vec![];\n\n        preimage.extend_from_slice(left);\n        while preimage.len() % 8 != 0 {\n            preimage.push(Boolean::Constant(false));\n        }\n\n        preimage.extend_from_slice(right);\n        while preimage.len() % 8 != 0 {\n            preimage.push(Boolean::Constant(false));\n        }\n\n        Self::hash_circuit(cs, &preimage[..])\n    }\n\n    fn hash_circuit<CS: ConstraintSystem<Bls12>>(\n        mut cs: CS,\n        bits: &[Boolean],\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        let personalization = vec![0u8; 8];\n        let alloc_bits = blake2s_circuit(cs.namespace(|| \"hash\"), bits, &personalization)?;\n\n        multipack::pack_bits(cs.namespace(|| \"pack\"), &alloc_bits)\n    }\n\n    fn hash2_circuit<CS>(\n        mut cs: CS,\n        a_num: &AllocatedNum<Bls12>,\n        b_num: &AllocatedNum<Bls12>,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        // Allocate as booleans\n        let a = a_num.to_bits_le(cs.namespace(|| \"a_bits\"))?;\n        let b = 
b_num.to_bits_le(cs.namespace(|| \"b_bits\"))?;\n\n        let mut preimage: Vec<Boolean> = vec![];\n\n        preimage.extend_from_slice(&a);\n        while preimage.len() % 8 != 0 {\n            preimage.push(Boolean::Constant(false));\n        }\n\n        preimage.extend_from_slice(&b);\n        while preimage.len() % 8 != 0 {\n            preimage.push(Boolean::Constant(false));\n        }\n\n        Self::hash_circuit(cs, &preimage[..])\n    }\n}\n\nimpl Algorithm<Blake2sDomain> for Blake2sFunction {\n    #[inline]\n    fn hash(&mut self) -> Blake2sDomain {\n        self.0.clone().finalize().into()\n    }\n\n    #[inline]\n    fn reset(&mut self) {\n        self.0 = Blake2s::new().hash_length(32).to_state()\n    }\n\n    fn leaf(&mut self, leaf: Blake2sDomain) -> Blake2sDomain {\n        leaf\n    }\n\n    fn node(&mut self, left: Blake2sDomain, right: Blake2sDomain, _height: usize) -> Blake2sDomain {\n        left.hash(self);\n        right.hash(self);\n        self.hash()\n    }\n\n    fn multi_node(&mut self, parts: &[Blake2sDomain], _height: usize) -> Blake2sDomain {\n        for part in parts {\n            part.hash(self)\n        }\n        self.hash()\n    }\n}\n\nimpl From<[u8; 32]> for Blake2sDomain {\n    #[inline]\n    fn from(val: [u8; 32]) -> Self {\n        Blake2sDomain(val)\n    }\n}\n\nimpl From<Blake2sDomain> for [u8; 32] {\n    #[inline]\n    fn from(val: Blake2sDomain) -> Self {\n        val.0\n    }\n}\n"
  },
  {
    "path": "filecoin-hashers/src/lib.rs",
    "content": "#![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)]\n#![allow(clippy::upper_case_acronyms)]\n#![warn(clippy::unwrap_used)]\n#![warn(clippy::from_over_into)]\n#![warn(clippy::wrong_self_convention)]\n\n#[cfg(feature = \"blake2s\")]\npub mod blake2s;\n#[cfg(feature = \"poseidon\")]\npub mod poseidon;\n#[cfg(feature = \"poseidon\")]\nmod poseidon_types;\n#[cfg(feature = \"sha256\")]\npub mod sha256;\n\nmod types;\n\npub use self::types::*;\n"
  },
  {
    "path": "filecoin-hashers/src/poseidon.rs",
    "content": "use std::cmp::Ordering;\nuse std::hash::{Hash as StdHash, Hasher as StdHasher};\nuse std::mem::size_of;\nuse std::panic::panic_any;\nuse std::slice;\n\nuse anyhow::ensure;\nuse bellperson::{\n    bls::{Bls12, Fr, FrRepr},\n    gadgets::{boolean::Boolean, num::AllocatedNum},\n    ConstraintSystem, SynthesisError,\n};\nuse ff::{Field, PrimeField, PrimeFieldRepr, ScalarEngine};\nuse generic_array::typenum::{marker_traits::Unsigned, U2};\nuse merkletree::{\n    hash::{Algorithm as LightAlgorithm, Hashable},\n    merkle::Element,\n};\nuse neptune::{circuit::poseidon_hash, poseidon::Poseidon};\nuse rand::RngCore;\nuse serde::{Deserialize, Serialize};\n\nuse crate::types::{\n    Domain, HashFunction, Hasher, PoseidonArity, PoseidonMDArity, POSEIDON_CONSTANTS_16,\n    POSEIDON_CONSTANTS_2, POSEIDON_CONSTANTS_4, POSEIDON_CONSTANTS_8, POSEIDON_MD_CONSTANTS,\n};\n\n#[derive(Default, Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]\npub struct PoseidonHasher {}\n\nimpl Hasher for PoseidonHasher {\n    type Domain = PoseidonDomain;\n    type Function = PoseidonFunction;\n\n    fn name() -> String {\n        \"poseidon_hasher\".into()\n    }\n}\n\n#[derive(Copy, Clone, Debug, PartialEq, Eq)]\npub struct PoseidonFunction(Fr);\n\nimpl Default for PoseidonFunction {\n    fn default() -> PoseidonFunction {\n        PoseidonFunction(Fr::from_repr(FrRepr::default()).expect(\"failed default\"))\n    }\n}\n\nimpl Hashable<PoseidonFunction> for Fr {\n    fn hash(&self, state: &mut PoseidonFunction) {\n        let mut bytes = Vec::with_capacity(32);\n        self.into_repr()\n            .write_le(&mut bytes)\n            .expect(\"write_le failure\");\n        state.write(&bytes);\n    }\n}\n\nimpl Hashable<PoseidonFunction> for PoseidonDomain {\n    fn hash(&self, state: &mut PoseidonFunction) {\n        let mut bytes = Vec::with_capacity(32);\n        self.0\n            .write_le(&mut bytes)\n            .expect(\"Failed to write `FrRepr`\");\n        state.write(&bytes);\n    }\n}\n\n#[derive(Copy, Clone, Debug, Serialize, Deserialize)]\npub struct PoseidonDomain(pub FrRepr);\n\nimpl AsRef<PoseidonDomain> for PoseidonDomain {\n    fn as_ref(&self) -> &PoseidonDomain {\n        self\n    }\n}\n\nimpl StdHash for PoseidonDomain {\n    fn hash<H: StdHasher>(&self, state: &mut H) {\n        let raw: &[u64] = self.0.as_ref();\n        StdHash::hash(raw, state);\n    }\n}\n\nimpl PartialEq for PoseidonDomain {\n    fn eq(&self, other: &Self) -> bool {\n        self.0.as_ref() == other.0.as_ref()\n    }\n}\n\nimpl Eq for PoseidonDomain {}\n\nimpl Default for PoseidonDomain {\n    fn default() -> PoseidonDomain {\n        PoseidonDomain(FrRepr::default())\n    }\n}\n\nimpl Ord for PoseidonDomain {\n    #[inline(always)]\n    fn cmp(&self, other: &PoseidonDomain) -> Ordering {\n        (self.0).cmp(&other.0)\n    }\n}\n\nimpl PartialOrd for PoseidonDomain {\n    #[inline(always)]\n    fn partial_cmp(&self, other: &PoseidonDomain) -> Option<Ordering> {\n        Some((self.0).cmp(&other.0))\n    }\n}\n\nimpl AsRef<[u8]> for PoseidonDomain {\n    #[inline]\n    fn as_ref(&self) -> &[u8] {\n        as_ref(&(self.0).0)\n    }\n}\n\n// This is unsafe, and I wish it wasn't here, but I really need AsRef<[u8]> to work, without allocating.\n// https://internals.rust-lang.org/t/safe-trasnsmute-for-slices-e-g-u64-u32-particularly-simd-types/2871\n// 
https://github.com/briansmith/ring/blob/abb3fdfc08562f3f02e95fb551604a871fd4195e/src/polyfill.rs#L93-L110\n#[inline(always)]\n#[allow(clippy::needless_lifetimes)]\nfn as_ref<'a>(src: &'a [u64; 4]) -> &'a [u8] {\n    unsafe { slice::from_raw_parts(src.as_ptr() as *const u8, src.len() * size_of::<u64>()) }\n}\n\nimpl Domain for PoseidonDomain {\n    fn into_bytes(&self) -> Vec<u8> {\n        let mut out = Vec::with_capacity(PoseidonDomain::byte_len());\n        self.0.write_le(&mut out).expect(\"write_le failure\");\n\n        out\n    }\n\n    fn try_from_bytes(raw: &[u8]) -> anyhow::Result<Self> {\n        ensure!(\n            raw.len() == PoseidonDomain::byte_len(),\n            \"invalid amount of bytes\"\n        );\n        let mut res: FrRepr = Default::default();\n        res.read_le(raw)?;\n\n        Ok(PoseidonDomain(res))\n    }\n\n    fn write_bytes(&self, dest: &mut [u8]) -> anyhow::Result<()> {\n        self.0.write_le(dest)?;\n        Ok(())\n    }\n\n    fn random<R: RngCore>(rng: &mut R) -> Self {\n        // generating an Fr and converting it, to ensure we stay in the field\n        Fr::random(rng).into()\n    }\n}\n\nimpl Element for PoseidonDomain {\n    fn byte_len() -> usize {\n        32\n    }\n\n    fn from_slice(bytes: &[u8]) -> Self {\n        match PoseidonDomain::try_from_bytes(bytes) {\n            Ok(res) => res,\n            Err(err) => panic_any(err),\n        }\n    }\n\n    fn copy_to_slice(&self, bytes: &mut [u8]) {\n        bytes.copy_from_slice(&self.into_bytes());\n    }\n}\n\nimpl StdHasher for PoseidonFunction {\n    #[inline]\n    fn write(&mut self, msg: &[u8]) {\n        self.0 = Fr::from_repr(shared_hash(msg).0).expect(\"from_repr failure\");\n    }\n\n    #[inline]\n    fn finish(&self) -> u64 {\n        unimplemented!()\n    }\n}\n\nfn shared_hash(data: &[u8]) -> PoseidonDomain {\n    // FIXME: We shouldn't unwrap here, but doing otherwise will require an interface change.\n    // We could truncate so `bytes_into_frs` cannot fail, then ensure `data` is always `fr_safe`.\n    let preimage = data\n        .chunks(32)\n        .map(|ref chunk| {\n            <Bls12 as ScalarEngine>::Fr::from_repr(PoseidonDomain::from_slice(chunk).0)\n                .expect(\"from_repr failure\")\n        })\n        .collect::<Vec<_>>();\n\n    shared_hash_frs(&preimage).into()\n}\n\nfn shared_hash_frs(preimage: &[<Bls12 as ScalarEngine>::Fr]) -> <Bls12 as ScalarEngine>::Fr {\n    match preimage.len() {\n        2 => {\n            let mut p = Poseidon::new_with_preimage(&preimage, &POSEIDON_CONSTANTS_2);\n            p.hash()\n        }\n        4 => {\n            let mut p = Poseidon::new_with_preimage(&preimage, &POSEIDON_CONSTANTS_4);\n            p.hash()\n        }\n        8 => {\n            let mut p = Poseidon::new_with_preimage(&preimage, &POSEIDON_CONSTANTS_8);\n            p.hash()\n        }\n        16 => {\n            let mut p = Poseidon::new_with_preimage(&preimage, &POSEIDON_CONSTANTS_16);\n            p.hash()\n        }\n\n        _ => panic_any(format!(\n            \"Unsupported arity for Poseidon hasher: {}\",\n            preimage.len()\n        )),\n    }\n}\n\nimpl HashFunction<PoseidonDomain> for PoseidonFunction {\n    fn hash(data: &[u8]) -> PoseidonDomain {\n        shared_hash(data)\n    }\n\n    fn hash2(a: &PoseidonDomain, b: &PoseidonDomain) -> PoseidonDomain {\n        let mut p =\n            Poseidon::new_with_preimage(&[(*a).into(), (*b).into()][..], &*POSEIDON_CONSTANTS_2);\n        let fr: <Bls12 as ScalarEngine>::Fr = 
p.hash();\n        fr.into()\n    }\n\n    fn hash_md(input: &[PoseidonDomain]) -> PoseidonDomain {\n        assert!(input.len() > 1, \"hash_md needs more than one element.\");\n        let arity = PoseidonMDArity::to_usize();\n\n        let mut p = Poseidon::new(&*POSEIDON_MD_CONSTANTS);\n\n        let fr_input = input\n            .iter()\n            .map(|x| <Bls12 as ScalarEngine>::Fr::from_repr(x.0).expect(\"from_repr failure\"))\n            .collect::<Vec<_>>();\n\n        fr_input[1..]\n            .chunks(arity - 1)\n            .fold(fr_input[0], |acc, elts| {\n                p.reset();\n                p.input(acc).expect(\"input failure\"); // These unwraps will panic iff arity is incorrect, but it was checked above.\n                elts.iter().for_each(|elt| {\n                    let _ = p.input(*elt).expect(\"input failure\");\n                });\n                p.hash()\n            })\n            .into()\n    }\n\n    fn hash_leaf_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        left: &AllocatedNum<Bls12>,\n        right: &AllocatedNum<Bls12>,\n        _height: usize,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        let preimage = vec![left.clone(), right.clone()];\n\n        poseidon_hash::<CS, Bls12, U2>(cs, preimage, U2::PARAMETERS())\n    }\n\n    fn hash_multi_leaf_circuit<Arity: 'static + PoseidonArity, CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        leaves: &[AllocatedNum<Bls12>],\n        _height: usize,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        let params = Arity::PARAMETERS();\n        poseidon_hash::<CS, Bls12, Arity>(cs, leaves.to_vec(), params)\n    }\n\n    fn hash_md_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: &mut CS,\n        elements: &[AllocatedNum<Bls12>],\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        let params = PoseidonMDArity::PARAMETERS();\n        let arity = PoseidonMDArity::to_usize();\n\n        let mut hash = elements[0].clone();\n        let mut preimage = vec![hash.clone(); arity]; // Allocate. 
This will be overwritten.\n        for (hash_num, elts) in elements[1..].chunks(arity - 1).enumerate() {\n            preimage[0] = hash;\n            for (i, elt) in elts.iter().enumerate() {\n                preimage[i + 1] = elt.clone();\n            }\n            // any terminal padding\n            #[allow(clippy::needless_range_loop)]\n            for i in (elts.len() + 1)..arity {\n                preimage[i] =\n                    AllocatedNum::alloc(cs.namespace(|| format!(\"padding {}\", i)), || {\n                        Ok(Fr::zero())\n                    })\n                    .expect(\"alloc failure\");\n            }\n            let cs = cs.namespace(|| format!(\"hash md {}\", hash_num));\n            hash =\n                poseidon_hash::<_, Bls12, PoseidonMDArity>(cs, preimage.clone(), params)?.clone();\n        }\n\n        Ok(hash)\n    }\n\n    fn hash_circuit<CS: ConstraintSystem<Bls12>>(\n        _cs: CS,\n        _bits: &[Boolean],\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        unimplemented!();\n    }\n\n    fn hash2_circuit<CS>(\n        cs: CS,\n        a: &AllocatedNum<Bls12>,\n        b: &AllocatedNum<Bls12>,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        let preimage = vec![a.clone(), b.clone()];\n        poseidon_hash::<CS, Bls12, U2>(cs, preimage, U2::PARAMETERS())\n    }\n}\n\nimpl LightAlgorithm<PoseidonDomain> for PoseidonFunction {\n    #[inline]\n    fn hash(&mut self) -> PoseidonDomain {\n        self.0.into()\n    }\n\n    #[inline]\n    fn reset(&mut self) {\n        self.0 = Fr::from_repr(FrRepr::from(0)).expect(\"failed 0\");\n    }\n\n    fn leaf(&mut self, leaf: PoseidonDomain) -> PoseidonDomain {\n        leaf\n    }\n\n    fn node(\n        &mut self,\n        left: PoseidonDomain,\n        right: PoseidonDomain,\n        _height: usize,\n    ) -> PoseidonDomain {\n        shared_hash_frs(&[\n            <Bls12 as ScalarEngine>::Fr::from_repr(left.0).expect(\"from_repr failure\"),\n            <Bls12 as ScalarEngine>::Fr::from_repr(right.0).expect(\"from_repr failure\"),\n        ])\n        .into()\n    }\n\n    fn multi_node(&mut self, parts: &[PoseidonDomain], _height: usize) -> PoseidonDomain {\n        match parts.len() {\n            1 | 2 | 4 | 8 | 16 => shared_hash_frs(\n                &parts\n                    .iter()\n                    .enumerate()\n                    .map(|(i, x)| {\n                        <Bls12 as ScalarEngine>::Fr::from_repr(x.0)\n                            .unwrap_or_else(|_| panic_any(format!(\"from_repr failure at {}\", i)))\n                    })\n                    .collect::<Vec<_>>(),\n            )\n            .into(),\n            arity => panic_any(format!(\"unsupported arity {}\", arity)),\n        }\n    }\n}\n\nimpl From<Fr> for PoseidonDomain {\n    #[inline]\n    fn from(val: Fr) -> Self {\n        PoseidonDomain(val.into_repr())\n    }\n}\n\nimpl From<FrRepr> for PoseidonDomain {\n    #[inline]\n    fn from(val: FrRepr) -> Self {\n        PoseidonDomain(val)\n    }\n}\n\nimpl From<PoseidonDomain> for Fr {\n    #[inline]\n    fn from(val: PoseidonDomain) -> Self {\n        Fr::from_repr(val.0).expect(\"from_repr failure\")\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::mem;\n\n    use bellperson::util_cs::test_cs::TestConstraintSystem;\n    use merkletree::{merkle::MerkleTree, store::VecStore};\n\n    #[test]\n    fn test_path() {\n        let values = [\n            
PoseidonDomain(Fr::one().into_repr()),\n            PoseidonDomain(Fr::one().into_repr()),\n            PoseidonDomain(Fr::one().into_repr()),\n            PoseidonDomain(Fr::one().into_repr()),\n        ];\n\n        let t = MerkleTree::<PoseidonDomain, PoseidonFunction, VecStore<_>, U2>::new(\n            values.iter().copied(),\n        )\n        .expect(\"merkle tree new failure\");\n\n        let p = t.gen_proof(0).expect(\"gen_proof failure\"); // create a proof for the first value =k Fr::one()\n\n        assert_eq!(*p.path(), vec![0, 0]);\n        assert_eq!(\n            p.validate::<PoseidonFunction>()\n                .expect(\"failed to validate\"),\n            true\n        );\n    }\n\n    // #[test]\n    // fn test_poseidon_quad() {\n    //     let leaves = [Fr::one(), Fr::zero(), Fr::zero(), Fr::one()];\n\n    //     assert_eq!(Fr::zero().into_repr(), shared_hash_frs(&leaves[..]).0);\n    // }\n\n    #[test]\n    fn test_poseidon_hasher() {\n        let leaves = [\n            PoseidonDomain(Fr::one().into_repr()),\n            PoseidonDomain(Fr::zero().into_repr()),\n            PoseidonDomain(Fr::zero().into_repr()),\n            PoseidonDomain(Fr::one().into_repr()),\n        ];\n\n        let t = MerkleTree::<PoseidonDomain, PoseidonFunction, VecStore<_>, U2>::new(\n            leaves.iter().copied(),\n        )\n        .expect(\"merkle tree new failure\");\n\n        assert_eq!(t.leafs(), 4);\n\n        let mut a = PoseidonFunction::default();\n\n        assert_eq!(t.read_at(0).expect(\"read_at failure\"), leaves[0]);\n        assert_eq!(t.read_at(1).expect(\"read_at failure\"), leaves[1]);\n        assert_eq!(t.read_at(2).expect(\"read_at failure\"), leaves[2]);\n        assert_eq!(t.read_at(3).expect(\"read_at failure\"), leaves[3]);\n\n        let i1 = a.node(leaves[0], leaves[1], 0);\n        a.reset();\n        let i2 = a.node(leaves[2], leaves[3], 0);\n        a.reset();\n\n        assert_eq!(t.read_at(4).expect(\"read_at failure\"), i1);\n        assert_eq!(t.read_at(5).expect(\"read_at failure\"), i2);\n\n        let root = a.node(i1, i2, 1);\n        a.reset();\n\n        assert_eq!(\n            t.read_at(4).expect(\"read_at failure\").0,\n            FrRepr([\n                0xb339ff6079800b5e,\n                0xec5907b3dc3094af,\n                0x93c003cc74a24f26,\n                0x042f94ffbe786bc3,\n            ])\n        );\n\n        let expected = FrRepr([\n            0xefbb8be3e291e671,\n            0x77cc72b8cb2b5ad2,\n            0x30eb6385ae6b74ae,\n            0x1effebb7b26ad9eb,\n        ]);\n        let actual = t.read_at(6).expect(\"read_at failure\").0;\n\n        assert_eq!(actual, expected);\n        assert_eq!(t.read_at(6).expect(\"read_at failure\"), root);\n    }\n\n    #[test]\n    fn test_as_ref() {\n        let cases: Vec<[u64; 4]> = vec![\n            [0, 0, 0, 0],\n            [\n                14963070332212552755,\n                2414807501862983188,\n                16116531553419129213,\n                6357427774790868134,\n            ],\n        ];\n\n        for case in cases.into_iter() {\n            let repr = FrRepr(case);\n            let val = PoseidonDomain(repr);\n\n            for _ in 0..100 {\n                assert_eq!(val.into_bytes(), val.into_bytes());\n            }\n\n            let raw: &[u8] = val.as_ref();\n\n            for i in 0..4 {\n                assert_eq!(case[i], unsafe {\n                    let mut val = [0u8; 8];\n                    val.clone_from_slice(&raw[i * 8..(i + 1) * 8]);\n  
                  mem::transmute::<[u8; 8], u64>(val)\n                });\n            }\n        }\n    }\n\n    #[test]\n    fn test_serialize() {\n        let repr = FrRepr([1, 2, 3, 4]);\n        let val = PoseidonDomain(repr);\n\n        let ser = serde_json::to_string(&val)\n            .expect(\"Failed to serialize `PoseidonDomain` element to JSON string\");\n        let val_back = serde_json::from_str(&ser)\n            .expect(\"Failed to deserialize JSON string to `PoseidonDomain`\");\n\n        assert_eq!(val, val_back);\n    }\n\n    #[test]\n    fn test_hash_md() {\n        // let arity = PoseidonMDArity::to_usize();\n        let n = 71;\n        let data = vec![PoseidonDomain(Fr::one().into_repr()); n];\n        let hashed = PoseidonFunction::hash_md(&data);\n\n        assert_eq!(\n            hashed,\n            PoseidonDomain(FrRepr([\n                0x351c54133b332c90,\n                0xc26f6d625f4e8195,\n                0x5fd9623643ed9622,\n                0x59f42220e09ff6f7,\n            ]))\n        );\n    }\n\n    #[test]\n    fn test_hash_md_circuit() {\n        // let arity = PoseidonMDArity::to_usize();\n        let n = 71;\n        let data = vec![PoseidonDomain(Fr::one().into_repr()); n];\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let circuit_data = (0..n)\n            .map(|n| {\n                AllocatedNum::alloc(cs.namespace(|| format!(\"input {}\", n)), || Ok(Fr::one()))\n                    .expect(\"alloc failure\")\n            })\n            .collect::<Vec<_>>();\n\n        let hashed = PoseidonFunction::hash_md(&data);\n        let hashed_fr = Fr::from_repr(hashed.0).expect(\"from_repr failure\");\n\n        let circuit_hashed = PoseidonFunction::hash_md_circuit(&mut cs, circuit_data.as_slice())\n            .expect(\"hash_md_circuit failure\");\n\n        assert!(cs.is_satisfied());\n        let expected_constraints = 2_770;\n        let actual_constraints = cs.num_constraints();\n\n        assert_eq!(expected_constraints, actual_constraints);\n\n        assert_eq!(\n            hashed_fr,\n            circuit_hashed.get_value().expect(\"get_value failure\")\n        );\n    }\n}\n"
  },
  {
    "path": "filecoin-hashers/src/poseidon_types.rs",
    "content": "use std::fmt::Debug;\n\nuse bellperson::bls::{Bls12, Fr};\nuse generic_array::typenum::{U0, U11, U16, U2, U24, U36, U4, U8};\nuse lazy_static::lazy_static;\nuse neptune::{poseidon::PoseidonConstants, Arity};\n\npub type PoseidonBinaryArity = U2;\npub type PoseidonQuadArity = U4;\npub type PoseidonOctArity = U8;\n\n/// Arity to use by default for `hash_md` with poseidon.\npub type PoseidonMDArity = U36;\n\n/// Arity to use for hasher implementations (Poseidon) which are specialized at compile time.\n/// Must match PoseidonArity\npub const MERKLE_TREE_ARITY: usize = 2;\n\nlazy_static! {\n    pub static ref POSEIDON_CONSTANTS_2: PoseidonConstants::<Bls12, U2> = PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_4: PoseidonConstants::<Bls12, U4> = PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_8: PoseidonConstants::<Bls12, U8> = PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_16: PoseidonConstants::<Bls12, U16> =\n        PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_24: PoseidonConstants::<Bls12, U24> =\n        PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_36: PoseidonConstants::<Bls12, U36> =\n        PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_11: PoseidonConstants::<Bls12, U11> =\n        PoseidonConstants::new();\n    pub static ref POSEIDON_MD_CONSTANTS: PoseidonConstants::<Bls12, PoseidonMDArity> =\n        PoseidonConstants::new();\n}\n\npub trait PoseidonArity: Arity<Fr> + Send + Sync + Clone + Debug {\n    #[allow(non_snake_case)]\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self>;\n}\n\nimpl PoseidonArity for U0 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        unreachable!(\"dummy implementation, do not ever call me\")\n    }\n}\n\nimpl PoseidonArity for U2 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_2\n    }\n}\n\nimpl PoseidonArity for U4 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_4\n    }\n}\n\nimpl PoseidonArity for U8 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_8\n    }\n}\n\nimpl PoseidonArity for U11 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_11\n    }\n}\n\nimpl PoseidonArity for U16 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_16\n    }\n}\nimpl PoseidonArity for U24 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_24\n    }\n}\nimpl PoseidonArity for U36 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_36\n    }\n}\n"
  },
  {
    "path": "filecoin-hashers/src/sha256.rs",
    "content": "use std::fmt::{self, Debug, Formatter};\nuse std::hash::Hasher as StdHasher;\nuse std::panic::panic_any;\n\nuse anyhow::ensure;\nuse bellperson::{\n    bls::{Bls12, Fr, FrRepr},\n    gadgets::{boolean::Boolean, multipack, num::AllocatedNum, sha256::sha256 as sha256_circuit},\n    ConstraintSystem, SynthesisError,\n};\nuse ff::{Field, PrimeField, PrimeFieldRepr};\nuse merkletree::{\n    hash::{Algorithm, Hashable},\n    merkle::Element,\n};\nuse rand::RngCore;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\n\nuse crate::types::{Domain, HashFunction, Hasher};\n\n#[derive(Default, Copy, Clone, Debug, PartialEq, Eq)]\npub struct Sha256Hasher {}\n\nimpl Hasher for Sha256Hasher {\n    type Domain = Sha256Domain;\n    type Function = Sha256Function;\n\n    fn name() -> String {\n        \"sha256_hasher\".into()\n    }\n}\n\n#[derive(Default, Clone, Debug)]\npub struct Sha256Function(Sha256);\n\nimpl StdHasher for Sha256Function {\n    #[inline]\n    fn write(&mut self, msg: &[u8]) {\n        self.0.update(msg)\n    }\n\n    #[inline]\n    fn finish(&self) -> u64 {\n        unreachable!(\"unused by Function -- should never be called\")\n    }\n}\n\n#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Default, Serialize, Deserialize, Hash)]\npub struct Sha256Domain(pub [u8; 32]);\n\nimpl Debug for Sha256Domain {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"Sha256Domain({})\", hex::encode(&self.0))\n    }\n}\n\nimpl AsRef<Sha256Domain> for Sha256Domain {\n    fn as_ref(&self) -> &Self {\n        self\n    }\n}\n\nimpl Sha256Domain {\n    fn trim_to_fr32(&mut self) {\n        // strip last two bits, to ensure result is in Fr.\n        self.0[31] &= 0b0011_1111;\n    }\n}\n\nimpl AsRef<[u8]> for Sha256Domain {\n    fn as_ref(&self) -> &[u8] {\n        &self.0[..]\n    }\n}\n\nimpl Hashable<Sha256Function> for Sha256Domain {\n    fn hash(&self, state: &mut Sha256Function) {\n        state.write(self.as_ref())\n    }\n}\n\nimpl From<Fr> for Sha256Domain {\n    fn from(val: Fr) -> Self {\n        let mut res = Self::default();\n        val.into_repr()\n            .write_le(&mut res.0[0..32])\n            .expect(\"write_le failure\");\n\n        res\n    }\n}\n\nimpl From<FrRepr> for Sha256Domain {\n    fn from(val: FrRepr) -> Self {\n        let mut res = Self::default();\n        val.write_le(&mut res.0[0..32]).expect(\"write_le failure\");\n\n        res\n    }\n}\n\nimpl From<Sha256Domain> for Fr {\n    fn from(val: Sha256Domain) -> Self {\n        let mut res = FrRepr::default();\n        res.read_le(&val.0[0..32]).expect(\"read_le failure\");\n\n        Fr::from_repr(res).expect(\"from_repr failure\")\n    }\n}\n\nimpl Domain for Sha256Domain {\n    fn into_bytes(&self) -> Vec<u8> {\n        self.0.to_vec()\n    }\n\n    fn try_from_bytes(raw: &[u8]) -> anyhow::Result<Self> {\n        ensure!(\n            raw.len() == Sha256Domain::byte_len(),\n            \"invalid number of bytes\"\n        );\n\n        let mut res = Sha256Domain::default();\n        res.0.copy_from_slice(&raw[0..Sha256Domain::byte_len()]);\n        Ok(res)\n    }\n\n    fn write_bytes(&self, dest: &mut [u8]) -> anyhow::Result<()> {\n        ensure!(\n            dest.len() >= Sha256Domain::byte_len(),\n            \"invalid number of bytes\"\n        );\n\n        dest[0..Sha256Domain::byte_len()].copy_from_slice(&self.0[..]);\n        Ok(())\n    }\n\n    fn random<R: RngCore>(rng: &mut R) -> Self {\n        // generating an Fr and converting it, to 
ensure we stay in the field\n        Fr::random(rng).into()\n    }\n}\n\nimpl Element for Sha256Domain {\n    fn byte_len() -> usize {\n        32\n    }\n\n    fn from_slice(bytes: &[u8]) -> Self {\n        match Sha256Domain::try_from_bytes(bytes) {\n            Ok(res) => res,\n            Err(err) => panic_any(err),\n        }\n    }\n\n    fn copy_to_slice(&self, bytes: &mut [u8]) {\n        bytes.copy_from_slice(&self.0);\n    }\n}\n\nimpl HashFunction<Sha256Domain> for Sha256Function {\n    fn hash(data: &[u8]) -> Sha256Domain {\n        let hashed = Sha256::digest(data);\n        let mut res = Sha256Domain::default();\n        res.0.copy_from_slice(&hashed[..]);\n        res.trim_to_fr32();\n        res\n    }\n\n    fn hash2(a: &Sha256Domain, b: &Sha256Domain) -> Sha256Domain {\n        let hashed = Sha256::new()\n            .chain(AsRef::<[u8]>::as_ref(a))\n            .chain(AsRef::<[u8]>::as_ref(b))\n            .finalize();\n        let mut res = Sha256Domain::default();\n        res.0.copy_from_slice(&hashed[..]);\n        res.trim_to_fr32();\n        res\n    }\n\n    fn hash_multi_leaf_circuit<Arity, CS: ConstraintSystem<Bls12>>(\n        mut cs: CS,\n        leaves: &[AllocatedNum<Bls12>],\n        _height: usize,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        let mut bits = Vec::with_capacity(leaves.len() * Fr::CAPACITY as usize);\n        for (i, leaf) in leaves.iter().enumerate() {\n            let mut padded = leaf.to_bits_le(cs.namespace(|| format!(\"{}_num_into_bits\", i)))?;\n            while padded.len() % 8 != 0 {\n                padded.push(Boolean::Constant(false));\n            }\n\n            bits.extend(\n                padded\n                    .chunks_exact(8)\n                    .flat_map(|chunk| chunk.iter().rev())\n                    .cloned(),\n            );\n        }\n        Self::hash_circuit(cs, &bits)\n    }\n\n    fn hash_leaf_bits_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        left: &[Boolean],\n        right: &[Boolean],\n        _height: usize,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        let mut preimage: Vec<Boolean> = vec![];\n\n        let mut left_padded = left.to_vec();\n        while left_padded.len() % 8 != 0 {\n            left_padded.push(Boolean::Constant(false));\n        }\n\n        preimage.extend(\n            left_padded\n                .chunks_exact(8)\n                .flat_map(|chunk| chunk.iter().rev())\n                .cloned(),\n        );\n\n        let mut right_padded = right.to_vec();\n        while right_padded.len() % 8 != 0 {\n            right_padded.push(Boolean::Constant(false));\n        }\n\n        preimage.extend(\n            right_padded\n                .chunks_exact(8)\n                .flat_map(|chunk| chunk.iter().rev())\n                .cloned(),\n        );\n\n        Self::hash_circuit(cs, &preimage[..])\n    }\n\n    fn hash_circuit<CS: ConstraintSystem<Bls12>>(\n        mut cs: CS,\n        bits: &[Boolean],\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        let be_bits = sha256_circuit(cs.namespace(|| \"hash\"), bits)?;\n        let le_bits = be_bits\n            .chunks(8)\n            .flat_map(|chunk| chunk.iter().rev())\n            .cloned()\n            .take(Fr::CAPACITY as usize)\n            .collect::<Vec<_>>();\n        multipack::pack_bits(cs.namespace(|| \"pack_le\"), &le_bits)\n    }\n\n    fn hash2_circuit<CS>(\n        mut cs: CS,\n        a_num: &AllocatedNum<Bls12>,\n        b_num: 
&AllocatedNum<Bls12>,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        // Allocate as booleans\n        let a = a_num.to_bits_le(cs.namespace(|| \"a_bits\"))?;\n        let b = b_num.to_bits_le(cs.namespace(|| \"b_bits\"))?;\n\n        let mut preimage: Vec<Boolean> = vec![];\n\n        let mut a_padded = a.to_vec();\n        while a_padded.len() % 8 != 0 {\n            a_padded.push(Boolean::Constant(false));\n        }\n\n        preimage.extend(\n            a_padded\n                .chunks_exact(8)\n                .flat_map(|chunk| chunk.iter().rev())\n                .cloned(),\n        );\n\n        let mut b_padded = b.to_vec();\n        while b_padded.len() % 8 != 0 {\n            b_padded.push(Boolean::Constant(false));\n        }\n\n        preimage.extend(\n            b_padded\n                .chunks_exact(8)\n                .flat_map(|chunk| chunk.iter().rev())\n                .cloned(),\n        );\n\n        Self::hash_circuit(cs, &preimage[..])\n    }\n}\n\nimpl Algorithm<Sha256Domain> for Sha256Function {\n    #[inline]\n    fn hash(&mut self) -> Sha256Domain {\n        let mut h = [0u8; 32];\n        h.copy_from_slice(self.0.clone().finalize().as_ref());\n        let mut dd = Sha256Domain::from(h);\n        dd.trim_to_fr32();\n        dd\n    }\n\n    #[inline]\n    fn reset(&mut self) {\n        self.0.reset();\n    }\n\n    fn leaf(&mut self, leaf: Sha256Domain) -> Sha256Domain {\n        leaf\n    }\n\n    fn node(&mut self, left: Sha256Domain, right: Sha256Domain, _height: usize) -> Sha256Domain {\n        left.hash(self);\n        right.hash(self);\n        self.hash()\n    }\n\n    fn multi_node(&mut self, parts: &[Sha256Domain], _height: usize) -> Sha256Domain {\n        for part in parts {\n            part.hash(self)\n        }\n        self.hash()\n    }\n}\n\nimpl From<[u8; 32]> for Sha256Domain {\n    #[inline]\n    fn from(val: [u8; 32]) -> Self {\n        Sha256Domain(val)\n    }\n}\n\nimpl From<Sha256Domain> for [u8; 32] {\n    #[inline]\n    fn from(val: Sha256Domain) -> Self {\n        val.0\n    }\n}\n"
  },
  {
    "path": "filecoin-hashers/src/types.rs",
    "content": "use std::fmt::Debug;\nuse std::hash::Hash as StdHash;\n\n#[cfg(feature = \"poseidon\")]\npub use crate::poseidon_types::*;\n\nuse bellperson::{\n    bls::{Bls12, Fr, FrRepr},\n    gadgets::{boolean::Boolean, num::AllocatedNum},\n    ConstraintSystem, SynthesisError,\n};\nuse merkletree::{\n    hash::{Algorithm as LightAlgorithm, Hashable as LightHashable},\n    merkle::Element,\n};\nuse rand::RngCore;\nuse serde::{de::DeserializeOwned, Serialize};\n\npub trait Domain:\n    Ord\n    + Copy\n    + Clone\n    + AsRef<[u8]>\n    + Default\n    + Debug\n    + Eq\n    + Send\n    + Sync\n    + From<Fr>\n    + From<FrRepr>\n    + Into<Fr>\n    + Serialize\n    + DeserializeOwned\n    + Element\n    + StdHash\n{\n    fn into_bytes(&self) -> Vec<u8>;\n    fn try_from_bytes(raw: &[u8]) -> anyhow::Result<Self>;\n    /// Write itself into the given slice, LittleEndian bytes.\n    fn write_bytes(&self, _: &mut [u8]) -> anyhow::Result<()>;\n\n    fn random<R: RngCore>(rng: &mut R) -> Self;\n}\n\npub trait HashFunction<T: Domain>: Clone + Debug + Send + Sync + LightAlgorithm<T> {\n    fn hash(data: &[u8]) -> T;\n    fn hash2(a: &T, b: &T) -> T;\n    fn hash_md(input: &[T]) -> T {\n        // Default to binary.\n        assert!(input.len() > 1, \"hash_md needs more than one element.\");\n        input\n            .iter()\n            .skip(1)\n            .fold(input[0], |acc, elt| Self::hash2(&acc, elt))\n    }\n\n    fn hash_leaf(data: &dyn LightHashable<Self>) -> T {\n        let mut a = Self::default();\n        data.hash(&mut a);\n        let item_hash = a.hash();\n        a.leaf(item_hash)\n    }\n\n    fn hash_single_node(data: &dyn LightHashable<Self>) -> T {\n        let mut a = Self::default();\n        data.hash(&mut a);\n        a.hash()\n    }\n\n    fn hash_leaf_circuit<CS: ConstraintSystem<Bls12>>(\n        mut cs: CS,\n        left: &AllocatedNum<Bls12>,\n        right: &AllocatedNum<Bls12>,\n        height: usize,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        let left_bits = left.to_bits_le(cs.namespace(|| \"left num into bits\"))?;\n        let right_bits = right.to_bits_le(cs.namespace(|| \"right num into bits\"))?;\n\n        Self::hash_leaf_bits_circuit(cs, &left_bits, &right_bits, height)\n    }\n\n    fn hash_multi_leaf_circuit<Arity: 'static + PoseidonArity, CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        leaves: &[AllocatedNum<Bls12>],\n        height: usize,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError>;\n\n    fn hash_md_circuit<CS: ConstraintSystem<Bls12>>(\n        _cs: &mut CS,\n        _elements: &[AllocatedNum<Bls12>],\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        unimplemented!();\n    }\n\n    fn hash_leaf_bits_circuit<CS: ConstraintSystem<Bls12>>(\n        _cs: CS,\n        _left: &[Boolean],\n        _right: &[Boolean],\n        _height: usize,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        unimplemented!();\n    }\n\n    fn hash_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        bits: &[Boolean],\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError>;\n\n    fn hash2_circuit<CS>(\n        cs: CS,\n        a: &AllocatedNum<Bls12>,\n        b: &AllocatedNum<Bls12>,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>;\n}\n\npub trait Hasher: Clone + Debug + Eq + Default + Send + Sync {\n    type Domain: Domain + LightHashable<Self::Function> + AsRef<Self::Domain>;\n    type Function: HashFunction<Self::Domain>;\n\n    fn 
name() -> String;\n}\n"
  },
  {
    "path": "filecoin-proofs/.gitignore",
    "content": "/target\n**/*.rs.bk\nCargo.lock\n.criterion\nheaptrack*\n"
  },
  {
    "path": "filecoin-proofs/Cargo.toml",
    "content": "[package]\nname = \"filecoin-proofs\"\ndescription = \"The Filecoin specific aspects of storage-proofs, including a C based FFI, to generate and verify proofs.\"\nversion = \"7.0.1\"\nauthors = [\"dignifiedquire <dignifiedquire@gmail.com>\", \"laser <l@s3r.com>\", \"porcuquine <porcuquine@users.noreply.github.com>\"]\nlicense = \"MIT OR Apache-2.0\"\nedition = \"2018\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\nreadme = \"README.md\"\n\n[dependencies]\nstorage-proofs-core = { path = \"../storage-proofs-core\", version = \"^7.0.0\", default-features = false}\nstorage-proofs-porep = { path = \"../storage-proofs-porep\", version = \"^7.0.0\", default-features = false }\nstorage-proofs-post = { path = \"../storage-proofs-post\", version = \"^7.0.0\", default-features = false }\nfilecoin-hashers = { version = \"^2.0.0\", path = \"../filecoin-hashers\", default-features = false, features = [\"poseidon\", \"sha256\"] }\nbitvec = \"0.17\"\nrand = \"0.7\"\nlazy_static = \"1.2\"\nmemmap = \"0.7\"\nbyteorder = \"1\"\nitertools = \"0.9\"\nserde = { version = \"1.0\", features = [\"rc\", \"derive\"] }\nserde_json = \"1.0\"\nff = { version = \"0.2.3\", package = \"fff\" }\nblake2b_simd = \"0.5\"\nbellperson = { version = \"0.13\", default-features = false }\nlog = \"0.4.7\"\nfil_logger = \"0.1\"\nrayon = \"1.1.0\"\nblake2s_simd = \"0.5.8\"\nhex = \"0.4.0\"\nmerkletree = \"0.21.0\"\nbincode = \"1.1.2\"\nanyhow = \"1.0.23\"\nrand_xorshift = \"0.2.0\"\nsha2 = \"0.9.1\"\ntypenum = \"1.11.2\"\ngperftools = { version = \"0.2\", optional = true }\ngeneric-array = \"0.14.4\"\ngroupy = \"0.3.0\"\nbyte-slice-cast = \"1.0.0\"\nfr32 = { path = \"../fr32\", version = \"^0.2.0\", default-features = false }\n\n[dev-dependencies]\ncriterion = \"0.3\"\nrexpect = \"0.4.0\"\npretty_assertions = \"0.6.1\"\nfailure = \"0.1.7\"\ntempfile = \"3\"\n\n[features]\ndefault = [\"gpu\", \"pairing\"]\ncpu-profile = [\"gperftools\"]\nheap-profile = [\"gperftools/heap\"]\nsimd = [\"storage-proofs-core/simd\"]\nasm = [\"storage-proofs-core/asm\"]\ngpu = [\n    \"storage-proofs-core/gpu\",\n    \"storage-proofs-porep/gpu\",\n    \"storage-proofs-post/gpu\",\n    \"bellperson/gpu\",\n    \"filecoin-hashers/gpu\",\n    \"fr32/gpu\",\n]\npairing = [\n    \"storage-proofs-core/pairing\",\n    \"storage-proofs-porep/pairing\",\n    \"storage-proofs-post/pairing\",\n    \"bellperson/pairing\",\n    \"filecoin-hashers/pairing\",\n    \"fr32/pairing\",\n]\nblst = [\n    \"storage-proofs-core/blst\",\n    \"storage-proofs-porep/blst\",\n    \"storage-proofs-post/blst\",\n    \"bellperson/blst\",\n    \"filecoin-hashers/blst\",\n    \"fr32/blst\",\n]\n\n[[bench]]\nname = \"preprocessing\"\nharness = false\n"
  },
  {
    "path": "filecoin-proofs/LICENSE-APACHE",
    "content": "                              Apache License\n                        Version 2.0, January 2004\n                     http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n   \"License\" shall mean the terms and conditions for use, reproduction,\n   and distribution as defined by Sections 1 through 9 of this document.\n\n   \"Licensor\" shall mean the copyright owner or entity authorized by\n   the copyright owner that is granting the License.\n\n   \"Legal Entity\" shall mean the union of the acting entity and all\n   other entities that control, are controlled by, or are under common\n   control with that entity. For the purposes of this definition,\n   \"control\" means (i) the power, direct or indirect, to cause the\n   direction or management of such entity, whether by contract or\n   otherwise, or (ii) ownership of fifty percent (50%) or more of the\n   outstanding shares, or (iii) beneficial ownership of such entity.\n\n   \"You\" (or \"Your\") shall mean an individual or Legal Entity\n   exercising permissions granted by this License.\n\n   \"Source\" form shall mean the preferred form for making modifications,\n   including but not limited to software source code, documentation\n   source, and configuration files.\n\n   \"Object\" form shall mean any form resulting from mechanical\n   transformation or translation of a Source form, including but\n   not limited to compiled object code, generated documentation,\n   and conversions to other media types.\n\n   \"Work\" shall mean the work of authorship, whether in Source or\n   Object form, made available under the License, as indicated by a\n   copyright notice that is included in or attached to the work\n   (an example is provided in the Appendix below).\n\n   \"Derivative Works\" shall mean any work, whether in Source or Object\n   form, that is based on (or derived from) the Work and for which the\n   editorial revisions, annotations, elaborations, or other modifications\n   represent, as a whole, an original work of authorship. For the purposes\n   of this License, Derivative Works shall not include works that remain\n   separable from, or merely link (or bind by name) to the interfaces of,\n   the Work and Derivative Works thereof.\n\n   \"Contribution\" shall mean any work of authorship, including\n   the original version of the Work and any modifications or additions\n   to that Work or Derivative Works thereof, that is intentionally\n   submitted to Licensor for inclusion in the Work by the copyright owner\n   or by an individual or Legal Entity authorized to submit on behalf of\n   the copyright owner. For the purposes of this definition, \"submitted\"\n   means any form of electronic, verbal, or written communication sent\n   to the Licensor or its representatives, including but not limited to\n   communication on electronic mailing lists, source code control systems,\n   and issue tracking systems that are managed by, or on behalf of, the\n   Licensor for the purpose of discussing and improving the Work, but\n   excluding communication that is conspicuously marked or otherwise\n   designated in writing by the copyright owner as \"Not a Contribution.\"\n\n   \"Contributor\" shall mean Licensor and any individual or Legal Entity\n   on behalf of whom a Contribution has been received by Licensor and\n   subsequently incorporated within the Work.\n\n2. Grant of Copyright License. 
Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   copyright license to reproduce, prepare Derivative Works of,\n   publicly display, publicly perform, sublicense, and distribute the\n   Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   (except as stated in this section) patent license to make, have made,\n   use, offer to sell, sell, import, and otherwise transfer the Work,\n   where such license applies only to those patent claims licensable\n   by such Contributor that are necessarily infringed by their\n   Contribution(s) alone or by combination of their Contribution(s)\n   with the Work to which such Contribution(s) was submitted. If You\n   institute patent litigation against any entity (including a\n   cross-claim or counterclaim in a lawsuit) alleging that the Work\n   or a Contribution incorporated within the Work constitutes direct\n   or contributory patent infringement, then any patent licenses\n   granted to You under this License for that Work shall terminate\n   as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n   Work or Derivative Works thereof in any medium, with or without\n   modifications, and in Source or Object form, provided that You\n   meet the following conditions:\n\n   (a) You must give any other recipients of the Work or\n       Derivative Works a copy of this License; and\n\n   (b) You must cause any modified files to carry prominent notices\n       stating that You changed the files; and\n\n   (c) You must retain, in the Source form of any Derivative Works\n       that You distribute, all copyright, patent, trademark, and\n       attribution notices from the Source form of the Work,\n       excluding those notices that do not pertain to any part of\n       the Derivative Works; and\n\n   (d) If the Work includes a \"NOTICE\" text file as part of its\n       distribution, then any Derivative Works that You distribute must\n       include a readable copy of the attribution notices contained\n       within such NOTICE file, excluding those notices that do not\n       pertain to any part of the Derivative Works, in at least one\n       of the following places: within a NOTICE text file distributed\n       as part of the Derivative Works; within the Source form or\n       documentation, if provided along with the Derivative Works; or,\n       within a display generated by the Derivative Works, if and\n       wherever such third-party notices normally appear. The contents\n       of the NOTICE file are for informational purposes only and\n       do not modify the License. 
You may add Your own attribution\n       notices within Derivative Works that You distribute, alongside\n       or as an addendum to the NOTICE text from the Work, provided\n       that such additional attribution notices cannot be construed\n       as modifying the License.\n\n   You may add Your own copyright statement to Your modifications and\n   may provide additional or different license terms and conditions\n   for use, reproduction, or distribution of Your modifications, or\n   for any such Derivative Works as a whole, provided Your use,\n   reproduction, and distribution of the Work otherwise complies with\n   the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n   any Contribution intentionally submitted for inclusion in the Work\n   by You to the Licensor shall be under the terms and conditions of\n   this License, without any additional terms or conditions.\n   Notwithstanding the above, nothing herein shall supersede or modify\n   the terms of any separate license agreement you may have executed\n   with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n   names, trademarks, service marks, or product names of the Licensor,\n   except as required for reasonable and customary use in describing the\n   origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n   agreed to in writing, Licensor provides the Work (and each\n   Contributor provides its Contributions) on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n   implied, including, without limitation, any warranties or conditions\n   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n   PARTICULAR PURPOSE. You are solely responsible for determining the\n   appropriateness of using or redistributing the Work and assume any\n   risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n   whether in tort (including negligence), contract, or otherwise,\n   unless required by applicable law (such as deliberate and grossly\n   negligent acts) or agreed to in writing, shall any Contributor be\n   liable to You for damages, including any direct, indirect, special,\n   incidental, or consequential damages of any character arising as a\n   result of this License or out of the use or inability to use the\n   Work (including but not limited to damages for loss of goodwill,\n   work stoppage, computer failure or malfunction, or any and all\n   other commercial damages or losses), even if such Contributor\n   has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n   the Work or Derivative Works thereof, You may choose to offer,\n   and charge a fee for, acceptance of support, warranty, indemnity,\n   or other liability obligations and/or rights consistent with this\n   License. 
However, in accepting such obligations, You may act only\n   on Your own behalf and on Your sole responsibility, not on behalf\n   of any other Contributor, and only if You agree to indemnify,\n   defend, and hold each Contributor harmless for any liability\n   incurred by, or claims asserted against, such Contributor by reason\n   of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n   To apply the Apache License to your work, attach the following\n   boilerplate notice, with the fields enclosed by brackets \"[]\"\n   replaced with your own identifying information. (Don't include\n   the brackets!)  The text should be enclosed in the appropriate\n   comment syntax for the file format. We also recommend that a\n   file or class name and description of purpose be included on the\n   same \"printed page\" as the copyright notice for easier\n   identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License."
  },
  {
    "path": "filecoin-proofs/LICENSE-MIT",
    "content": "Permission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE."
  },
  {
    "path": "filecoin-proofs/README.md",
    "content": "# worlddatabase\n\n> The worlddatabase specific aspects of `storage-proofs`, including a C based WD, to generate and verify proofs.\n\n\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "filecoin-proofs/benches/preprocessing.rs",
    "content": "use std::io::{Cursor, Read};\nuse std::time::Duration;\n\nuse criterion::{criterion_group, criterion_main, Criterion, ParameterizedBenchmark, Throughput};\nuse filecoin_proofs::{add_piece, PaddedBytesAmount, UnpaddedBytesAmount};\nuse fr32::Fr32Reader;\nuse rand::{thread_rng, Rng};\n\n#[cfg(feature = \"cpu-profile\")]\n#[inline(always)]\nfn start_profile(stage: &str) {\n    gperftools::profiler::PROFILER\n        .lock()\n        .expect(\"PROFILER poisoned\")\n        .start(format!(\"./{}.profile\", stage))\n        .expect(\"failed to start profiler\");\n}\n\n#[cfg(not(feature = \"cpu-profile\"))]\n#[inline(always)]\nfn start_profile(_stage: &str) {}\n\n#[cfg(feature = \"cpu-profile\")]\n#[inline(always)]\nfn stop_profile() {\n    gperftools::profiler::PROFILER\n        .lock()\n        .expect(\"PROFILER poisoned\")\n        .stop()\n        .expect(\"failed to start profiler\");\n}\n\n#[cfg(not(feature = \"cpu-profile\"))]\n#[inline(always)]\nfn stop_profile() {}\n\nfn random_data(size: usize) -> Vec<u8> {\n    let mut rng = thread_rng();\n    (0..size).map(|_| rng.gen()).collect()\n}\n\nfn preprocessing_benchmark(c: &mut Criterion) {\n    c.bench(\n        \"preprocessing\",\n        ParameterizedBenchmark::new(\n            \"write_padded\",\n            |b, size| {\n                let data = random_data(*size);\n                let mut buf = Vec::with_capacity(*size);\n\n                start_profile(&format!(\"write_padded_{}\", *size));\n                b.iter(|| {\n                    let mut reader = Fr32Reader::new(Cursor::new(&data));\n                    reader.read_to_end(&mut buf).expect(\"in memory read error\");\n                    assert!(buf.len() >= data.len());\n                    buf.clear();\n                });\n                stop_profile();\n            },\n            vec![128, 256, 512, 256_000, 512_000, 1_024_000, 2_048_000],\n        )\n        .sample_size(10)\n        .throughput(|s| Throughput::Bytes(*s as u64))\n        .warm_up_time(Duration::from_secs(1)),\n    );\n}\n\nfn add_piece_benchmark(c: &mut Criterion) {\n    c.bench(\n        \"preprocessing\",\n        ParameterizedBenchmark::new(\n            \"add_piece\",\n            |b, size| {\n                let padded_size = PaddedBytesAmount(*size as u64);\n                let unpadded_size: UnpaddedBytesAmount = padded_size.into();\n                let data = random_data(unpadded_size.0 as usize);\n                let mut buf = Vec::with_capacity(*size);\n\n                start_profile(&format!(\"add_piece_{}\", *size));\n                b.iter(|| {\n                    add_piece(\n                        Cursor::new(&data),\n                        &mut buf,\n                        unpadded_size,\n                        &[unpadded_size][..],\n                    )\n                    .unwrap();\n                    buf.clear();\n                });\n                stop_profile();\n            },\n            vec![512, 256 * 1024, 512 * 1024, 1024 * 1024, 2 * 1024 * 1024],\n        )\n        .sample_size(10)\n        .throughput(|s| Throughput::Bytes(*s as u64))\n        .warm_up_time(Duration::from_secs(1)),\n    );\n}\n\ncriterion_group!(benches, preprocessing_benchmark, add_piece_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "filecoin-proofs/build.rs",
    "content": "extern crate bindgen;\nextern crate cbindgen;\n\nuse std::env;\nuse std::fs::File;\nuse std::io::Write;\nuse std::path::PathBuf;\nuse std::process::Command;\n\nconst VERSION: &'static str = env!(\"CARGO_PKG_VERSION\");\n\nfn main() {\n    let crate_dir = std::env::var(\"CARGO_MANIFEST_DIR\").unwrap();\n    let out_path = PathBuf::from(env::var(\"OUT_DIR\").unwrap());\n    let target_path = out_path.join(\"../../..\");\n\n    let cfg = cbindgen::Config::from_root_or_default(std::path::Path::new(&crate_dir));\n\n    let c = cbindgen::Builder::new()\n        .with_config(cfg)\n        .with_crate(crate_dir)\n        .with_header(format!(\"/* libproofs Header Version {} */\", VERSION))\n        .with_language(cbindgen::Language::C)\n        .generate();\n\n    // This is needed to ensure we don't panic if there are errors in the crates code\n    // but rather just tell the rest of the system we can't proceed.\n    match c {\n        Ok(res) => {\n            res.write_to_file(target_path.join(\"libfilecoin_proofs.h\"));\n        }\n        Err(err) => {\n            eprintln!(\"unable to generate bindings: {:?}\", err);\n            std::process::exit(1);\n        }\n    }\n\n    let b = bindgen::builder()\n        .header(target_path.join(\"libfilecoin_proofs.h\").to_string_lossy())\n        // Here, we tell Rust to link libfilecoin_proofs so that auto-generated\n        // symbols are linked to symbols in the compiled dylib. For reasons\n        // unbeknown to me, the link attribute needs to precede an extern block.\n        .raw_line(\"#[link(name = \\\"filecoin_proofs\\\")]\\nextern \\\"C\\\" {}\")\n        .generate();\n\n    match b {\n        Ok(res) => {\n            res.write_to_file(out_path.join(\"libfilecoin_proofs.rs\"))\n                .expect(\"could not write file\");\n        }\n        Err(err) => {\n            eprintln!(\"unable to generate bindings: {:?}\", err);\n            std::process::exit(1);\n        }\n    }\n\n    let git_output = Command::new(\"git\")\n        .args(&[\"rev-parse\", \"HEAD\"])\n        .output()\n        .unwrap();\n    let git_hash = String::from_utf8(git_output.stdout).unwrap();\n\n    let libs = if cfg!(target_os = \"linux\") {\n        \"-lutil -lutil -ldl -lrt -lpthread -lgcc_s -lc -lm -lrt -lpthread -lutil -lutil\"\n    } else if cfg!(target_os = \"macos\") {\n        \"-framework Security -lSystem -lresolv -lc -lm\"\n    } else {\n        \"\"\n    };\n\n    let mut pc_file = File::create(target_path.join(\"libfilecoin_proofs.pc\"))\n        .expect(\"unable to generate .pc file: {:?}\");\n\n    write!(\n        pc_file,\n        \"Name: libfilecoin_proofs\nVersion: {version}\nDescription: rust-proofs library\nLibs: {libs}\n\",\n        version = git_hash.trim(),\n        libs = libs\n    )\n    .expect(\"unable to write to .pc file: {:?}\");\n}\n"
  },
  {
    "path": "filecoin-proofs/cbindgen.toml",
    "content": "[export]\n# A list of symbols to not include in the generated bindings\nexclude = [\"MerkleTree\"]\n\n[parse]\n# Whether to parse dependent crates and include their types in the generated\n# bindings\nparse_deps = true\n# A white list of crate names that are allowed to be parsed\ninclude = [\"sector_base\"]\n"
  },
  {
    "path": "filecoin-proofs/examples/beacon-post.rs",
    "content": "extern crate bellman;\nextern crate pairing;\nextern crate rand;\nextern crate sapling_crypto;\n#[macro_use]\nextern crate clap;\n#[cfg(feature = \"profile\")]\nextern crate gperftools;\nextern crate memmap;\nextern crate tempfile;\n#[macro_use]\nextern crate slog;\n\nextern crate filecoin_proofs;\nextern crate storage_proofs;\n\nuse clap::{App, Arg};\n#[cfg(feature = \"profile\")]\nuse gperftools::profiler::PROFILER;\nuse pairing::bls12_381::Bls12;\nuse rand::{Rng, SeedableRng, XorShiftRng};\nuse std::time::{Duration, Instant};\n\nuse filecoin_proofs::FCP_LOG;\nuse storage_proofs::beacon_post::*;\nuse storage_proofs::drgraph::*;\nuse storage_proofs::example_helper::prettyb;\nuse storage_proofs::fr32::fr_into_bytes;\nuse storage_proofs::hasher::pedersen::PedersenDomain;\nuse storage_proofs::hasher::PedersenHasher;\nuse storage_proofs::proof::ProofScheme;\nuse storage_proofs::{vdf_post, vdf_sloth};\n\n#[cfg(feature = \"profile\")]\n#[inline(always)]\nfn start_profile(stage: &str) {\n    PROFILER\n        .lock()\n        .unwrap()\n        .start(format!(\"./{}.profile\", stage))\n        .unwrap();\n}\n\n#[cfg(not(feature = \"profile\"))]\n#[inline(always)]\nfn start_profile(_stage: &str) {}\n\n#[cfg(feature = \"profile\")]\n#[inline(always)]\nfn stop_profile() {\n    PROFILER.lock().unwrap().stop().unwrap();\n}\n\n#[cfg(not(feature = \"profile\"))]\n#[inline(always)]\nfn stop_profile() {}\n\nfn do_the_work(\n    size: usize,\n    vdf: usize,\n    challenge_count: usize,\n    post_epochs: usize,\n    post_periods_count: usize,\n    sectors_count: usize,\n) {\n    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n    info!(FCP_LOG, \"sector size: {}\", prettyb(size); \"target\" => \"config\");\n    info!(FCP_LOG, \"vdf: {}\", vdf; \"target\" => \"config\");\n    info!(FCP_LOG, \"challenge_count: {}\", challenge_count; \"target\" => \"config\");\n    info!(FCP_LOG, \"post_epochs: {}\", post_epochs; \"target\" => \"config\");\n    info!(FCP_LOG, \"post_periods_count: {:?}\", post_periods_count; \"target\" => \"config\");\n    info!(FCP_LOG, \"sectors_count: {:?}\", sectors_count; \"target\" => \"config\");\n\n    info!(FCP_LOG, \"generating fake data\"; \"target\" => \"status\");\n\n    let nodes_size = size / 32;\n\n    let data: Vec<Vec<u8>> = (0..sectors_count)\n        .map(|_| {\n            (0..nodes_size)\n                .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n                .collect()\n        })\n        .collect();\n\n    let graphs: Vec<_> = (0..sectors_count)\n        .map(|_| BucketGraph::<PedersenHasher>::new(nodes_size, 5, 0, new_seed()))\n        .collect();\n\n    let trees: Vec<_> = graphs\n        .iter()\n        .zip(data.iter())\n        .map(|(graph, data)| graph.merkle_tree(data.as_slice()).unwrap())\n        .collect();\n\n    let sp = SetupParams::<PedersenDomain, vdf_sloth::Sloth> {\n        vdf_post_setup_params: vdf_post::SetupParams::<PedersenDomain, vdf_sloth::Sloth> {\n            challenge_count,\n            sector_size: size,\n            post_epochs,\n            setup_params_vdf: vdf_sloth::SetupParams {\n                key: rng.gen(),\n                rounds: vdf,\n            },\n            sectors_count,\n        },\n        post_periods_count,\n    };\n\n    info!(FCP_LOG, \"running setup\");\n    start_profile(\"setup\");\n    let pub_params = BeaconPoSt::<PedersenHasher, vdf_sloth::Sloth>::setup(&sp).unwrap();\n    stop_profile();\n\n    let pub_inputs = PublicInputs {\n        
commitments: trees.iter().map(|t| t.root()).collect(),\n    };\n\n    let trees_ref: Vec<_> = trees.iter().collect();\n    let replicas: Vec<&[u8]> = data.iter().map(|d| &d[..]).collect();\n\n    let priv_inputs = PrivateInputs::<PedersenHasher>::new(&replicas, &trees_ref[..]);\n\n    let mut total_proving = Duration::new(0, 0);\n    info!(FCP_LOG, \"generating proofs\");\n\n    let start = Instant::now();\n    start_profile(\"prove\");\n    let proof = BeaconPoSt::prove(&pub_params, &pub_inputs, &priv_inputs).unwrap();\n    stop_profile();\n\n    total_proving += start.elapsed();\n\n    let proving_avg = total_proving;\n    let proving_avg =\n        f64::from(proving_avg.subsec_nanos()) / 1_000_000_000f64 + (proving_avg.as_secs() as f64);\n\n    info!(FCP_LOG, \"proving_time: {:?} seconds\", proving_avg; \"target\" => \"stats\");\n\n    let samples: u32 = 5;\n    info!(FCP_LOG, \"sampling verifying (samples: {})\", samples);\n    let mut total_verifying = Duration::new(0, 0);\n\n    start_profile(\"verify\");\n    for _ in 0..samples {\n        let start = Instant::now();\n        let verified = BeaconPoSt::verify(&pub_params, &pub_inputs, &proof).unwrap();\n\n        if !verified {\n            info!(FCP_LOG, \"Verification failed.\"; \"target\" => \"results\");\n        };\n        total_verifying += start.elapsed();\n    }\n    info!(FCP_LOG, \"Verification complete\"; \"target\" => \"status\");\n    stop_profile();\n\n    let verifying_avg = total_verifying / samples;\n    let verifying_avg = f64::from(verifying_avg.subsec_nanos()) / 1_000_000_000f64\n        + (verifying_avg.as_secs() as f64);\n    info!(FCP_LOG, \"average_verifying_time: {:?} seconds\", verifying_avg; \"target\" => \"stats\");\n}\n\nfn main() {\n    let matches = App::new(stringify!(\"Beacon PoSt Vanilla Bench\"))\n        .version(\"1.0\")\n        .arg(\n            Arg::with_name(\"size\")\n                .required(true)\n                .long(\"size\")\n                .help(\"The data size of a sector in KB\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"vdf\")\n                .help(\"The number of sloth iterations\")\n                .long(\"vdf\")\n                .default_value(\"10\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"challenges\")\n                .long(\"challenges\")\n                .help(\"How many challenges to execute\")\n                .default_value(\"1\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"post-epochs\")\n                .long(\"post-epochs\")\n                .help(\"How many epochs should the PoSt run for\")\n                .default_value(\"10\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"post-periods-count\")\n                .long(\"post-periods-count\")\n                .help(\"How many PoSt periods should there be\")\n                .default_value(\"10\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"sectors\")\n                .long(\"sectors\")\n                .help(\"How many sectors are being proven\")\n                .default_value(\"5\")\n                .takes_value(true),\n        )\n        .get_matches();\n\n    let size = value_t!(matches, \"size\", usize).unwrap() * 1024;\n    let vdf = value_t!(matches, \"vdf\", usize).unwrap();\n    let challenge_count = value_t!(matches, \"challenges\", usize).unwrap();\n    let post_epochs = value_t!(matches, \"post-epochs\", usize).unwrap();\n    let post_periods_count = value_t!(matches, \"post-periods-count\", usize).unwrap();\n    let sectors_count = value_t!(matches, \"sectors\", usize).unwrap();\n\n    do_the_work(\n        size,\n        vdf,\n        challenge_count,\n        post_epochs,\n        post_periods_count,\n        sectors_count,\n    );\n}\n"
  },
  {
    "path": "filecoin-proofs/examples/drgporep-vanilla-disk.rs",
    "content": "extern crate bellman;\nextern crate pairing;\nextern crate rand;\nextern crate sapling_crypto;\n#[macro_use]\nextern crate clap;\nextern crate memmap;\nextern crate tempfile;\n\n#[macro_use]\nextern crate slog;\n\nextern crate filecoin_proofs;\nextern crate storage_proofs;\n\nuse clap::{App, Arg};\nuse pairing::bls12_381::{Bls12, Fr};\nuse rand::{Rng, SeedableRng, XorShiftRng};\nuse std::time::{Duration, Instant};\n\nuse storage_proofs::drgporep::*;\nuse storage_proofs::drgraph::*;\nuse storage_proofs::example_helper::prettyb;\nuse storage_proofs::fr32::fr_into_bytes;\nuse storage_proofs::hasher::{Blake2sHasher, Hasher, PedersenHasher, Sha256Hasher};\nuse storage_proofs::porep::PoRep;\nuse storage_proofs::proof::ProofScheme;\n\nuse memmap::MmapMut;\nuse memmap::MmapOptions;\nuse std::fs::File;\nuse std::io::Write;\n\nuse filecoin_proofs::FCP_LOG;\n\nfn file_backed_mmap_from_random_bytes(n: usize) -> MmapMut {\n    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n    let mut tmpfile: File = tempfile::tempfile().unwrap();\n\n    for _ in 0..n {\n        tmpfile\n            .write_all(&fr_into_bytes::<Bls12>(&rng.gen()))\n            .unwrap();\n    }\n\n    unsafe { MmapOptions::new().map_mut(&tmpfile).unwrap() }\n}\n\nfn do_the_work<H: Hasher>(data_size: usize, m: usize, sloth_iter: usize, challenge_count: usize) {\n    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n    let challenges = vec![2; challenge_count];\n\n    info!(FCP_LOG, \"data_size:  {}\", prettyb(data_size); \"target\" => \"config\");\n    info!(FCP_LOG, \"challenge_count: {}\", challenge_count; \"target\" => \"config\");\n    info!(FCP_LOG, \"m: {}\", m; \"target\" => \"config\");\n    info!(FCP_LOG, \"sloth: {}\", sloth_iter; \"target\" => \"config\");\n\n    info!(FCP_LOG, \"generating fake data\");\n\n    let nodes = data_size / 32;\n\n    let replica_id: Fr = rng.gen();\n\n    let mut mmapped = file_backed_mmap_from_random_bytes(nodes);\n\n    let sp = SetupParams {\n        drg: DrgParams {\n            nodes,\n            degree: m,\n            expansion_degree: 0,\n            seed: new_seed(),\n        },\n        sloth_iter,\n    };\n\n    info!(FCP_LOG, \"running setup\");\n    let pp = DrgPoRep::<H, BucketGraph<_>>::setup(&sp).unwrap();\n\n    let start = Instant::now();\n    let mut param_duration = Duration::new(0, 0);\n\n    info!(FCP_LOG, \"running replicate\");\n    let (tau, aux) =\n        DrgPoRep::<H, _>::replicate(&pp, &replica_id.into(), &mut mmapped, None).unwrap();\n\n    let pub_inputs = PublicInputs::<H::Domain> {\n        replica_id: replica_id.into(),\n        challenges,\n        tau: Some(tau),\n    };\n\n    let priv_inputs = PrivateInputs::<H> { aux: &aux };\n\n    param_duration += start.elapsed();\n    let samples: u32 = 30;\n\n    let mut total_proving = Duration::new(0, 0);\n    let mut total_verifying = Duration::new(0, 0);\n\n    let mut proofs = Vec::with_capacity(samples as usize);\n    info!(\n        FCP_LOG,\n        \"sampling proving & verifying (samples: {})\", samples\n    );\n    for _ in 0..samples {\n        let start = Instant::now();\n        let proof =\n            DrgPoRep::<H, _>::prove(&pp, &pub_inputs, &priv_inputs).expect(\"failed to prove\");\n        total_proving += start.elapsed();\n\n        let start = Instant::now();\n        DrgPoRep::<H, _>::verify(&pp, &pub_inputs, &proof).expect(\"failed to verify\");\n        total_verifying += start.elapsed();\n        
proofs.push(proof);\n    }\n\n    // -- print statistics\n\n    let serialized_proofs = proofs.iter().fold(Vec::new(), |mut acc, p| {\n        acc.extend(p.serialize());\n        acc\n    });\n    let avg_proof_size = serialized_proofs.len() / samples as usize;\n\n    let proving_avg = total_proving / samples;\n    let proving_avg =\n        f64::from(proving_avg.subsec_nanos()) / 1_000_000_000f64 + (proving_avg.as_secs() as f64);\n\n    let verifying_avg = total_verifying / samples;\n    let verifying_avg = f64::from(verifying_avg.subsec_nanos()) / 1_000_000_000f64\n        + (verifying_avg.as_secs() as f64);\n\n    info!(FCP_LOG, \"avg_proving_time: {:?} seconds\", proving_avg; \"target\" => \"stats\");\n    info!(FCP_LOG, \"avg_verifying_time: {:?} seconds\", verifying_avg; \"target\" => \"stats\");\n    info!(FCP_LOG, \"replication_time={:?}\", param_duration; \"target\" => \"stats\");\n    info!(FCP_LOG, \"avg_proof_size: {}\", prettyb(avg_proof_size); \"target\" => \"stats\");\n}\n\nfn main() {\n    let matches = App::new(stringify!(\"DrgPoRep Vanilla Bench\"))\n        .version(\"1.0\")\n        .arg(\n            Arg::with_name(\"size\")\n                .required(true)\n                .long(\"size\")\n                .help(\"The data size in KB\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"m\")\n                .help(\"The size of m\")\n                .long(\"m\")\n                .default_value(\"6\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"sloth\")\n                .help(\"The number of sloth iterations, defaults to 1\")\n                .long(\"sloth\")\n                .default_value(\"1\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"challenges\")\n                .long(\"challenges\")\n                .help(\"How many challenges to execute, defaults to 1\")\n                .default_value(\"1\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"hasher\")\n                .long(\"hasher\")\n                .help(\"Which hasher should be used. Available: \\\"pedersen\\\", \\\"sha256\\\", \\\"blake2s\\\" (default \\\"pedersen\\\")\")\n                .default_value(\"pedersen\")\n                .takes_value(true),\n        )\n        .get_matches();\n\n    let data_size = value_t!(matches, \"size\", usize).unwrap() * 1024;\n    let m = value_t!(matches, \"m\", usize).unwrap();\n    let sloth_iter = value_t!(matches, \"sloth\", usize).unwrap();\n    let challenge_count = value_t!(matches, \"challenges\", usize).unwrap();\n\n    let hasher = value_t!(matches, \"hasher\", String).unwrap();\n    info!(FCP_LOG, \"hasher: {}\", hasher; \"target\" => \"config\");\n    match hasher.as_ref() {\n        \"pedersen\" => {\n            do_the_work::<PedersenHasher>(data_size, m, sloth_iter, challenge_count);\n        }\n        \"sha256\" => {\n            do_the_work::<Sha256Hasher>(data_size, m, sloth_iter, challenge_count);\n        }\n        \"blake2s\" => {\n            do_the_work::<Blake2sHasher>(data_size, m, sloth_iter, challenge_count);\n        }\n        _ => panic!(\"invalid hasher: {}\", hasher),\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/examples/drgporep-vanilla.rs",
    "content": "extern crate bellman;\nextern crate pairing;\nextern crate rand;\nextern crate sapling_crypto;\n#[macro_use]\nextern crate clap;\n#[cfg(feature = \"profile\")]\nextern crate gperftools;\n#[macro_use]\nextern crate slog;\n\nextern crate filecoin_proofs;\nextern crate storage_proofs;\n\nuse clap::{App, Arg};\nuse pairing::bls12_381::Bls12;\nuse rand::{Rng, SeedableRng, XorShiftRng};\nuse std::time::{Duration, Instant};\n\n#[cfg(feature = \"profile\")]\nuse gperftools::profiler::PROFILER;\n\nuse storage_proofs::drgporep::*;\nuse storage_proofs::drgraph::*;\nuse storage_proofs::example_helper::prettyb;\nuse storage_proofs::fr32::fr_into_bytes;\nuse storage_proofs::hasher::{Blake2sHasher, Hasher, PedersenHasher, Sha256Hasher};\nuse storage_proofs::porep::PoRep;\nuse storage_proofs::proof::ProofScheme;\n\nuse filecoin_proofs::FCP_LOG;\n\n#[cfg(feature = \"profile\")]\n#[inline(always)]\nfn start_profile(stage: &str) {\n    PROFILER\n        .lock()\n        .unwrap()\n        .start(format!(\"./{}.profile\", stage))\n        .unwrap();\n}\n\n#[cfg(not(feature = \"profile\"))]\n#[inline(always)]\nfn start_profile(_stage: &str) {}\n\n#[cfg(feature = \"profile\")]\n#[inline(always)]\nfn stop_profile() {\n    PROFILER.lock().unwrap().stop().unwrap();\n}\n\n#[cfg(not(feature = \"profile\"))]\n#[inline(always)]\nfn stop_profile() {}\n\nfn do_the_work<H: Hasher>(data_size: usize, m: usize, sloth_iter: usize, challenge_count: usize) {\n    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n    let challenges = vec![2; challenge_count];\n\n    info!(FCP_LOG, \"data_size:  {}\", prettyb(data_size); \"target\" => \"stats\");\n    info!(FCP_LOG, \"challenge_count: {}\", challenge_count; \"target\" => \"stats\");\n    info!(FCP_LOG, \"m: {}\", m; \"target\" => \"stats\");\n    info!(FCP_LOG, \"sloth: {}\", sloth_iter; \"target\" => \"stats\");\n\n    info!(FCP_LOG, \"generating fake data\");\n\n    let nodes = data_size / 32;\n\n    let replica_id: H::Domain = rng.gen();\n    let mut data: Vec<u8> = (0..nodes)\n        .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n        .collect();\n\n    let sp = SetupParams {\n        drg: DrgParams {\n            nodes,\n            degree: m,\n            expansion_degree: 0,\n            seed: new_seed(),\n        },\n        sloth_iter,\n    };\n\n    info!(FCP_LOG, \"running setup\");\n    start_profile(\"setup\");\n    let pp = DrgPoRep::<H, BucketGraph<H>>::setup(&sp).unwrap();\n    stop_profile();\n\n    let start = Instant::now();\n    let mut param_duration = Duration::new(0, 0);\n\n    info!(FCP_LOG, \"running replicate\");\n\n    start_profile(\"replicate\");\n    let (tau, aux) =\n        DrgPoRep::<H, _>::replicate(&pp, &replica_id, data.as_mut_slice(), None).unwrap();\n    stop_profile();\n    let pub_inputs = PublicInputs {\n        replica_id,\n        challenges,\n        tau: Some(tau),\n    };\n\n    let priv_inputs = PrivateInputs::<H> { aux: &aux };\n\n    param_duration += start.elapsed();\n    let samples: u32 = 30;\n\n    let mut total_proving = Duration::new(0, 0);\n    let mut total_verifying = Duration::new(0, 0);\n\n    let mut proofs = Vec::with_capacity(samples as usize);\n    info!(\n        FCP_LOG,\n        \"sampling proving & verifying (samples: {})\", samples\n    );\n    for _ in 0..samples {\n        let start = Instant::now();\n        start_profile(\"prove\");\n        let proof =\n            DrgPoRep::<H, _>::prove(&pp, &pub_inputs, &priv_inputs).expect(\"failed to 
prove\");\n        stop_profile();\n        total_proving += start.elapsed();\n\n        let start = Instant::now();\n        start_profile(\"verify\");\n        DrgPoRep::<H, _>::verify(&pp, &pub_inputs, &proof).expect(\"failed to verify\");\n        stop_profile();\n        total_verifying += start.elapsed();\n        proofs.push(proof);\n    }\n\n    // -- print statistics\n\n    let serialized_proofs = proofs.iter().fold(Vec::new(), |mut acc, p| {\n        acc.extend(p.serialize());\n        acc\n    });\n    let avg_proof_size = serialized_proofs.len() / samples as usize;\n\n    let proving_avg = total_proving / samples;\n    let proving_avg =\n        f64::from(proving_avg.subsec_nanos()) / 1_000_000_000f64 + (proving_avg.as_secs() as f64);\n\n    let verifying_avg = total_verifying / samples;\n    let verifying_avg = f64::from(verifying_avg.subsec_nanos()) / 1_000_000_000f64\n        + (verifying_avg.as_secs() as f64);\n\n    info!(FCP_LOG, \"avg_proving_time: {:?} seconds\", proving_avg; \"target\" => \"stats\");\n    info!(FCP_LOG, \"avg_verifying_time: {:?} seconds\", verifying_avg; \"target\" => \"stats\");\n    info!(FCP_LOG, \"replication_time: {:?}\", param_duration; \"target\" => \"stats\");\n    info!(FCP_LOG, \"avg_proof_size: {}\", prettyb(avg_proof_size); \"target\" => \"stats\");\n}\n\nfn main() {\n    let matches = App::new(stringify!(\"DrgPoRep Vanilla Bench\"))\n        .version(\"1.0\")\n        .arg(\n            Arg::with_name(\"size\")\n                .required(true)\n                .long(\"size\")\n                .help(\"The data size in KB\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"m\")\n                .help(\"The size of m\")\n                .long(\"m\")\n                .default_value(\"6\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"sloth\")\n                .help(\"The number of sloth iterations, defaults to 1\")\n                .long(\"sloth\")\n                .default_value(\"1\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"challenges\")\n                .long(\"challenges\")\n                .help(\"How many challenges to execute, defaults to 1\")\n                .default_value(\"1\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"hasher\")\n                .long(\"hasher\")\n                .help(\"Which hasher should be used.Available: \\\"pedersen\\\", \\\"sha256\\\", \\\"blake2s\\\" (default \\\"pedersen\\\")\")\n                .default_value(\"pedersen\")\n                .takes_value(true),\n        )\n        .get_matches();\n\n    let data_size = value_t!(matches, \"size\", usize).unwrap() * 1024;\n    let m = value_t!(matches, \"m\", usize).unwrap();\n    let sloth_iter = value_t!(matches, \"sloth\", usize).unwrap();\n    let challenge_count = value_t!(matches, \"challenges\", usize).unwrap();\n    let hasher = value_t!(matches, \"hasher\", String).unwrap();\n    info!(FCP_LOG, \"hasher: {}\", hasher; \"target\" => \"config\");\n    match hasher.as_ref() {\n        \"pedersen\" => {\n            do_the_work::<PedersenHasher>(data_size, m, sloth_iter, challenge_count);\n        }\n        \"sha256\" => {\n            do_the_work::<Sha256Hasher>(data_size, m, sloth_iter, challenge_count);\n        }\n        \"blake2s\" => {\n            do_the_work::<Blake2sHasher>(data_size, m, sloth_iter, challenge_count);\n        }\n        _ => 
panic!(\"invalid hasher: {}\", hasher),\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/examples/drgporep.rs",
    "content": "extern crate bellman;\nextern crate pairing;\nextern crate pbr;\nextern crate rand;\nextern crate sapling_crypto;\n\nextern crate storage_proofs;\n\nuse bellman::groth16::*;\nuse bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse pairing::bls12_381::Bls12;\nuse rand::Rng;\nuse sapling_crypto::jubjub::{JubjubBls12, JubjubEngine};\n\nuse storage_proofs::circuit;\nuse storage_proofs::circuit::variables::Root;\n\nuse storage_proofs::example_helper::Example;\nuse storage_proofs::test_helper::fake_drgpoprep_proof;\n\nstruct DrgPoRepExample<'a, E: JubjubEngine> {\n    params: &'a E::Params,\n    sloth_iter: usize,\n    replica_nodes: Vec<Option<E::Fr>>,\n    replica_nodes_paths: Vec<Vec<Option<(E::Fr, bool)>>>,\n    replica_root: Root<E>,\n    replica_parents: Vec<Vec<Option<E::Fr>>>,\n    replica_parents_paths: Vec<Vec<Vec<Option<(E::Fr, bool)>>>>,\n    data_nodes: Vec<Option<E::Fr>>,\n    data_nodes_paths: Vec<Vec<Option<(E::Fr, bool)>>>,\n    data_root: Root<E>,\n    replica_id: Option<E::Fr>,\n    m: usize,\n}\n\nimpl<'a> Circuit<Bls12> for DrgPoRepExample<'a, Bls12> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        circuit::drgporep::DrgPoRepCircuit::synthesize(\n            cs.namespace(|| \"drgporep\"),\n            self.params,\n            self.sloth_iter,\n            self.replica_nodes,\n            self.replica_nodes_paths,\n            self.replica_root,\n            self.replica_parents,\n            self.replica_parents_paths,\n            self.data_nodes,\n            self.data_nodes_paths,\n            self.data_root,\n            self.replica_id,\n            self.m,\n            false,\n        )\n    }\n}\n\n#[derive(Default)]\nstruct DrgPoRepApp {}\n\nconst SLOTH_ROUNDS: usize = 1;\n\nimpl<'a> Example<'a, DrgPoRepExample<'a, Bls12>> for DrgPoRepApp {\n    fn name() -> String {\n        \"DrgPoRep\".to_string()\n    }\n\n    fn generate_groth_params<R: Rng>(\n        &mut self,\n        rng: &mut R,\n        jubjub_params: &'a JubjubBls12,\n        tree_depth: usize,\n        challenge_count: usize,\n        m: usize,\n        sloth_iter: usize,\n    ) -> Parameters<Bls12> {\n        generate_random_parameters::<Bls12, _, _>(\n            DrgPoRepExample {\n                params: jubjub_params,\n                sloth_iter,\n                replica_nodes: vec![None; challenge_count],\n                replica_nodes_paths: vec![vec![None; tree_depth]; challenge_count],\n                replica_root: Root::Val(None),\n                replica_parents: vec![vec![None; m]; challenge_count],\n                replica_parents_paths: vec![vec![vec![None; tree_depth]; m]; challenge_count],\n                data_nodes: vec![None; challenge_count],\n                data_nodes_paths: vec![vec![None; tree_depth]; challenge_count],\n                data_root: Root::Val(None),\n                replica_id: None,\n                m,\n            },\n            rng,\n        )\n        .unwrap()\n    }\n\n    fn samples() -> usize {\n        5\n    }\n\n    fn create_circuit<R: Rng>(\n        &mut self,\n        rng: &mut R,\n        engine_params: &'a JubjubBls12,\n        tree_depth: usize,\n        challenge_count: usize,\n        _leaves: usize,\n        m: usize,\n        sloth_iter: usize,\n    ) -> DrgPoRepExample<'a, Bls12> {\n        let f = fake_drgpoprep_proof(rng, tree_depth, m, SLOTH_ROUNDS, challenge_count);\n\n        // create an instance of our circuit (with the witness)\n        DrgPoRepExample 
{\n            params: engine_params,\n            sloth_iter,\n            replica_nodes: f.replica_nodes.into_iter().map(|r| Some(r)).collect(),\n            replica_nodes_paths: f.replica_nodes_paths,\n            replica_root: Root::Val(Some(f.replica_root)),\n            replica_parents: f\n                .replica_parents\n                .iter()\n                .map(|parents| parents.iter().map(|parent| Some(*parent)).collect())\n                .collect(),\n            replica_parents_paths: f.replica_parents_paths,\n            data_nodes: f.data_nodes.into_iter().map(|d| Some(d)).collect(),\n            data_nodes_paths: f.data_nodes_paths,\n            data_root: Root::Val(Some(f.data_root)),\n            replica_id: Some(f.replica_id),\n            m,\n        }\n    }\n\n    fn verify_proof(\n        &mut self,\n        _proof: &Proof<Bls12>,\n        _pvk: &PreparedVerifyingKey<Bls12>,\n    ) -> Option<bool> {\n        // not implemented yet\n        None\n    }\n}\n\nfn main() {\n    DrgPoRepApp::main()\n}\n"
  },
  {
    "path": "filecoin-proofs/examples/encoding.rs",
    "content": "extern crate bellman;\nextern crate pairing;\nextern crate rand;\nextern crate sapling_crypto;\n#[macro_use]\nextern crate clap;\n#[cfg(feature = \"profile\")]\nextern crate gperftools;\nextern crate memmap;\nextern crate tempfile;\n#[macro_use]\nextern crate slog;\n\nextern crate filecoin_proofs;\nextern crate storage_proofs;\n\nuse clap::{App, Arg};\n#[cfg(feature = \"profile\")]\nuse gperftools::profiler::PROFILER;\nuse memmap::MmapMut;\nuse memmap::MmapOptions;\nuse pairing::bls12_381::Bls12;\nuse rand::{Rng, SeedableRng, XorShiftRng};\nuse std::fs::File;\nuse std::io::Write;\nuse std::time::Instant;\n\nuse storage_proofs::drgporep;\nuse storage_proofs::drgraph::*;\nuse storage_proofs::example_helper::prettyb;\nuse storage_proofs::fr32::fr_into_bytes;\nuse storage_proofs::hasher::{Hasher, PedersenHasher};\nuse storage_proofs::layered_drgporep;\nuse storage_proofs::proof::ProofScheme;\nuse storage_proofs::vde;\nuse storage_proofs::zigzag_drgporep::*;\n\nuse filecoin_proofs::FCP_LOG;\n\n#[cfg(feature = \"profile\")]\n#[inline(always)]\nfn start_profile(stage: &str) {\n    PROFILER\n        .lock()\n        .unwrap()\n        .start(format!(\"./{}.profile\", stage))\n        .unwrap();\n}\n\n#[cfg(not(feature = \"profile\"))]\n#[inline(always)]\nfn start_profile(_stage: &str) {}\n\n#[cfg(feature = \"profile\")]\n#[inline(always)]\nfn stop_profile() {\n    PROFILER.lock().unwrap().stop().unwrap();\n}\n\n#[cfg(not(feature = \"profile\"))]\n#[inline(always)]\nfn stop_profile() {}\n\nfn file_backed_mmap_from_random_bytes(n: usize) -> MmapMut {\n    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n    let mut tmpfile: File = tempfile::tempfile().unwrap();\n\n    for _ in 0..n {\n        tmpfile\n            .write_all(&fr_into_bytes::<Bls12>(&rng.gen()))\n            .unwrap();\n    }\n\n    unsafe { MmapOptions::new().map_mut(&tmpfile).unwrap() }\n}\n\npub fn file_backed_mmap_from(data: &[u8]) -> MmapMut {\n    let mut tmpfile: File = tempfile::tempfile().unwrap();\n    tmpfile.write_all(data).unwrap();\n\n    unsafe { MmapOptions::new().map_mut(&tmpfile).unwrap() }\n}\n\nfn do_the_work<H: 'static>(data_size: usize, m: usize, expansion_degree: usize, sloth_iter: usize)\nwhere\n    H: Hasher,\n{\n    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n    info!(FCP_LOG, \"data size: {}\", prettyb(data_size); \"target\" => \"config\");\n    info!(FCP_LOG, \"m: {}\", m; \"target\" => \"config\");\n    info!(FCP_LOG, \"expansion_degree: {}\", expansion_degree; \"target\" => \"config\");\n    info!(FCP_LOG, \"sloth: {}\", sloth_iter; \"target\" => \"config\");\n    info!(FCP_LOG, \"generating fake data\"; \"target\" => \"status\");\n\n    let nodes = data_size / 32;\n\n    let mut data = file_backed_mmap_from_random_bytes(nodes);\n\n    let replica_id: H::Domain = rng.gen();\n\n    let sp = layered_drgporep::SetupParams {\n        drg_porep_setup_params: drgporep::SetupParams {\n            drg: drgporep::DrgParams {\n                nodes,\n                degree: m,\n                expansion_degree,\n                seed: new_seed(),\n            },\n            sloth_iter,\n        },\n        layers: 1,\n        challenge_count: 1,\n    };\n\n    info!(FCP_LOG, \"running setup\");\n    start_profile(\"setup\");\n    let pp = ZigZagDrgPoRep::<H>::setup(&sp).unwrap();\n    let drgpp = pp.drg_porep_public_params;\n    stop_profile();\n\n    let start = Instant::now();\n\n    info!(FCP_LOG, 
\"encoding\");\n\n    start_profile(\"encode\");\n    vde::encode(&drgpp.graph, drgpp.sloth_iter, &replica_id, &mut data).unwrap();\n    stop_profile();\n\n    let encoding_time = start.elapsed();\n    info!(FCP_LOG, \"encoding_time: {:?}\", encoding_time; \"target\" => \"stats\");\n\n    info!(\n        FCP_LOG,\n        \"encoding time/byte: {:?}\",\n        encoding_time / data_size as u32; \"target\" => \"stats\"\n    );\n    info!(\n        FCP_LOG,\n        \"encoding time/GiB: {:?}\",\n        (1 << 30) * encoding_time / data_size as u32; \"target\" => \"stats\"\n    );\n}\n\nfn main() {\n    let matches = App::new(\"Encoding Bench\")\n        .version(\"1.0\")\n        .arg(\n            Arg::with_name(\"size\")\n                .required(true)\n                .long(\"size\")\n                .help(\"The data size in KB\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"m\")\n                .help(\"The size of m\")\n                .long(\"m\")\n                .default_value(\"5\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"exp\")\n                .help(\"Expansion degree\")\n                .long(\"expansion\")\n                .default_value(\"6\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"sloth\")\n                .help(\"The number of sloth iterations\")\n                .long(\"sloth\")\n                .default_value(\"0\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"layers\")\n                .long(\"layers\")\n                .help(\"How many layers to use\")\n                .default_value(\"10\")\n                .takes_value(true),\n        )\n        .get_matches();\n\n    let data_size = value_t!(matches, \"size\", usize).unwrap() * 1024;\n    let m = value_t!(matches, \"m\", usize).unwrap();\n    let expansion_degree = value_t!(matches, \"exp\", usize).unwrap();\n    let sloth_iter = value_t!(matches, \"sloth\", usize).unwrap();\n\n    do_the_work::<PedersenHasher>(data_size, m, expansion_degree, sloth_iter);\n}\n"
  },
  {
    "path": "filecoin-proofs/examples/ffi/.gitignore",
    "content": "libfilecoin_proofs.rs\n"
  },
  {
    "path": "filecoin-proofs/examples/ffi/main.rs",
    "content": "#![allow(non_upper_case_globals)]\n#![allow(non_camel_case_types)]\n#![allow(non_snake_case)]\n\nextern crate ffi_toolkit;\nextern crate libc;\nextern crate rand;\nextern crate tempfile;\n#[macro_use(defer)]\nextern crate scopeguard;\nextern crate sector_base;\n\ninclude!(concat!(env!(\"OUT_DIR\"), \"/libfilecoin_proofs.rs\"));\n\nuse ffi_toolkit::c_str_to_rust_str;\nuse ffi_toolkit::free_c_str;\nuse ffi_toolkit::rust_str_to_c_str;\nuse rand::{thread_rng, Rng};\nuse std::error::Error;\nuse std::ptr;\nuse std::sync::atomic::AtomicPtr;\nuse std::sync::mpsc;\nuse std::thread;\nuse std::time::Duration;\nuse tempfile::TempDir;\n\n///////////////////////////////////////////////////////////////////////////////\n// SectorBuilder lifecycle test\n///////////////////////////////\n\nfn make_piece(num_bytes_in_piece: usize) -> (String, Vec<u8>) {\n    let mut rng = thread_rng();\n    let bytes = (0..num_bytes_in_piece).map(|_| rng.gen()).collect();\n    let key = (0..16)\n        .map(|_| (0x20u8 + (rand::random::<f32>() * 96.0) as u8) as char)\n        .collect();\n    (key, bytes)\n}\n\nunsafe fn create_and_add_piece(\n    sector_builder: *mut SectorBuilder,\n    num_bytes_in_piece: usize,\n) -> (Vec<u8>, String, *mut AddPieceResponse) {\n    let (piece_key, piece_bytes) = make_piece(num_bytes_in_piece);\n\n    let c_piece_key = rust_str_to_c_str(piece_key.clone());\n    defer!(free_c_str(c_piece_key));\n\n    (\n        piece_bytes.clone(),\n        piece_key.clone(),\n        add_piece(\n            sector_builder,\n            c_piece_key,\n            &piece_bytes[0],\n            piece_bytes.len(),\n        ),\n    )\n}\n\nunsafe fn create_sector_builder(\n    metadata_dir: &TempDir,\n    staging_dir: &TempDir,\n    sealed_dir: &TempDir,\n    prover_id: [u8; 31],\n    last_committed_sector_id: u64,\n) -> (*mut SectorBuilder, u64) {\n    let mut prover_id: [u8; 31] = prover_id;\n    let sector_store_config: ConfiguredStore = ConfiguredStore_ProofTest;\n\n    let c_metadata_dir = rust_str_to_c_str(metadata_dir.path().to_str().unwrap());\n    let c_sealed_dir = rust_str_to_c_str(sealed_dir.path().to_str().unwrap());\n    let c_staging_dir = rust_str_to_c_str(staging_dir.path().to_str().unwrap());\n\n    defer!({\n        free_c_str(c_metadata_dir);\n        free_c_str(c_sealed_dir);\n        free_c_str(c_staging_dir);\n    });\n\n    let resp = init_sector_builder(\n        &sector_store_config,\n        last_committed_sector_id,\n        c_metadata_dir,\n        &mut prover_id,\n        c_sealed_dir,\n        c_staging_dir,\n        2,\n    );\n    defer!(destroy_init_sector_builder_response(resp));\n\n    if (*resp).status_code != 0 {\n        panic!(\"{}\", c_str_to_rust_str((*resp).error_msg))\n    }\n\n    let resp_2 = get_max_user_bytes_per_staged_sector((*resp).sector_builder);\n    defer!(destroy_get_max_user_bytes_per_staged_sector_response(\n        resp_2\n    ));\n\n    (\n        (*resp).sector_builder,\n        (*resp_2).max_staged_bytes_per_sector,\n    )\n}\n\nunsafe fn sector_builder_lifecycle() -> Result<(), Box<Error>> {\n    let metadata_dir = tempfile::tempdir().unwrap();\n    let staging_dir = tempfile::tempdir().unwrap();\n    let sealed_dir = tempfile::tempdir().unwrap();\n\n    let (sector_builder_a, max_bytes) =\n        create_sector_builder(&metadata_dir, &staging_dir, &sealed_dir, [0; 31], 123);\n\n    // TODO: Replace the hard-coded byte amounts with values computed\n    // from whatever was retrieved from the SectorBuilder.\n    if max_bytes != 127 {\n 
       panic!(\n            \"test assumes the wrong number of bytes (expected: {}, actual: {})\",\n            127, max_bytes\n        );\n    }\n\n    // verify that we have neither sealed nor staged sectors yet\n    {\n        let resp = get_sealed_sectors(sector_builder_a);\n        defer!(destroy_get_sealed_sectors_response(resp));\n\n        if (*resp).status_code != 0 {\n            panic!(\"{}\", c_str_to_rust_str((*resp).error_msg))\n        }\n\n        assert_eq!(0, (*resp).sectors_len);\n\n        let resp = get_staged_sectors(sector_builder_a);\n        defer!(destroy_get_staged_sectors_response(resp));\n\n        if (*resp).status_code != 0 {\n            panic!(\"{}\", c_str_to_rust_str((*resp).error_msg))\n        }\n\n        assert_eq!(0, (*resp).sectors_len);\n    }\n\n    // add first piece, which lazily provisions a new staged sector\n    {\n        let (_, _, resp) = create_and_add_piece(sector_builder_a, 10);\n        defer!(destroy_add_piece_response(resp));\n\n        if (*resp).status_code != 0 {\n            panic!(\"{}\", c_str_to_rust_str((*resp).error_msg))\n        }\n\n        assert_eq!(124, (*resp).sector_id);\n    }\n\n    // add second piece, which fits into existing staged sector\n    {\n        let (_, _, resp) = create_and_add_piece(sector_builder_a, 50);\n        defer!(destroy_add_piece_response(resp));\n\n        if (*resp).status_code != 0 {\n            panic!(\"{}\", c_str_to_rust_str((*resp).error_msg))\n        }\n\n        assert_eq!(124, (*resp).sector_id);\n    }\n\n    // add third piece, which won't fit into existing staging sector\n    {\n        let (_, _, resp) = create_and_add_piece(sector_builder_a, 100);\n        defer!(destroy_add_piece_response(resp));\n\n        if (*resp).status_code != 0 {\n            panic!(\"{}\", c_str_to_rust_str((*resp).error_msg))\n        }\n\n        // note that the sector id changed here\n        assert_eq!(125, (*resp).sector_id);\n    }\n\n    // get staged sector metadata and verify that we've now got two staged\n    // sectors\n    {\n        let resp = get_staged_sectors(sector_builder_a);\n        defer!(destroy_get_staged_sectors_response(resp));\n\n        if (*resp).status_code != 0 {\n            panic!(\"{}\", c_str_to_rust_str((*resp).error_msg))\n        }\n\n        assert_eq!(2, (*resp).sectors_len);\n    }\n\n    // drop the first sector builder, relinquishing any locks on persistence\n    destroy_sector_builder(sector_builder_a);\n\n    // create a new sector builder using same prover id, which should\n    // initialize with metadata persisted by previous sector builder\n    let (sector_builder_b, _) =\n        create_sector_builder(&metadata_dir, &staging_dir, &sealed_dir, [0; 31], 123);\n    defer!(destroy_sector_builder(sector_builder_b));\n\n    // add fourth piece, where size(piece) == max (will trigger sealing)\n    let (bytes_in, piece_key) = {\n        let (piece_bytes, piece_key, resp) = create_and_add_piece(sector_builder_b, 127);\n        defer!(destroy_add_piece_response(resp));\n\n        if (*resp).status_code != 0 {\n            panic!(\"{}\", c_str_to_rust_str((*resp).error_msg))\n        }\n\n        // sector id changed again (piece wouldn't fit)\n        assert_eq!(126, (*resp).sector_id);\n\n        (piece_bytes, piece_key)\n    };\n\n    // poll for sealed sector metadata through the FFI\n    {\n        let (result_tx, result_rx) = mpsc::channel();\n        let (kill_tx, kill_rx) = mpsc::channel();\n\n        let atomic_ptr = AtomicPtr::new(sector_builder_b);\n\n 
       let _join_handle = thread::spawn(move || {\n            let sector_builder = atomic_ptr.into_inner();\n\n            loop {\n                match kill_rx.try_recv() {\n                    Ok(_) => return,\n                    _ => (),\n                };\n\n                let resp = get_seal_status(sector_builder, 126);\n                // ensure the response is destroyed on every exit path, including the early return below\n                defer!(destroy_get_seal_status_response(resp));\n\n                if (*resp).status_code != 0 {\n                    return;\n                }\n\n                if (*resp).seal_status_code == FFISealStatus_Sealed {\n                    let _ = result_tx.send((*resp).sector_id).unwrap();\n                }\n\n                thread::sleep(Duration::from_millis(1000));\n            }\n        });\n\n        defer!({\n            let _ = kill_tx.send(true).unwrap();\n        });\n\n        // wait up to 5 minutes for sealing to complete\n        let now_sealed_sector_id = result_rx.recv_timeout(Duration::from_secs(300)).unwrap();\n\n        assert_eq!(now_sealed_sector_id, 126);\n    }\n\n    // get sealed sectors - we should have just one\n    {\n        let resp = get_sealed_sectors(sector_builder_b);\n        defer!(destroy_get_sealed_sectors_response(resp));\n\n        if (*resp).status_code != 0 {\n            panic!(\"{}\", c_str_to_rust_str((*resp).error_msg))\n        }\n\n        assert_eq!(1, (*resp).sectors_len);\n    }\n\n    // after sealing, read the bytes (causes unseal) and compare with what we\n    // added to the sector\n    {\n        let c_piece_key = rust_str_to_c_str(piece_key);\n        defer!(free_c_str(c_piece_key));\n\n        let resp = read_piece_from_sealed_sector(sector_builder_b, c_piece_key);\n        defer!(destroy_read_piece_from_sealed_sector_response(resp));\n\n        if (*resp).status_code != 0 {\n            panic!(\"{}\", c_str_to_rust_str((*resp).error_msg))\n        }\n\n        let data_ptr = (*resp).data_ptr as *mut u8;\n        let data_len = (*resp).data_len;\n        let mut bytes_out = Vec::with_capacity(data_len);\n        bytes_out.set_len(data_len);\n        ptr::copy(data_ptr, bytes_out.as_mut_ptr(), data_len);\n\n        assert_eq!(format!(\"{:x?}\", bytes_in), format!(\"{:x?}\", bytes_out))\n    }\n\n    Ok(())\n}\n\nfn main() {\n    unsafe { sector_builder_lifecycle().unwrap() };\n}\n"
  },
  {
    "path": "filecoin-proofs/examples/merklepor.rs",
    "content": "extern crate bellman;\nextern crate pairing;\nextern crate pbr;\nextern crate rand;\nextern crate sapling_crypto;\n\nextern crate storage_proofs;\n\nuse bellman::groth16::*;\nuse pairing::bls12_381::{Bls12, Fr};\nuse pairing::Field;\nuse rand::Rng;\nuse sapling_crypto::circuit::multipack;\nuse sapling_crypto::jubjub::JubjubBls12;\n\nuse storage_proofs::circuit;\nuse storage_proofs::example_helper::Example;\nuse storage_proofs::test_helper::random_merkle_path;\n\nstruct MerklePorApp {\n    auth_paths: Vec<Vec<Option<(Fr, bool)>>>,\n    root: Fr,\n    leaf: Fr,\n}\n\nimpl Default for MerklePorApp {\n    fn default() -> Self {\n        MerklePorApp {\n            auth_paths: Vec::default(),\n            leaf: Fr::zero(),\n            root: Fr::zero(),\n        }\n    }\n}\n\nimpl<'a> Example<'a, circuit::ppor::ParallelProofOfRetrievability<'a, Bls12>> for MerklePorApp {\n    fn name() -> String {\n        \"Multi-Challenge MerklePor\".to_string()\n    }\n\n    fn create_circuit<R: Rng>(\n        &mut self,\n        rng: &mut R,\n        engine_params: &'a JubjubBls12,\n        tree_depth: usize,\n        challenge_count: usize,\n        _leaves: usize,\n        _m: usize,\n        _sloth_iter: usize,\n    ) -> circuit::ppor::ParallelProofOfRetrievability<'a, Bls12> {\n        let (auth_path, leaf, root) = random_merkle_path(rng, tree_depth);\n        self.root = root;\n        self.leaf = leaf;\n        self.auth_paths = (0..challenge_count).map(|_| auth_path.clone()).collect();\n        let values = (0..challenge_count).map(|_| Some(self.leaf)).collect();\n\n        // create an instance of our circuit (with the witness)\n        circuit::ppor::ParallelProofOfRetrievability {\n            params: engine_params,\n            values,\n            auth_paths: self.auth_paths.clone(),\n            root: Some(self.root),\n        }\n    }\n\n    fn generate_groth_params<R: Rng>(\n        &mut self,\n        rng: &mut R,\n        jubjub_params: &JubjubBls12,\n        tree_depth: usize,\n        challenge_count: usize,\n        _m: usize,\n        _sloth_iter: usize,\n    ) -> Parameters<Bls12> {\n        generate_random_parameters::<Bls12, _, _>(\n            circuit::ppor::ParallelProofOfRetrievability {\n                params: jubjub_params,\n                values: vec![None; challenge_count],\n                auth_paths: vec![vec![None; tree_depth]; challenge_count],\n                root: None,\n            },\n            rng,\n        )\n        .unwrap()\n    }\n\n    fn samples() -> usize {\n        5\n    }\n\n    fn create_proof<R: Rng>(\n        &mut self,\n        rng: &mut R,\n        engine_params: &'a JubjubBls12,\n        groth_params: &Parameters<Bls12>,\n        tree_depth: usize,\n        challenge_count: usize,\n        _leaves: usize,\n        _m: usize,\n        _sloth_iter: usize,\n    ) -> Proof<Bls12> {\n        let (auth_path, leaf, root) = random_merkle_path(rng, tree_depth);\n        self.root = root;\n        self.leaf = leaf;\n        self.auth_paths = (0..challenge_count).map(|_| auth_path.clone()).collect();\n        let values = (0..challenge_count).map(|_| Some(self.leaf)).collect();\n\n        // create an instance of our circuit (with the witness)\n        let proof = {\n            let c = circuit::ppor::ParallelProofOfRetrievability {\n                params: engine_params,\n                values,\n                auth_paths: self.auth_paths.clone(),\n                root: Some(self.root),\n            };\n\n            // create groth16 
proof\n            create_random_proof(c, groth_params, rng).expect(\"failed to create proof\")\n        };\n\n        proof\n    }\n\n    fn verify_proof(\n        &mut self,\n        proof: &Proof<Bls12>,\n        pvk: &PreparedVerifyingKey<Bls12>,\n    ) -> Option<bool> {\n        // -- generate public inputs\n\n        let auth_paths = self.auth_paths.clone();\n        let len = auth_paths.len();\n\n        // regen values, avoids storing\n        let values: Vec<_> = (0..len).map(|_| Some(&self.leaf)).collect();\n\n        let mut expected_inputs: Vec<Fr> = (0..len)\n            .flat_map(|j| {\n                let auth_path_bits: Vec<bool> =\n                    auth_paths[j].iter().map(|p| p.unwrap().1).collect();\n                let packed_auth_path: Vec<Fr> =\n                    multipack::compute_multipacking::<Bls12>(&auth_path_bits);\n\n                let mut input = vec![*values[j].unwrap()];\n                input.extend(packed_auth_path);\n                input\n            })\n            .collect();\n\n        // add the root as the last one\n        expected_inputs.push(self.root);\n\n        // -- verify proof with public inputs\n        Some(verify_proof(pvk, proof, &expected_inputs).expect(\"failed to verify proof\"))\n    }\n}\n\nfn main() {\n    MerklePorApp::main()\n}\n"
  },
  {
    "path": "filecoin-proofs/examples/zigzag.rs",
    "content": "extern crate bellman;\nextern crate pairing;\nextern crate rand;\nextern crate sapling_crypto;\n#[macro_use]\nextern crate clap;\n#[cfg(feature = \"profile\")]\nextern crate gperftools;\nextern crate memmap;\nextern crate tempfile;\n#[macro_use]\nextern crate slog;\n\nextern crate filecoin_proofs;\nextern crate storage_proofs;\n\nuse clap::{App, Arg};\n#[cfg(feature = \"profile\")]\nuse gperftools::profiler::PROFILER;\nuse memmap::MmapMut;\nuse memmap::MmapOptions;\nuse pairing::bls12_381::Bls12;\nuse rand::{Rng, SeedableRng, XorShiftRng};\nuse std::fs::File;\nuse std::io::Write;\nuse std::time::{Duration, Instant};\n\nuse bellman::Circuit;\nuse sapling_crypto::jubjub::JubjubBls12;\n\nuse storage_proofs::circuit::test::*;\nuse storage_proofs::circuit::zigzag::{ZigZagCircuit, ZigZagCompound};\nuse storage_proofs::compound_proof::{self, CircuitComponent, CompoundProof};\nuse storage_proofs::drgporep;\nuse storage_proofs::drgraph::*;\nuse storage_proofs::example_helper::prettyb;\nuse storage_proofs::fr32::fr_into_bytes;\nuse storage_proofs::hasher::{Blake2sHasher, Hasher, PedersenHasher, Sha256Hasher};\nuse storage_proofs::layered_drgporep;\nuse storage_proofs::porep::PoRep;\nuse storage_proofs::proof::ProofScheme;\nuse storage_proofs::zigzag_drgporep::*;\n\nuse filecoin_proofs::FCP_LOG;\n\n#[cfg(feature = \"profile\")]\n#[inline(always)]\nfn start_profile(stage: &str) {\n    PROFILER\n        .lock()\n        .unwrap()\n        .start(format!(\"./{}.profile\", stage))\n        .unwrap();\n}\n\n#[cfg(not(feature = \"profile\"))]\n#[inline(always)]\nfn start_profile(_stage: &str) {}\n\n#[cfg(feature = \"profile\")]\n#[inline(always)]\nfn stop_profile() {\n    PROFILER.lock().unwrap().stop().unwrap();\n}\n\n#[cfg(not(feature = \"profile\"))]\n#[inline(always)]\nfn stop_profile() {}\n\nfn file_backed_mmap_from_random_bytes(n: usize) -> MmapMut {\n    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n    let mut tmpfile: File = tempfile::tempfile().unwrap();\n\n    for _ in 0..n {\n        tmpfile\n            .write_all(&fr_into_bytes::<Bls12>(&rng.gen()))\n            .unwrap();\n    }\n\n    unsafe { MmapOptions::new().map_mut(&tmpfile).unwrap() }\n}\n\npub fn file_backed_mmap_from(data: &[u8]) -> MmapMut {\n    let mut tmpfile: File = tempfile::tempfile().unwrap();\n    tmpfile.write_all(data).unwrap();\n\n    unsafe { MmapOptions::new().map_mut(&tmpfile).unwrap() }\n}\n\nfn do_the_work<H: 'static>(\n    data_size: usize,\n    m: usize,\n    expansion_degree: usize,\n    sloth_iter: usize,\n    challenge_count: usize,\n    layers: usize,\n    partitions: usize,\n    circuit: bool,\n    groth: bool,\n    bench: bool,\n    extract: bool,\n) where\n    H: Hasher,\n{\n    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n    info!(FCP_LOG, \"data size: {}\", prettyb(data_size); \"target\" => \"config\");\n    info!(FCP_LOG, \"m: {}\", m; \"target\" => \"config\");\n    info!(FCP_LOG, \"expansion_degree: {}\", expansion_degree; \"target\" => \"config\");\n    info!(FCP_LOG, \"sloth: {}\", sloth_iter; \"target\" => \"config\");\n    info!(FCP_LOG, \"challenge_count: {}\", challenge_count; \"target\" => \"config\");\n    info!(FCP_LOG, \"layers: {}\", layers; \"target\" => \"config\");\n    info!(FCP_LOG, \"partitions: {}\", partitions; \"target\" => \"config\");\n    info!(FCP_LOG, \"circuit: {:?}\", circuit; \"target\" => \"config\");\n    info!(FCP_LOG, \"groth: {:?}\", groth; \"target\" => 
\"config\");\n    info!(FCP_LOG, \"bench: {:?}\", bench; \"target\" => \"config\");\n\n    info!(FCP_LOG, \"generating fake data\"; \"target\" => \"status\");\n\n    let nodes = data_size / 32;\n\n    let data = file_backed_mmap_from_random_bytes(nodes);\n\n    let replica_id: H::Domain = rng.gen();\n    let mut data_copy = file_backed_mmap_from(&data);\n    let sp = layered_drgporep::SetupParams {\n        drg_porep_setup_params: drgporep::SetupParams {\n            drg: drgporep::DrgParams {\n                nodes,\n                degree: m,\n                expansion_degree,\n                seed: new_seed(),\n            },\n            sloth_iter,\n        },\n        layers,\n        challenge_count,\n    };\n\n    info!(FCP_LOG, \"running setup\");\n    start_profile(\"setup\");\n    let pp = ZigZagDrgPoRep::<H>::setup(&sp).unwrap();\n    stop_profile();\n\n    let start = Instant::now();\n    let mut replication_duration = Duration::new(0, 0);\n\n    info!(FCP_LOG, \"running replicate\");\n\n    start_profile(\"replicate\");\n    let (tau, aux) =\n        ZigZagDrgPoRep::<H>::replicate(&pp, &replica_id, &mut data_copy, None).unwrap();\n    stop_profile();\n    let pub_inputs = layered_drgporep::PublicInputs::<H::Domain> {\n        replica_id,\n        challenge_count,\n        tau: Some(tau.simplify().into()),\n        comm_r_star: tau.comm_r_star,\n        k: Some(0),\n    };\n\n    let priv_inputs = layered_drgporep::PrivateInputs {\n        replica: &data,\n        aux,\n        tau: tau.layer_taus,\n    };\n\n    replication_duration += start.elapsed();\n\n    info!(FCP_LOG, \"replication_time: {:?}\", replication_duration; \"target\" => \"stats\");\n    info!(\n        FCP_LOG,\n        \"replication_time/byte: {:?}\",\n        replication_duration / data_size as u32; \"target\" => \"stats\"\n    );\n    info!(\n        FCP_LOG,\n        \"replication_time/GiB: {:?}\",\n        (1 << 30) * replication_duration / data_size as u32; \"target\" => \"stats\"\n    );\n\n    let mut total_proving = Duration::new(0, 0);\n    info!(FCP_LOG, \"generating one proof\");\n\n    let start = Instant::now();\n    start_profile(\"prove\");\n    let all_partition_proofs =\n        ZigZagDrgPoRep::<H>::prove_all_partitions(&pp, &pub_inputs, &priv_inputs, partitions)\n            .expect(\"failed to prove\");\n    stop_profile();\n    let vanilla_proving = start.elapsed();\n    total_proving += vanilla_proving;\n\n    let proving_avg = total_proving;\n    let proving_avg =\n        f64::from(proving_avg.subsec_nanos()) / 1_000_000_000f64 + (proving_avg.as_secs() as f64);\n\n    // -- print statistics\n\n    //    let serialized_proofs = proofs.iter().fold(Vec::new(), |mut acc, p| {\n    //        acc.extend(p.serialize());\n    //        acc\n    //    });\n    //    let avg_proof_size = serialized_proofs.len() / samples as usize;\n    //\n    //info!(target: \"stats\", \"Average proof size {}\", prettyb(avg_proof_size));\n\n    info!(FCP_LOG, \"vanilla_proving_time: {:?} seconds\", proving_avg; \"target\" => \"stats\");\n\n    let samples: u32 = 5;\n    info!(FCP_LOG, \"sampling verifying (samples: {})\", samples);\n    let mut total_verifying = Duration::new(0, 0);\n\n    start_profile(\"verify\");\n    for _ in 0..samples {\n        let start = Instant::now();\n        let verified =\n            ZigZagDrgPoRep::<H>::verify_all_partitions(&pp, &pub_inputs, &all_partition_proofs)\n                .expect(\"failed during verification\");\n        if !verified {\n            info!(FCP_LOG, 
\"Verification failed.\"; \"target\" => \"results\");\n        };\n        total_verifying += start.elapsed();\n    }\n    info!(FCP_LOG, \"Verification complete\"; \"target\" => \"status\");\n    stop_profile();\n\n    let verifying_avg = total_verifying / samples;\n    let verifying_avg = f64::from(verifying_avg.subsec_nanos()) / 1_000_000_000f64\n        + (verifying_avg.as_secs() as f64);\n    info!(FCP_LOG, \"average_vanilla_verifying_time: {:?} seconds\", verifying_avg; \"target\" => \"stats\");\n\n    if circuit || groth || bench {\n        let engine_params = JubjubBls12::new();\n        let compound_public_params = compound_proof::PublicParams {\n            vanilla_params: pp.clone(),\n            engine_params: &engine_params,\n            partitions: Some(partitions),\n        };\n        if circuit || bench {\n            info!(FCP_LOG, \"Performing circuit bench.\"; \"target\" => \"status\");\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            ZigZagCompound::circuit(\n                &pub_inputs,\n                <ZigZagCircuit<Bls12, H> as CircuitComponent>::ComponentPrivateInputs::default(),\n                &all_partition_proofs[0],\n                &pp,\n                &engine_params,\n            )\n            .synthesize(&mut cs)\n            .expect(\"failed to synthesize circuit\");\n\n            info!(FCP_LOG, \"circuit_num_inputs: {}\", cs.num_inputs(); \"target\" => \"stats\");\n            info!(FCP_LOG, \"circuit_num_constraints: {}\", cs.num_constraints(); \"target\" => \"stats\");\n\n            if circuit {\n                println!(\"{}\", cs.pretty_print());\n            }\n        }\n\n        if groth {\n            info!(FCP_LOG, \"Performing circuit groth.\"; \"target\" => \"status\");\n            let multi_proof = {\n                // TODO: Make this a macro.\n                let start = Instant::now();\n                start_profile(\"groth-prove\");\n                let result =\n                    ZigZagCompound::prove(&compound_public_params, &pub_inputs, &priv_inputs, None)\n                        .unwrap();\n                stop_profile();\n                let groth_proving = start.elapsed();\n                info!(FCP_LOG, \"groth_proving_time: {:?} seconds\", groth_proving; \"target\" => \"stats\");\n                total_proving += groth_proving;\n                info!(FCP_LOG, \"combined_proving_time: {:?} seconds\", total_proving; \"target\" => \"stats\");\n                result\n            };\n            info!(FCP_LOG, \"sampling groth verifying (samples: {})\", samples);\n            let verified = {\n                let mut total_groth_verifying = Duration::new(0, 0);\n                let mut result = true;\n                start_profile(\"groth-verify\");\n                for _ in 0..samples {\n                    let start = Instant::now();\n                    let cur_result =\n                        ZigZagCompound::verify(&compound_public_params, &pub_inputs, &multi_proof)\n                            .unwrap();\n                    // If one verification fails, result becomes permanently false.\n                    result = result && cur_result;\n                    total_groth_verifying += start.elapsed();\n                }\n                stop_profile();\n                let avg_groth_verifying = total_groth_verifying / samples;\n                info!(FCP_LOG, \"average_groth_verifying_time: {:?} seconds\", avg_groth_verifying; \"target\" => \"stats\");\n                result\n            
};\n            assert!(verified);\n        }\n    }\n\n    if extract {\n        let start = Instant::now();\n        info!(FCP_LOG, \"Extracting.\");\n        start_profile(\"extract\");\n        let decoded_data = ZigZagDrgPoRep::<H>::extract_all(&pp, &replica_id, &data_copy).unwrap();\n        stop_profile();\n        let extracting = start.elapsed();\n        info!(FCP_LOG, \"extracting_time: {:?}\", extracting; \"target\" => \"stats\");\n\n        assert_eq!(&(*data), decoded_data.as_slice());\n    }\n}\n\nfn main() {\n    let matches = App::new(\"ZigZag Bench\")\n        .version(\"1.0\")\n        .arg(\n            Arg::with_name(\"size\")\n                .required(true)\n                .long(\"size\")\n                .help(\"The data size in KB\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"m\")\n                .help(\"The size of m\")\n                .long(\"m\")\n                .default_value(\"5\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"exp\")\n                .help(\"Expansion degree\")\n                .long(\"expansion\")\n                .default_value(\"6\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"sloth\")\n                .help(\"The number of sloth iterations\")\n                .long(\"sloth\")\n                .default_value(\"0\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"challenges\")\n                .long(\"challenges\")\n                .help(\"How many challenges to execute\")\n                .default_value(\"1\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"hasher\")\n                .long(\"hasher\")\n                .help(\"Which hasher should be used. Available: \\\"pedersen\\\", \\\"sha256\\\", \\\"blake2s\\\" (default \\\"pedersen\\\")\")\n                .default_value(\"pedersen\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"layers\")\n                .long(\"layers\")\n                .help(\"How many layers to use\")\n                .default_value(\"10\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"partitions\")\n                .long(\"partitions\")\n                .help(\"How many circuit partitions to use\")\n                .default_value(\"1\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"groth\")\n                .long(\"groth\")\n                .help(\"Generate and verify a groth circuit proof.\")\n        )\n        .arg(\n            Arg::with_name(\"no-bench\")\n                .long(\"no-bench\")\n                .help(\"Don't synthesize and report inputs/constraints for a circuit.\")\n        )\n        .arg(\n            Arg::with_name(\"circuit\")\n                .long(\"circuit\")\n                .help(\"Print the constraint system.\")\n        )\n        .arg(\n            Arg::with_name(\"extract\")\n                .long(\"extract\")\n                .help(\"Extract data after proving and verifying.\")\n        )\n        .get_matches();\n\n    let data_size = value_t!(matches, \"size\", usize).unwrap() * 1024;\n    let m = value_t!(matches, \"m\", usize).unwrap();\n    let expansion_degree = value_t!(matches, \"exp\", usize).unwrap();\n    let sloth_iter = value_t!(matches, \"sloth\", usize).unwrap();\n    let 
challenge_count = value_t!(matches, \"challenges\", usize).unwrap();\n    let hasher = value_t!(matches, \"hasher\", String).unwrap();\n    let layers = value_t!(matches, \"layers\", usize).unwrap();\n    let partitions = value_t!(matches, \"partitions\", usize).unwrap();\n    let groth = matches.is_present(\"groth\");\n    let bench = !matches.is_present(\"no-bench\");\n    let circuit = matches.is_present(\"circuit\");\n    let extract = matches.is_present(\"extract\");\n\n    info!(FCP_LOG, \"hasher: {}\", hasher; \"target\" => \"config\");\n    match hasher.as_ref() {\n        \"pedersen\" => {\n            do_the_work::<PedersenHasher>(\n                data_size,\n                m,\n                expansion_degree,\n                sloth_iter,\n                challenge_count,\n                layers,\n                partitions,\n                circuit,\n                groth,\n                bench,\n                extract,\n            );\n        }\n        \"sha256\" => {\n            do_the_work::<Sha256Hasher>(\n                data_size,\n                m,\n                expansion_degree,\n                sloth_iter,\n                challenge_count,\n                layers,\n                partitions,\n                circuit,\n                groth,\n                bench,\n                extract,\n            );\n        }\n        \"blake2s\" => {\n            do_the_work::<Blake2sHasher>(\n                data_size,\n                m,\n                expansion_degree,\n                sloth_iter,\n                challenge_count,\n                layers,\n                partitions,\n                circuit,\n                groth,\n                bench,\n                extract,\n            );\n        }\n        _ => panic!(\"invalid hasher: {}\", hasher),\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/parameters.json",
    "content": "{\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params\": {\n    \"cid\": \"QmNUKXCEcjMRh8ayFG2X9RYUuc2SK5XRVsSVTqJmNWAgSp\",\n    \"digest\": \"fe10d43b607dd6687f30428476076ebb\",\n    \"sector_size\": 2048\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk\": {\n    \"cid\": \"QmRyV1DvF57cSnnwUoocKbPiULoLdfnfWpVWi8BSsMN6KR\",\n    \"digest\": \"8aaca32ca9a1c6a431b99e695b443e69\",\n    \"sector_size\": 2048\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params\": {\n    \"cid\": \"QmTvwEyFVcjivKUX9AqZrC4mfjLSN2JJTucLJfNaWqCPmD\",\n    \"digest\": \"1cc1bf83c9e3d9b2d994ad2ec946a79f\",\n    \"sector_size\": 536870912\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk\": {\n    \"cid\": \"QmVfgowqdh3ruAHqQ8LA6L4VdSYwam5e8VmSEtZXBoAudC\",\n    \"digest\": \"377659f83c6714703b17828f603038fc\",\n    \"sector_size\": 536870912\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params\": {\n    \"cid\": \"QmQ2HrKCWbtWQNNQiBj3BFE8QrqMyed8P5Vw5vyyzuSMsF\",\n    \"digest\": \"2e15ec3fbff51abf66d241252fb8babd\",\n    \"sector_size\": 2048\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk\": {\n    \"cid\": \"QmVZRduda8L1AYsT3u3uk2kqiMnwm5Sx9D8pZbTVHAZG5i\",\n    \"digest\": \"11c74ae0068ca7e4a5fd8cb1eaf5b511\",\n    \"sector_size\": 2048\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params\": {\n    \"cid\": \"QmPQkry7TXuE8nxHFAySp3X8qRXMYj2ArffoFxF2C1hYwf\",\n    \"digest\": \"526edf009176616771af4ba915eb5073\",\n    \"sector_size\": 8388608\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk\": {\n    \"cid\": \"QmT5bjrKBUpWEfaveWoPCu96EuHN2HuzbRzS9tSxttPCzw\",\n    \"digest\": \"c29e6b2927b8a28593f7c0c035b32cf5\",\n    \"sector_size\": 8388608\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params\": {\n    \"cid\": \"QmXn1v64YTKLAH6yemhotr2dp1ZtjfspT328itKrMfnBW6\",\n    \"digest\": \"66459a78bd5e0225a19f140068620b7f\",\n    \"sector_size\": 8388608\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk\": {\n    \"cid\": \"QmTax8iBqjyP3EMUSnkSoxpjxh7dWrpE5RbfN2FA4oUgc4\",\n    \"digest\": \"e482988346217c846cecd80dfffef35f\",\n    \"sector_size\": 8388608\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params\": {\n    \"cid\": \"QmdVN2xTAJtKLrUdXfP7JjGpMGnZRmbDT8FHdkzxruRoLQ\",\n    \"digest\": \"4b27a62d2179523a2176ec7a1f2837be\",\n    \"sector_size\": 536870912\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 0, 0>-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk\": {\n    \"cid\": 
\"QmakhHMzRBB85LLniDeRif71prLckqj7RHCc3NSgZsevQF\",\n    \"digest\": \"21271b25537a42e79247bd403e3ba37e\",\n    \"sector_size\": 536870912\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 8, 0>-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params\": {\n    \"cid\": \"QmZwPa4C5iUKPwGL7pkzZVNpn1Z9QkELneLAX4JFdRc7m5\",\n    \"digest\": \"263b3ee83cfff7c287900346742e363a\",\n    \"sector_size\": 34359738368\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 8, 0>-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk\": {\n    \"cid\": \"QmUVAe53gJ4eC7wmDG2K5WWEtTvfQJaAPBstEtfznJrPhR\",\n    \"digest\": \"e6bc2cb5808b6a5cde7b51bfe0543313\",\n    \"sector_size\": 34359738368\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 8, 0>-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params\": {\n    \"cid\": \"QmXiiXheXvZV8rVkdDCFPdUYJVCNa67THGa7VgQRkqNojy\",\n    \"digest\": \"f031cdaf063c00baa637eae5e4b338c8\",\n    \"sector_size\": 34359738368\n  },\n  \"v25-proof-of-spacetime-fallback-MerkleTree<PoseidonHasher, 8, 8, 0>-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk\": {\n    \"cid\": \"QmXSzhELrQMBhJgYqpT8qTL9Piwti3eziCYt49EJ77368r\",\n    \"digest\": \"3f7f6e287a32083f131d4948e04e6e5b\",\n    \"sector_size\": 34359738368\n  },\n  \"v25-stacked-proof-of-replication-MerkleTree<PoseidonHasher, 8, 0, 0>-Sha256Hasher-840969a6a9533823ecdc37310ef8c99d35991a2145300e10be0b883f1226a0f6.params\": {\n    \"cid\": \"QmbaFhfNtz6TuQdiC5oyL5rWSyUNQzcD68A6PT9mCTbvd7\",\n    \"digest\": \"c0cbe5bd951eb944557784a5a423fd18\",\n    \"sector_size\": 2048\n  },\n  \"v25-stacked-proof-of-replication-MerkleTree<PoseidonHasher, 8, 0, 0>-Sha256Hasher-840969a6a9533823ecdc37310ef8c99d35991a2145300e10be0b883f1226a0f6.vk\": {\n    \"cid\": \"QmYfeAWeg7mKQJvoUCVatqa36WFbWYH2B9JMrJTorhJdUu\",\n    \"digest\": \"3ed77a85380eeacfea658fc4b1ad8b95\",\n    \"sector_size\": 2048\n  },\n  \"v25-stacked-proof-of-replication-MerkleTree<PoseidonHasher, 8, 0, 0>-Sha256Hasher-e3c3fd959a83bf60522a401dc3bf0e2d48f0e2172bcdf4c0cb3c39fa4deacd87.params\": {\n    \"cid\": \"QmYuGgnRHx9x4DAVtkGYGir8SDvRE17pUMH17riEpWguuN\",\n    \"digest\": \"b59249298e9d1bb9d25891b828e03c94\",\n    \"sector_size\": 536870912\n  },\n  \"v25-stacked-proof-of-replication-MerkleTree<PoseidonHasher, 8, 0, 0>-Sha256Hasher-e3c3fd959a83bf60522a401dc3bf0e2d48f0e2172bcdf4c0cb3c39fa4deacd87.vk\": {\n    \"cid\": \"QmUE4Qhd3vUPMQwh1TPJkVxZVisxoLKj93ZDU3zfW7koc4\",\n    \"digest\": \"b4e3e2ea3eba88d2eba3d59472ef4094\",\n    \"sector_size\": 536870912\n  },\n  \"v25-stacked-proof-of-replication-MerkleTree<PoseidonHasher, 8, 0, 0>-Sha256Hasher-e4a49558d04647264048879511e843136e4488499e23bc442a341083a19ee79c.params\": {\n    \"cid\": \"QmePVNPMxzDuPF3mQaZ9Ld1hTGhResvGZgZ61NXy5cDQPK\",\n    \"digest\": \"0deb36662833379267609fc4e5f4176b\",\n    \"sector_size\": 8388608\n  },\n  \"v25-stacked-proof-of-replication-MerkleTree<PoseidonHasher, 8, 0, 0>-Sha256Hasher-e4a49558d04647264048879511e843136e4488499e23bc442a341083a19ee79c.vk\": {\n    \"cid\": \"QmWLpw8pLwuCGiUQGQiwuXTjKcvPwsaS573gQ6YPc67jVm\",\n    \"digest\": \"1618f598e3a5c26acee17540aa5cd536\",\n    \"sector_size\": 8388608\n  },\n  \"v25-stacked-proof-of-replication-MerkleTree<PoseidonHasher, 8, 8, 0>-Sha256Hasher-8a0719d8b9de3605f89b084c73210dfe2a557407c6343f8d32640094f2c9d074.params\": {\n    \"cid\": \"QmdtfjaJpqE8pRt1cmceh8c2Qj8GNwrzmmSmckZr6VDAWR\",\n    \"digest\": 
\"18796da53b41f23e341d19ce7954f647\",\n    \"sector_size\": 34359738368\n  },\n  \"v25-stacked-proof-of-replication-MerkleTree<PoseidonHasher, 8, 8, 0>-Sha256Hasher-8a0719d8b9de3605f89b084c73210dfe2a557407c6343f8d32640094f2c9d074.vk\": {\n    \"cid\": \"QmYF8Y17nHYAvbRA7NCQMs31VsBiMcAbwrViZwyT4Gvb8C\",\n    \"digest\": \"39d80879d4d7353e2ed5771670d97dfc\",\n    \"sector_size\": 34359738368\n  }\n}"
  },
  {
    "path": "filecoin-proofs/src/api/fake_seal.rs",
    "content": "use std::fs::File;\nuse std::io::Write;\nuse std::path::Path;\n\nuse anyhow::{Context, Result};\nuse bincode::serialize;\nuse filecoin_hashers::{Domain, Hasher};\nuse rand::{thread_rng, Rng};\nuse storage_proofs_core::{cache_key::CacheKey, merkle::MerkleTreeTrait};\nuse storage_proofs_porep::stacked::StackedDrg;\n\nuse crate::{\n    constants::DefaultPieceHasher,\n    types::{Commitment, PaddedBytesAmount, PoRepConfig},\n};\n\npub fn fauxrep<R: AsRef<Path>, S: AsRef<Path>, Tree: 'static + MerkleTreeTrait>(\n    porep_config: PoRepConfig,\n    cache_path: R,\n    out_path: S,\n) -> Result<Commitment> {\n    let mut rng = thread_rng();\n    fauxrep_aux::<_, R, S, Tree>(&mut rng, porep_config, cache_path, out_path)\n}\n\npub fn fauxrep_aux<R: Rng, S: AsRef<Path>, T: AsRef<Path>, Tree: 'static + MerkleTreeTrait>(\n    mut rng: &mut R,\n    porep_config: PoRepConfig,\n    cache_path: S,\n    out_path: T,\n) -> Result<Commitment> {\n    let sector_bytes = PaddedBytesAmount::from(porep_config).0;\n\n    {\n        // Create a sector full of null bytes at `out_path`.\n        let file = File::create(&out_path)?;\n        file.set_len(sector_bytes)?;\n    }\n\n    let fake_comm_c = <Tree::Hasher as Hasher>::Domain::random(&mut rng);\n    let (comm_r, p_aux) = StackedDrg::<Tree, DefaultPieceHasher>::fake_replicate_phase2(\n        fake_comm_c,\n        out_path,\n        &cache_path,\n        sector_bytes as usize,\n    )?;\n\n    let p_aux_path = cache_path.as_ref().join(CacheKey::PAux.to_string());\n    let mut f_p_aux = File::create(&p_aux_path)\n        .with_context(|| format!(\"could not create file p_aux={:?}\", p_aux_path))?;\n    let p_aux_bytes = serialize(&p_aux)?;\n    f_p_aux\n        .write_all(&p_aux_bytes)\n        .with_context(|| format!(\"could not write to file p_aux={:?}\", p_aux_path))?;\n\n    let mut commitment = [0u8; 32];\n    commitment[..].copy_from_slice(&comm_r.into_bytes()[..]);\n    Ok(commitment)\n}\n\npub fn fauxrep2<R: AsRef<Path>, S: AsRef<Path>, Tree: 'static + MerkleTreeTrait>(\n    cache_path: R,\n    existing_p_aux_path: S,\n) -> Result<Commitment> {\n    let mut rng = thread_rng();\n\n    let fake_comm_c = <Tree::Hasher as Hasher>::Domain::random(&mut rng);\n\n    let (comm_r, p_aux) =\n        StackedDrg::<Tree, DefaultPieceHasher>::fake_comm_r(fake_comm_c, existing_p_aux_path)?;\n\n    let p_aux_path = cache_path.as_ref().join(CacheKey::PAux.to_string());\n    let mut f_p_aux = File::create(&p_aux_path)\n        .with_context(|| format!(\"could not create file p_aux={:?}\", p_aux_path))?;\n    let p_aux_bytes = serialize(&p_aux)?;\n    f_p_aux\n        .write_all(&p_aux_bytes)\n        .with_context(|| format!(\"could not write to file p_aux={:?}\", p_aux_path))?;\n\n    let mut commitment = [0u8; 32];\n    commitment[..].copy_from_slice(&comm_r.into_bytes()[..]);\n    Ok(commitment)\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/internal.rs",
    "content": "use std::fs::File;\nuse std::io::{BufWriter, Read, Write};\nuse std::path::PathBuf;\nuse std::{thread, time};\n\nuse bellman::groth16;\nuse pairing::bls12_381::Bls12;\nuse pairing::Engine;\nuse sapling_crypto::jubjub::JubjubBls12;\n\nuse sector_base::api::disk_backed_storage::REAL_SECTOR_SIZE;\nuse sector_base::api::sector_store::SectorConfig;\nuse sector_base::io::fr32::write_unpadded;\nuse std::path::Path;\nuse storage_proofs::circuit::multi_proof::MultiProof;\nuse storage_proofs::circuit::zigzag::ZigZagCompound;\nuse storage_proofs::compound_proof::{self, CompoundProof};\nuse storage_proofs::drgporep::{self, DrgParams};\nuse storage_proofs::drgraph::{new_seed, DefaultTreeHasher};\nuse storage_proofs::fr32::{bytes_into_fr, fr_into_bytes, Fr32Ary};\nuse storage_proofs::hasher::Hasher;\nuse storage_proofs::layered_drgporep;\nuse storage_proofs::merkle::MerkleTree;\nuse storage_proofs::parameter_cache::{\n    parameter_cache_dir, parameter_cache_path, read_cached_params, write_params_to_cache,\n};\nuse storage_proofs::porep::{replica_id, PoRep, Tau};\nuse storage_proofs::proof::ProofScheme;\nuse storage_proofs::zigzag_drgporep::ZigZagDrgPoRep;\nuse storage_proofs::zigzag_graph::ZigZagBucketGraph;\n\nuse crate::error;\n\ntype Commitment = [u8; 32];\n\n/// FrSafe is an array of the largest whole number of bytes guaranteed not to overflow the field.\ntype FrSafe = [u8; 31];\n\n/// How big, in bytes, is the SNARK proof exposed by the API?\n///\n/// Note: These values need to be kept in sync with what's in api/mod.rs.\n/// Due to limitations of cbindgen, we can't define a constant whose value is\n/// a non-primitive (e.g. an expression like 192 * 2 or internal::STUFF) and\n/// see the constant in the generated C-header file.\nconst SNARK_BYTES: usize = 192;\nconst POREP_PARTITIONS: usize = 2;\nconst POREP_PROOF_BYTES: usize = SNARK_BYTES * POREP_PARTITIONS;\n\ntype SnarkProof = [u8; POREP_PROOF_BYTES];\n\n/// How big should a fake sector be when faking proofs?\nconst FAKE_SECTOR_BYTES: usize = 128;\n\nfn dummy_parameter_cache_path(sector_config: &SectorConfig, sector_size: usize) -> PathBuf {\n    parameter_cache_path(&format!(\n        \"{}[{}]\",\n        sector_config.dummy_parameter_cache_name(),\n        sector_size\n    ))\n}\n\npub const OFFICIAL_ZIGZAG_PARAM_FILENAME: &str = \"params.out\";\n\nlazy_static! {\n    pub static ref ENGINE_PARAMS: JubjubBls12 = JubjubBls12::new();\n}\n\nlazy_static! {\n    static ref ZIGZAG_PARAMS: Option<groth16::Parameters<Bls12>> =\n        read_cached_params(&official_params_path()).ok();\n}\n\nfn official_params_path() -> PathBuf {\n    parameter_cache_dir().join(OFFICIAL_ZIGZAG_PARAM_FILENAME)\n}\n\nfn get_zigzag_params() -> Option<groth16::Parameters<Bls12>> {\n    (*ZIGZAG_PARAMS).clone()\n}\n\nconst DEGREE: usize = 1; // TODO: 5; FIXME: increasing degree introduces a test failure. 
Figure out why.\nconst EXPANSION_DEGREE: usize = 6;\nconst SLOTH_ITER: usize = 0;\nconst LAYERS: usize = 2; // TODO: 10;\nconst CHALLENGE_COUNT: usize = 1;\n\nfn setup_params(sector_bytes: usize) -> layered_drgporep::SetupParams {\n    assert!(\n        sector_bytes % 32 == 0,\n        \"sector_bytes ({}) must be a multiple of 32\",\n        sector_bytes,\n    );\n    let nodes = sector_bytes / 32;\n    layered_drgporep::SetupParams {\n        drg_porep_setup_params: drgporep::SetupParams {\n            drg: DrgParams {\n                nodes,\n                degree: DEGREE,\n                expansion_degree: EXPANSION_DEGREE,\n                seed: new_seed(),\n            },\n            sloth_iter: SLOTH_ITER,\n        },\n        layers: LAYERS,\n        challenge_count: CHALLENGE_COUNT,\n    }\n}\n\npub fn public_params(\n    sector_bytes: usize,\n) -> layered_drgporep::PublicParams<DefaultTreeHasher, ZigZagBucketGraph<DefaultTreeHasher>> {\n    ZigZagDrgPoRep::<DefaultTreeHasher>::setup(&setup_params(sector_bytes)).unwrap()\n}\n\nfn commitment_from_fr<E: Engine>(fr: E::Fr) -> Commitment {\n    let mut commitment = [0; 32];\n    for (i, b) in fr_into_bytes::<E>(&fr).iter().enumerate() {\n        commitment[i] = *b;\n    }\n    commitment\n}\n\nfn pad_safe_fr(unpadded: &FrSafe) -> Fr32Ary {\n    let mut res = [0; 32];\n    res[0..31].copy_from_slice(unpadded);\n    res\n}\n\n/// Validates the sector_config and calculates the derived configuration.\n///\n/// # Return Values\n/// * `fake` is true when faking.\n/// * `delay_seconds` is None if no delay.\n/// * `sector_bytes` is the size (in bytes) of the sector which should be stored on disk.\n/// * `proof_sector_bytes` is the size of the sector which will be proved when faking.\n/// * `uses_official_circuit` is true when the pre-generated parameters for the real circuit can be used.\npub fn get_config(sector_config: &SectorConfig) -> (bool, Option<u32>, usize, usize, bool) {\n    let fake = sector_config.is_fake();\n    let delay_seconds = sector_config.simulate_delay_seconds();\n    let delayed = delay_seconds.is_some();\n    let sector_bytes = sector_config.sector_bytes() as usize;\n    let proof_sector_bytes = if fake {\n        FAKE_SECTOR_BYTES\n    } else {\n        sector_bytes\n    };\n\n    // If configuration is 'completely real', then we can use the parameters pre-generated for the real circuit.\n    let uses_official_circuit = !fake && (sector_bytes as u64 == REAL_SECTOR_SIZE);\n\n    // It doesn't make sense to set a delay when not faking. 
The current implementations of SectorStore\n    // never do, but if that were to change, it would be a mistake.\n    let valid = if fake { true } else { !delayed };\n    assert!(valid, \"delay is only valid when faking\");\n\n    (\n        fake,\n        delay_seconds,\n        sector_bytes,\n        proof_sector_bytes,\n        uses_official_circuit,\n    )\n}\n\npub struct PoStOutput {\n    pub snark_proof: [u8; 192],\n    pub faults: Vec<u64>,\n}\n\npub struct PoStInputPart {\n    pub sealed_sector_access: Option<String>,\n    pub comm_r: [u8; 32],\n}\n\npub struct PoStInput {\n    pub challenge_seed: [u8; 32],\n    pub input_parts: Vec<PoStInputPart>,\n}\n\npub fn generate_post(input: PoStInput) -> error::Result<PoStOutput> {\n    let faults: Vec<u64> = if !input.input_parts.is_empty() {\n        vec![0]\n    } else {\n        Default::default()\n    };\n\n    Ok(PoStOutput {\n        snark_proof: [42; 192],\n        faults,\n    })\n}\n\npub struct SealOutput {\n    pub comm_r: Commitment,\n    pub comm_r_star: Commitment,\n    pub comm_d: Commitment,\n    pub snark_proof: SnarkProof,\n}\n\npub fn seal<T: Into<PathBuf> + AsRef<Path>>(\n    sector_config: &SectorConfig,\n    in_path: T,\n    out_path: T,\n    prover_id_in: &FrSafe,\n    sector_id_in: &FrSafe,\n) -> error::Result<SealOutput> {\n    let (fake, delay_seconds, sector_bytes, proof_sector_bytes, uses_official_circuit) =\n        get_config(sector_config);\n\n    let public_params = public_params(proof_sector_bytes);\n    let challenge_count = public_params.challenge_count;\n    if let Some(delay) = delay_seconds {\n        delay_seal(delay);\n    };\n\n    let f_in = File::open(in_path)?;\n\n    // Read all the provided data, even if we will prove less of it because we are faking.\n    let mut data = Vec::with_capacity(sector_bytes);\n    f_in.take(sector_bytes as u64).read_to_end(&mut data)?;\n\n    // Zero-pad the data to the requested size.\n    for _ in data.len()..sector_bytes {\n        data.push(0);\n    }\n\n    // Copy all the data.\n    let data_copy = data.clone();\n\n    // Zero-pad the prover_id to 32 bytes (and therefore Fr32).\n    let prover_id = pad_safe_fr(prover_id_in);\n    // Zero-pad the sector_id to 32 bytes (and therefore Fr32).\n    let sector_id = pad_safe_fr(sector_id_in);\n    let replica_id = replica_id::<DefaultTreeHasher>(prover_id, sector_id);\n\n    let compound_setup_params = compound_proof::SetupParams {\n        // The proof might use a different number of bytes than we read and copied, if we are faking.\n        vanilla_params: &setup_params(proof_sector_bytes),\n        engine_params: &(*ENGINE_PARAMS),\n        partitions: Some(POREP_PARTITIONS),\n    };\n\n    let compound_public_params = ZigZagCompound::setup(&compound_setup_params)?;\n\n    let (tau, aux) = perform_replication(\n        out_path,\n        &compound_public_params.vanilla_params,\n        &replica_id,\n        &mut data,\n        fake,\n        proof_sector_bytes,\n    )?;\n\n    let public_tau = tau.simplify();\n\n    let public_inputs = layered_drgporep::PublicInputs {\n        replica_id,\n        challenge_count,\n        tau: Some(public_tau),\n        comm_r_star: tau.comm_r_star,\n        k: None,\n    };\n\n    let private_inputs = layered_drgporep::PrivateInputs::<DefaultTreeHasher> {\n        replica: &data_copy[0..proof_sector_bytes],\n        aux,\n        tau: tau.layer_taus,\n    };\n\n    let groth_params = if uses_official_circuit {\n        get_zigzag_params()\n    } else {\n        None\n    };\n\n   
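 // When official parameters are unavailable, `groth_params` is `None`; in that\n    // case the parameters generated during proving are written to the dummy\n    // parameter cache below, so that `verify_seal` can load them.\n   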
 let must_cache_params = if groth_params.is_some() {\n        println!(\"Using official parameters.\");\n        false\n    } else {\n        true\n    };\n\n    let proof = ZigZagCompound::prove(\n        &compound_public_params,\n        &public_inputs,\n        &private_inputs,\n        groth_params,\n    )?;\n\n    let mut buf = Vec::with_capacity(POREP_PROOF_BYTES);\n\n    proof.write(&mut buf)?;\n\n    let mut proof_bytes = [0; POREP_PROOF_BYTES];\n    proof_bytes.copy_from_slice(&buf);\n\n    if must_cache_params {\n        write_params_to_cache(\n            proof.groth_params.clone(),\n            &dummy_parameter_cache_path(sector_config, proof_sector_bytes),\n        )?;\n    }\n\n    let comm_r = commitment_from_fr::<Bls12>(public_tau.comm_r.into());\n    let comm_d = commitment_from_fr::<Bls12>(public_tau.comm_d.into());\n    let comm_r_star = commitment_from_fr::<Bls12>(tau.comm_r_star.into());\n\n    // Verification is cheap when parameters are cached,\n    // and it is never correct to return a proof which does not verify.\n    let verified = verify_seal(\n        sector_config,\n        comm_r,\n        comm_d,\n        comm_r_star,\n        prover_id_in,\n        sector_id_in,\n        &proof_bytes,\n    )\n    .expect(\"post-seal verification sanity check errored\");\n    assert!(verified, \"post-seal verification sanity check failed\");\n\n    Ok(SealOutput {\n        comm_r,\n        comm_r_star,\n        comm_d,\n        snark_proof: proof_bytes,\n    })\n}\n\nfn delay_seal(seconds: u32) {\n    let delay = time::Duration::from_secs(u64::from(seconds));\n    thread::sleep(delay);\n}\n\nfn delay_get_unsealed_range(base_seconds: u32) {\n    let delay = time::Duration::from_secs(u64::from(base_seconds / 2));\n    thread::sleep(delay);\n}\n\nfn perform_replication<T: AsRef<Path>>(\n    out_path: T,\n    public_params: &<ZigZagDrgPoRep<DefaultTreeHasher> as ProofScheme>::PublicParams,\n    replica_id: &<DefaultTreeHasher as Hasher>::Domain,\n    data: &mut [u8],\n    fake: bool,\n    proof_sector_bytes: usize,\n) -> error::Result<(\n    layered_drgporep::Tau<<DefaultTreeHasher as Hasher>::Domain>,\n    Vec<MerkleTree<<DefaultTreeHasher as Hasher>::Domain, <DefaultTreeHasher as Hasher>::Function>>,\n)> {\n    if fake {\n        // When faking replication, we write the original data to disk, before replication.\n        write_data(out_path, data)?;\n\n        assert!(\n            data.len() >= FAKE_SECTOR_BYTES,\n            \"data length ({}) is less than FAKE_SECTOR_BYTES ({}) when faking replication\",\n            data.len(),\n            FAKE_SECTOR_BYTES\n        );\n        let (tau, aux) = ZigZagDrgPoRep::replicate(\n            public_params,\n            &replica_id,\n            &mut data[0..proof_sector_bytes],\n            None,\n        )?;\n        Ok((tau, aux))\n    } else {\n        // When not faking replication, we write the replicated data to disk, after replication.\n        let (tau, aux) = ZigZagDrgPoRep::replicate(public_params, &replica_id, data, None)?;\n\n        write_data(out_path, data)?;\n        Ok((tau, aux))\n    }\n}\n\nfn write_data<T: AsRef<Path>>(out_path: T, data: &[u8]) -> error::Result<()> {\n    // Write replicated data to out_path.\n    let f_out = File::create(out_path)?;\n    let mut buf_writer = BufWriter::new(f_out);\n    buf_writer.write_all(data)?;\n    Ok(())\n}\n\npub fn get_unsealed_range<T: Into<PathBuf> + AsRef<Path>>(\n    sector_config: &SectorConfig,\n    sealed_path: T,\n    output_path: T,\n    prover_id_in: &FrSafe,\n    sector_id_in: &FrSafe,\n    offset: u64,\n    num_bytes: u64,\n) -> 
error::Result<(u64)> {\n    let (fake, delay_seconds, sector_bytes, proof_sector_bytes, _uses_official_circuit) =\n        get_config(sector_config);\n    if let Some(delay) = delay_seconds {\n        delay_get_unsealed_range(delay);\n    }\n\n    let prover_id = pad_safe_fr(prover_id_in);\n    let sector_id = pad_safe_fr(sector_id_in);\n    let replica_id = replica_id::<DefaultTreeHasher>(prover_id, sector_id);\n\n    let f_in = File::open(sealed_path)?;\n    let mut data = Vec::new();\n    f_in.take(sector_bytes as u64).read_to_end(&mut data)?;\n\n    let f_out = File::create(output_path)?;\n    let mut buf_writer = BufWriter::new(f_out);\n\n    let unsealed = if fake {\n        data\n    } else {\n        ZigZagDrgPoRep::extract_all(&public_params(proof_sector_bytes), &replica_id, &data)?\n    };\n\n    let written = write_unpadded(\n        &unsealed,\n        &mut buf_writer,\n        offset as usize,\n        num_bytes as usize,\n    )?;\n\n    Ok(written as u64)\n}\n\npub fn verify_seal(\n    sector_config: &SectorConfig,\n    comm_r: Commitment,\n    comm_d: Commitment,\n    comm_r_star: Commitment,\n    prover_id_in: &FrSafe,\n    sector_id_in: &FrSafe,\n    proof_vec: &[u8],\n) -> error::Result<bool> {\n    let (_fake, _delay_seconds, _sector_bytes, proof_sector_bytes, uses_official_circuit) =\n        get_config(sector_config);\n\n    let challenge_count = CHALLENGE_COUNT;\n    let prover_id = pad_safe_fr(prover_id_in);\n    let sector_id = pad_safe_fr(sector_id_in);\n    let replica_id = replica_id::<DefaultTreeHasher>(prover_id, sector_id);\n\n    let comm_r = bytes_into_fr::<Bls12>(&comm_r)?;\n    let comm_d = bytes_into_fr::<Bls12>(&comm_d)?;\n    let comm_r_star = bytes_into_fr::<Bls12>(&comm_r_star)?;\n\n    let compound_setup_params = compound_proof::SetupParams {\n        // The proof might use a different number of bytes than we read and copied, if we are faking.\n        vanilla_params: &setup_params(proof_sector_bytes),\n        engine_params: &(*ENGINE_PARAMS),\n        partitions: Some(POREP_PARTITIONS),\n    };\n\n    let compound_public_params: compound_proof::PublicParams<\n        '_,\n        Bls12,\n        ZigZagDrgPoRep<'_, DefaultTreeHasher>,\n    > = ZigZagCompound::setup(&compound_setup_params)?;\n\n    let public_inputs = layered_drgporep::PublicInputs::<<DefaultTreeHasher as Hasher>::Domain> {\n        replica_id,\n        challenge_count,\n        tau: Some(Tau {\n            comm_r: comm_r.into(),\n            comm_d: comm_d.into(),\n        }),\n        comm_r_star: comm_r_star.into(),\n        k: None,\n    };\n\n    let groth_params = if uses_official_circuit {\n        match get_zigzag_params() {\n            Some(p) => p,\n            None => read_cached_params(&dummy_parameter_cache_path(\n                sector_config,\n                proof_sector_bytes,\n            ))?,\n        }\n    } else {\n        read_cached_params(&dummy_parameter_cache_path(\n            sector_config,\n            proof_sector_bytes,\n        ))?\n    };\n\n    let proof = MultiProof::new_from_reader(Some(POREP_PARTITIONS), proof_vec, groth_params)?;\n\n    ZigZagCompound::verify(&compound_public_params, &public_inputs, &proof).map_err(|e| e.into())\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use rand::{thread_rng, Rng};\n    use sector_base::api::disk_backed_storage::new_sector_store;\n    use sector_base::api::disk_backed_storage::ConfiguredStore;\n    use sector_base::api::sector_store::SectorStore;\n    use std::fs::create_dir_all;\n    use 
std::fs::File;\n    use std::io::Read;\n\n    struct Harness {\n        prover_id: FrSafe,\n        seal_output: SealOutput,\n        sealed_access: String,\n        sector_id: FrSafe,\n        store: Box<SectorStore>,\n        unseal_access: String,\n        written_contents: Vec<Vec<u8>>,\n    }\n\n    #[derive(Debug, Clone, Copy)]\n    enum BytesAmount<'a> {\n        Max,\n        Offset(u64),\n        Exact(&'a [u8]),\n    }\n\n    fn create_harness(cs: &ConfiguredStore, bytes_amts: &[BytesAmount]) -> Harness {\n        let store = create_sector_store(cs);\n        let mgr = store.manager();\n        let cfg = store.config();\n\n        let staged_access = mgr\n            .new_staging_sector_access()\n            .expect(\"could not create staging access\");\n\n        let sealed_access = mgr\n            .new_sealed_sector_access()\n            .expect(\"could not create sealed access\");\n\n        let unseal_access = mgr\n            .new_sealed_sector_access()\n            .expect(\"could not create unseal access\");\n\n        let prover_id = [2; 31];\n        let sector_id = [0; 31];\n\n        let mut written_contents: Vec<Vec<u8>> = Default::default();\n        for bytes_amt in bytes_amts {\n            let contents = match bytes_amt {\n                BytesAmount::Exact(bs) => bs.to_vec(),\n                BytesAmount::Max => {\n                    make_random_bytes(store.config().max_unsealed_bytes_per_sector())\n                }\n                BytesAmount::Offset(m) => {\n                    make_random_bytes(store.config().max_unsealed_bytes_per_sector() - m)\n                }\n            };\n\n            assert_eq!(\n                contents.len() as u64,\n                mgr.write_and_preprocess(&staged_access, &contents)\n                    .expect(\"failed to write and preprocess\")\n            );\n\n            written_contents.push(contents);\n        }\n\n        let seal_output = seal(cfg, &staged_access, &sealed_access, &prover_id, &sector_id)\n            .expect(\"failed to seal\");\n\n        let SealOutput {\n            comm_r,\n            comm_d,\n            comm_r_star,\n            snark_proof,\n        } = seal_output;\n\n        // valid commitments\n        {\n            let is_valid = verify_seal(\n                cfg,\n                comm_r,\n                comm_d,\n                comm_r_star,\n                &prover_id,\n                &sector_id,\n                &snark_proof,\n            )\n            .expect(\"failed to run verify_seal\");\n\n            assert!(\n                is_valid,\n                \"verification of valid proof failed for cs={:?}, bytes_amts={:?}\",\n                cs, bytes_amts\n            );\n        }\n\n        // unseal the whole thing\n        assert_eq!(\n            cfg.max_unsealed_bytes_per_sector(),\n            get_unsealed_range(\n                cfg,\n                &sealed_access,\n                &unseal_access,\n                &prover_id,\n                &sector_id,\n                0,\n                cfg.max_unsealed_bytes_per_sector(),\n            )\n            .expect(\"failed to unseal\")\n        );\n\n        Harness {\n            prover_id,\n            seal_output,\n            sealed_access,\n            sector_id,\n            store,\n            unseal_access,\n            written_contents,\n        }\n    }\n\n    fn create_sector_store(cs: &ConfiguredStore) -> Box<SectorStore> {\n        let staging_path = tempfile::tempdir().unwrap().path().to_owned();\n        let 
sealed_path = tempfile::tempdir().unwrap().path().to_owned();\n\n        create_dir_all(&staging_path).expect(\"failed to create staging dir\");\n        create_dir_all(&sealed_path).expect(\"failed to create sealed dir\");\n\n        Box::new(new_sector_store(\n            cs,\n            sealed_path.to_str().unwrap().to_owned(),\n            staging_path.to_str().unwrap().to_owned(),\n        ))\n    }\n\n    fn make_random_bytes(num_bytes_to_make: u64) -> Vec<u8> {\n        let mut rng = thread_rng();\n        (0..num_bytes_to_make).map(|_| rng.gen()).collect()\n    }\n\n    fn seal_verify_aux(cs: ConfiguredStore, bytes_amt: BytesAmount) {\n        let h = create_harness(&cs, &[bytes_amt]);\n\n        // invalid commitments\n        {\n            let is_valid = verify_seal(\n                h.store.config(),\n                h.seal_output.comm_d,\n                h.seal_output.comm_r_star,\n                h.seal_output.comm_r,\n                &h.prover_id,\n                &h.sector_id,\n                &h.seal_output.snark_proof,\n            )\n            .expect(\"failed to run verify_seal\");\n\n            // This should always fail, because we've rotated the commitments in\n            // the call. Note that comm_d is passed for comm_r and comm_r_star\n            // for comm_d.\n            assert!(!is_valid, \"proof should not be valid\");\n        }\n    }\n\n    fn seal_unsealed_roundtrip_aux(cs: ConfiguredStore, bytes_amt: BytesAmount) {\n        let h = create_harness(&cs, &[bytes_amt]);\n\n        let mut file = File::open(&h.unseal_access).unwrap();\n        let mut buf = Vec::new();\n        file.read_to_end(&mut buf).unwrap();\n\n        // test A\n        {\n            let read_unsealed_buf = h\n                .store\n                .manager()\n                .read_raw(&h.unseal_access, 0, buf.len() as u64)\n                .expect(\"failed to read_raw a\");\n\n            assert_eq!(\n                &buf, &read_unsealed_buf,\n                \"test A contents differed for cs={:?}, bytes_amt={:?}\",\n                cs, bytes_amt\n            );\n        }\n\n        // test B\n        {\n            let read_unsealed_buf = h\n                .store\n                .manager()\n                .read_raw(&h.unseal_access, 1, buf.len() as u64 - 2)\n                .expect(\"failed to read_raw b\");\n\n            assert_eq!(\n                &buf[1..buf.len() - 1],\n                &read_unsealed_buf[..],\n                \"test B contents differed for cs={:?}, bytes_amt={:?}\",\n                cs,\n                bytes_amt\n            );\n        }\n\n        let byte_padding_amount = match bytes_amt {\n            BytesAmount::Exact(bs) => {\n                h.store.config().max_unsealed_bytes_per_sector() - (bs.len() as u64)\n            }\n            BytesAmount::Max => 0,\n            BytesAmount::Offset(m) => m,\n        };\n\n        assert_eq!(\n            h.written_contents[0].len(),\n            buf.len() - (byte_padding_amount as usize),\n            \"length of original and unsealed contents differed for cs={:?}, bytes_amt={:?}\",\n            cs,\n            bytes_amt\n        );\n\n        assert_eq!(\n            h.written_contents[0][..],\n            buf[0..h.written_contents[0].len()],\n            \"original and unsealed contents differed for cs={:?}, bytes_amt={:?}\",\n            cs,\n            bytes_amt\n        );\n    }\n\n    fn seal_unsealed_range_roundtrip_aux(cs: ConfiguredStore, bytes_amt: BytesAmount) {\n        let 
h = create_harness(&cs, &[bytes_amt]);\n\n        let offset = 5;\n        let range_length = h.written_contents[0].len() as u64 - offset;\n\n        assert_eq!(\n            range_length,\n            get_unsealed_range(\n                h.store.config(),\n                &PathBuf::from(&h.sealed_access),\n                &PathBuf::from(&h.unseal_access),\n                &h.prover_id,\n                &h.sector_id,\n                offset,\n                range_length,\n            )\n            .expect(\"failed to unseal\")\n        );\n\n        let mut file = File::open(&h.unseal_access).unwrap();\n        let mut buf = Vec::new();\n        file.read_to_end(&mut buf).unwrap();\n\n        assert_eq!(\n            h.written_contents[0][(offset as usize)..],\n            buf[0..(range_length as usize)],\n            \"original and unsealed range contents differed for cs={:?}, bytes_amt={:?}\",\n            cs,\n            bytes_amt\n        );\n    }\n\n    fn write_and_preprocess_overwrites_unaligned_last_bytes_aux(cs: ConfiguredStore) {\n        // The minimal reproduction for the bug this regression test checks is to write\n        // 32 bytes, then 95 bytes.\n        // The bytes must sum to 127, since that is the required unsealed sector size.\n        // With suitable bytes (e.g. all 255), the bug always occurs when the first chunk is >= 32.\n        // It never occurs when the first chunk is < 32.\n        // The root problem was that write_and_preprocess was opening in append mode, so seeking backward\n        // to overwrite the last, incomplete byte was not happening.\n        let contents_a = [255; 32];\n        let contents_b = [255; 95];\n\n        let h = create_harness(\n            &cs,\n            &[\n                BytesAmount::Exact(&contents_a),\n                BytesAmount::Exact(&contents_b),\n            ],\n        );\n\n        let unseal_access = h\n            .store\n            .manager()\n            .new_sealed_sector_access()\n            .expect(\"could not create unseal access\");\n\n        let _ = get_unsealed_range(\n            h.store.config(),\n            &h.sealed_access,\n            &unseal_access,\n            &h.prover_id,\n            &h.sector_id,\n            0,\n            (contents_a.len() + contents_b.len()) as u64,\n        )\n        .expect(\"failed to unseal\");\n\n        let mut file = File::open(&unseal_access).unwrap();\n        let mut buf_from_file = Vec::new();\n        file.read_to_end(&mut buf_from_file).unwrap();\n\n        assert_eq!(\n            contents_a.len() + contents_b.len(),\n            buf_from_file.len(),\n            \"length of original and unsealed contents differed for {:?}\",\n            cs\n        );\n\n        assert_eq!(\n            contents_a[..],\n            buf_from_file[0..contents_a.len()],\n            \"original and unsealed contents differed for {:?}\",\n            cs\n        );\n\n        assert_eq!(\n            contents_b[..],\n            buf_from_file[contents_a.len()..contents_a.len() + contents_b.len()],\n            \"original and unsealed contents differed for {:?}\",\n            cs\n        );\n    }\n\n    /*\n\n    TODO: create a way to run these super-slow-by-design tests manually.\n\n    fn seal_verify_live() {\n        seal_verify_aux(ConfiguredStore::Live, 0);\n        seal_verify_aux(ConfiguredStore::Live, 5);\n    }\n\n    fn seal_unsealed_roundtrip_live() {\n        seal_unsealed_roundtrip_aux(ConfiguredStore::Live, 0);\n        
seal_unsealed_roundtrip_aux(ConfiguredStore::Live, 5);\n    }\n\n    fn seal_unsealed_range_roundtrip_live() {\n        seal_unsealed_range_roundtrip_aux(ConfiguredStore::Live, 0);\n        seal_unsealed_range_roundtrip_aux(ConfiguredStore::Live, 5);\n    }\n\n    */\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn seal_verify_test() {\n        seal_verify_aux(ConfiguredStore::Test, BytesAmount::Max);\n        seal_verify_aux(ConfiguredStore::Test, BytesAmount::Offset(5));\n    }\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn seal_verify_proof_test() {\n        seal_verify_aux(ConfiguredStore::ProofTest, BytesAmount::Max);\n        seal_verify_aux(ConfiguredStore::ProofTest, BytesAmount::Offset(5));\n    }\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn seal_unsealed_roundtrip_test() {\n        seal_unsealed_roundtrip_aux(ConfiguredStore::Test, BytesAmount::Max);\n        seal_unsealed_roundtrip_aux(ConfiguredStore::Test, BytesAmount::Offset(5));\n    }\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn seal_unsealed_roundtrip_proof_test() {\n        seal_unsealed_roundtrip_aux(ConfiguredStore::ProofTest, BytesAmount::Max);\n        seal_unsealed_roundtrip_aux(ConfiguredStore::ProofTest, BytesAmount::Offset(5));\n    }\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn seal_unsealed_range_roundtrip_test() {\n        seal_unsealed_range_roundtrip_aux(ConfiguredStore::Test, BytesAmount::Max);\n        seal_unsealed_range_roundtrip_aux(ConfiguredStore::Test, BytesAmount::Offset(5));\n    }\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn seal_unsealed_range_roundtrip_proof_test() {\n        seal_unsealed_range_roundtrip_aux(ConfiguredStore::ProofTest, BytesAmount::Max);\n        seal_unsealed_range_roundtrip_aux(ConfiguredStore::ProofTest, BytesAmount::Offset(5));\n    }\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn write_and_preprocess_overwrites_unaligned_last_bytes() {\n        write_and_preprocess_overwrites_unaligned_last_bytes_aux(ConfiguredStore::ProofTest);\n    }\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn concurrent_seal_unsealed_range_roundtrip_proof_test() {\n        let threads = 5;\n\n        let spawned = (0..threads)\n            .map(|_| {\n                thread::spawn(|| {\n                    seal_unsealed_range_roundtrip_aux(ConfiguredStore::ProofTest, BytesAmount::Max)\n                })\n            })\n            .collect::<Vec<_>>();\n\n        for thread in spawned {\n            thread.join().expect(\"test thread panicked\");\n        }\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/mod.rs",
    "content": "use std::fs::{self, File, OpenOptions};\nuse std::io::{self, BufReader, BufWriter, Read, Write};\nuse std::path::{Path, PathBuf};\n\nuse anyhow::{ensure, Context, Result};\nuse bincode::deserialize;\nuse filecoin_hashers::Hasher;\nuse fr32::{write_unpadded, Fr32Reader};\nuse log::{info, trace};\nuse memmap::MmapOptions;\nuse merkletree::store::{DiskStore, LevelCacheStore, StoreConfig};\nuse storage_proofs_core::{\n    cache_key::CacheKey,\n    measurements::{measure_op, Operation},\n    merkle::get_base_tree_count,\n    pieces::generate_piece_commitment_bytes_from_source,\n    sector::SectorId,\n    util::default_rows_to_discard,\n};\nuse storage_proofs_porep::{\n    stacked::{generate_replica_id, PersistentAux, StackedDrg, TemporaryAux},\n    PoRep,\n};\nuse typenum::Unsigned;\n\nuse crate::{\n    commitment_reader::CommitmentReader,\n    constants::{\n        DefaultBinaryTree, DefaultOctTree, DefaultPieceDomain, DefaultPieceHasher,\n        MINIMUM_RESERVED_BYTES_FOR_PIECE_IN_FULLY_ALIGNED_SECTOR as MINIMUM_PIECE_SIZE,\n    },\n    parameters::public_params,\n    pieces::{get_piece_alignment, sum_piece_bytes_with_alignment},\n    types::{\n        Commitment, MerkleTreeTrait, PaddedBytesAmount, PieceInfo, PoRepConfig,\n        PoRepProofPartitions, ProverId, SealPreCommitPhase1Output, Ticket, UnpaddedByteIndex,\n        UnpaddedBytesAmount,\n    },\n};\n\nmod fake_seal;\nmod post_util;\nmod seal;\nmod util;\nmod window_post;\nmod winning_post;\n\npub use fake_seal::*;\npub use post_util::*;\npub use seal::*;\npub use util::*;\npub use window_post::*;\npub use winning_post::*;\n\n/// Unseals the sector at `sealed_path` and returns the bytes for a piece\n/// whose first (unpadded) byte begins at `offset` and ends at `offset` plus\n/// `num_bytes`, inclusive. 
Note that the entire sector is unsealed each time\n/// this function is called.\n///\n/// # Arguments\n///\n/// * `porep_config` - porep configuration containing the sector size.\n/// * `cache_path` - path to the directory in which the sector data's Merkle Tree is written.\n/// * `sealed_path` - path to the sealed sector file that we will unseal and read a byte range.\n/// * `output_path` - path to a file that we will write the requested byte range to.\n/// * `prover_id` - the prover-id that sealed the sector.\n/// * `sector_id` - the sector-id of the sealed sector.\n/// * `comm_d` - the commitment to the sector's data.\n/// * `ticket` - the ticket that was used to generate the sector's replica-id.\n/// * `offset` - the byte index in the unsealed sector of the first byte that we want to read.\n/// * `num_bytes` - the number of bytes that we want to read.\n#[allow(clippy::too_many_arguments)]\npub fn get_unsealed_range<T: Into<PathBuf> + AsRef<Path>, Tree: 'static + MerkleTreeTrait>(\n    porep_config: PoRepConfig,\n    cache_path: T,\n    sealed_path: T,\n    output_path: T,\n    prover_id: ProverId,\n    sector_id: SectorId,\n    comm_d: Commitment,\n    ticket: Ticket,\n    offset: UnpaddedByteIndex,\n    num_bytes: UnpaddedBytesAmount,\n) -> Result<UnpaddedBytesAmount> {\n    info!(\"get_unsealed_range:start\");\n\n    let f_out = File::create(&output_path)\n        .with_context(|| format!(\"could not create output_path={:?}\", output_path.as_ref()))?;\n\n    let buf_f_out = BufWriter::new(f_out);\n\n    let result = unseal_range_mapped::<_, _, Tree>(\n        porep_config,\n        cache_path,\n        sealed_path.into(),\n        buf_f_out,\n        prover_id,\n        sector_id,\n        comm_d,\n        ticket,\n        offset,\n        num_bytes,\n    );\n\n    info!(\"get_unsealed_range:finish\");\n    result\n}\n\n/// Unseals the sector read from `sealed_sector` and returns the bytes for a\n/// piece whose first (unpadded) byte begins at `offset` and ends at `offset`\n/// plus `num_bytes`, inclusive. 
Note that the entire sector is unsealed each\n/// time this function is called.\n///\n/// # Arguments\n///\n/// * `porep_config` - porep configuration containing the sector size.\n/// * `cache_path` - path to the directory in which the sector data's Merkle Tree is written.\n/// * `sealed_sector` - a byte source from which we read sealed sector data.\n/// * `unsealed_output` - a byte sink to which we write unsealed, un-bit-padded sector bytes.\n/// * `prover_id` - the prover-id that sealed the sector.\n/// * `sector_id` - the sector-id of the sealed sector.\n/// * `comm_d` - the commitment to the sector's data.\n/// * `ticket` - the ticket that was used to generate the sector's replica-id.\n/// * `offset` - the byte index in the unsealed sector of the first byte that we want to read.\n/// * `num_bytes` - the number of bytes that we want to read.\n#[allow(clippy::too_many_arguments)]\npub fn unseal_range<P, R, W, Tree>(\n    porep_config: PoRepConfig,\n    cache_path: P,\n    mut sealed_sector: R,\n    unsealed_output: W,\n    prover_id: ProverId,\n    sector_id: SectorId,\n    comm_d: Commitment,\n    ticket: Ticket,\n    offset: UnpaddedByteIndex,\n    num_bytes: UnpaddedBytesAmount,\n) -> Result<UnpaddedBytesAmount>\nwhere\n    P: Into<PathBuf> + AsRef<Path>,\n    R: Read,\n    W: Write,\n    Tree: 'static + MerkleTreeTrait,\n{\n    info!(\"unseal_range:start\");\n    ensure!(comm_d != [0; 32], \"Invalid all zero commitment (comm_d)\");\n\n    let comm_d =\n        as_safe_commitment::<<DefaultPieceHasher as Hasher>::Domain, _>(&comm_d, \"comm_d\")?;\n\n    let replica_id = generate_replica_id::<Tree::Hasher, _>(\n        &prover_id,\n        sector_id.into(),\n        &ticket,\n        comm_d,\n        &porep_config.porep_id,\n    );\n\n    let mut data = Vec::new();\n    sealed_sector.read_to_end(&mut data)?;\n\n    let res = unseal_range_inner::<_, _, Tree>(\n        porep_config,\n        cache_path,\n        &mut data,\n        unsealed_output,\n        replica_id,\n        offset,\n        num_bytes,\n    )?;\n\n    info!(\"unseal_range:finish\");\n\n    Ok(res)\n}\n\n/// Unseals the sector read from `sealed_sector` and returns the bytes for a\n/// piece whose first (unpadded) byte begins at `offset` and ends at `offset`\n/// plus `num_bytes`, inclusive. 
Note that the entire sector is unsealed each\n/// time this function is called.\n///\n/// # Arguments\n///\n/// * `porep_config` - porep configuration containing the sector size.\n/// * `cache_path` - path to the directory in which the sector data's Merkle Tree is written.\n/// * `sealed_sector` - a byte source from which we read sealed sector data.\n/// * `unsealed_output` - a byte sink to which we write unsealed, un-bit-padded sector bytes.\n/// * `prover_id` - the prover-id that sealed the sector.\n/// * `sector_id` - the sector-id of the sealed sector.\n/// * `comm_d` - the commitment to the sector's data.\n/// * `ticket` - the ticket that was used to generate the sector's replica-id.\n/// * `offset` - the byte index in the unsealed sector of the first byte that we want to read.\n/// * `num_bytes` - the number of bytes that we want to read.\n#[allow(clippy::too_many_arguments)]\npub fn unseal_range_mapped<P, W, Tree>(\n    porep_config: PoRepConfig,\n    cache_path: P,\n    sealed_path: PathBuf,\n    unsealed_output: W,\n    prover_id: ProverId,\n    sector_id: SectorId,\n    comm_d: Commitment,\n    ticket: Ticket,\n    offset: UnpaddedByteIndex,\n    num_bytes: UnpaddedBytesAmount,\n) -> Result<UnpaddedBytesAmount>\nwhere\n    P: Into<PathBuf> + AsRef<Path>,\n    W: Write,\n    Tree: 'static + MerkleTreeTrait,\n{\n    info!(\"unseal_range_mapped:start\");\n    ensure!(comm_d != [0; 32], \"Invalid all zero commitment (comm_d)\");\n\n    let comm_d =\n        as_safe_commitment::<<DefaultPieceHasher as Hasher>::Domain, _>(&comm_d, \"comm_d\")?;\n\n    let replica_id = generate_replica_id::<Tree::Hasher, _>(\n        &prover_id,\n        sector_id.into(),\n        &ticket,\n        comm_d,\n        &porep_config.porep_id,\n    );\n\n    let mapped_file = OpenOptions::new()\n        .read(true)\n        .write(true)\n        .open(&sealed_path)?;\n    let mut data = unsafe { MmapOptions::new().map_copy(&mapped_file)? };\n\n    let result = unseal_range_inner::<_, _, Tree>(\n        porep_config,\n        cache_path,\n        &mut data,\n        unsealed_output,\n        replica_id,\n        offset,\n        num_bytes,\n    );\n    info!(\"unseal_range_mapped:finish\");\n\n    result\n}\n\n/// Unseals the sector read from `sealed_sector` and returns the bytes for a\n/// piece whose first (unpadded) byte begins at `offset` and ends at `offset`\n/// plus `num_bytes`, inclusive. 
Note that the entire sector is unsealed each\n/// time this function is called.\n///\n/// # Arguments\n///\n/// * `porep_config` - porep configuration containing the sector size.\n/// * `cache_path` - path to the directory in which the sector data's Merkle Tree is written.\n/// * `data` - the sealed sector bytes, which are unsealed in place.\n/// * `unsealed_output` - a byte sink to which we write unsealed, un-bit-padded sector bytes.\n/// * `replica_id` - the replica-id of the sealed sector.\n/// * `offset` - the byte index in the unsealed sector of the first byte that we want to read.\n/// * `num_bytes` - the number of bytes that we want to read.\n#[allow(clippy::too_many_arguments)]\nfn unseal_range_inner<P, W, Tree>(\n    porep_config: PoRepConfig,\n    cache_path: P,\n    data: &mut [u8],\n    mut unsealed_output: W,\n    replica_id: <Tree::Hasher as Hasher>::Domain,\n    offset: UnpaddedByteIndex,\n    num_bytes: UnpaddedBytesAmount,\n) -> Result<UnpaddedBytesAmount>\nwhere\n    P: Into<PathBuf> + AsRef<Path>,\n    W: Write,\n    Tree: 'static + MerkleTreeTrait,\n{\n    info!(\"unseal_range_inner:start\");\n\n    let base_tree_size = get_base_tree_size::<DefaultBinaryTree>(porep_config.sector_size)?;\n    let base_tree_leafs = get_base_tree_leafs::<DefaultBinaryTree>(base_tree_size)?;\n    let config = StoreConfig::new(\n        cache_path.as_ref(),\n        CacheKey::CommDTree.to_string(),\n        default_rows_to_discard(\n            base_tree_leafs,\n            <DefaultBinaryTree as MerkleTreeTrait>::Arity::to_usize(),\n        ),\n    );\n    let pp = public_params(\n        PaddedBytesAmount::from(porep_config),\n        usize::from(PoRepProofPartitions::from(porep_config)),\n        porep_config.porep_id,\n        porep_config.api_version,\n    )?;\n\n    let offset_padded: PaddedBytesAmount = UnpaddedBytesAmount::from(offset).into();\n    let num_bytes_padded: PaddedBytesAmount = num_bytes.into();\n\n    StackedDrg::<Tree, DefaultPieceHasher>::extract_all(&pp, &replica_id, data, Some(config))?;\n    let start: usize = offset_padded.into();\n    let end = start + usize::from(num_bytes_padded);\n    let unsealed = &data[start..end];\n\n    // Once `extract_all` has succeeded, `data` holds the unsealed (but still\n    // bit-padded) sector bytes, so the `unsealed` slice has length\n    // `num_bytes_padded`, and its first byte is the byte at index\n    // `offset_padded` of the unsealed data.\n    let written = write_unpadded(unsealed, &mut unsealed_output, 0, num_bytes.into())\n        .context(\"write_unpadded failed\")?;\n\n    let amount = UnpaddedBytesAmount(written as u64);\n\n    info!(\"unseal_range_inner:finish\");\n    Ok(amount)\n}\n\n/// Generates a piece commitment for the provided byte source. Returns an error\n/// if the byte source produced more than `piece_size` bytes.\n///\n/// # Arguments\n///\n/// * `source` - a readable source of unprocessed piece bytes. 
The piece's commitment will be\n/// generated for the bytes read from the source plus any added padding.\n/// * `piece_size` - the number of unpadded user-bytes which can be read from source before EOF.\npub fn generate_piece_commitment<T: Read>(\n    source: T,\n    piece_size: UnpaddedBytesAmount,\n) -> Result<PieceInfo> {\n    trace!(\"generate_piece_commitment:start\");\n\n    let result = measure_op(Operation::GeneratePieceCommitment, || {\n        ensure_piece_size(piece_size)?;\n\n        // send the source through the preprocessor\n        let source = BufReader::new(source);\n        let mut fr32_reader = Fr32Reader::new(source);\n\n        let commitment = generate_piece_commitment_bytes_from_source::<DefaultPieceHasher>(\n            &mut fr32_reader,\n            PaddedBytesAmount::from(piece_size).into(),\n        )?;\n\n        PieceInfo::new(commitment, piece_size)\n    });\n\n    trace!(\"generate_piece_commitment:finish\");\n    result\n}\n\n/// Computes a NUL-byte prefix and/or suffix for `source` using the provided\n/// `piece_lengths` and `piece_size` (such that the `source`, after\n/// preprocessing, will occupy a subtree of a merkle tree built using the bytes\n/// from `target`), runs the resultant byte stream through the preprocessor,\n/// and writes the result to `target`. Returns a tuple containing the number of\n/// bytes written to `target` (`source` plus alignment) and the commitment.\n///\n/// WARNING: Depending on the ordering and size of the pieces in\n/// `piece_lengths`, this function could write a prefix of NUL bytes which\n/// wastes ($SIZESECTORSIZE/2)-$MINIMUM_PIECE_SIZE space. This function will be\n/// deprecated in favor of `write_and_preprocess`, and miners will be prevented\n/// from sealing sectors containing more than $TOOMUCH alignment bytes.\n///\n/// # Arguments\n///\n/// * `source` - a readable source of unprocessed piece bytes.\n/// * `target` - a writer where we will write the processed piece bytes.\n/// * `piece_size` - the number of unpadded user-bytes which can be read from source before EOF.\n/// * `piece_lengths` - the number of bytes for each previous piece in the sector.\npub fn add_piece<R, W>(\n    source: R,\n    target: W,\n    piece_size: UnpaddedBytesAmount,\n    piece_lengths: &[UnpaddedBytesAmount],\n) -> Result<(PieceInfo, UnpaddedBytesAmount)>\nwhere\n    R: Read,\n    W: Write,\n{\n    info!(\"add_piece:start\");\n\n    let result = measure_op(Operation::AddPiece, || {\n        ensure_piece_size(piece_size)?;\n\n        let source = BufReader::new(source);\n        let mut target = BufWriter::new(target);\n\n        let written_bytes = sum_piece_bytes_with_alignment(&piece_lengths);\n        let piece_alignment = get_piece_alignment(written_bytes, piece_size);\n        let fr32_reader = Fr32Reader::new(source);\n\n        // write left alignment\n        for _ in 0..usize::from(PaddedBytesAmount::from(piece_alignment.left_bytes)) {\n            target.write_all(&[0u8][..])?;\n        }\n\n        let mut commitment_reader = CommitmentReader::new(fr32_reader);\n        let n = io::copy(&mut commitment_reader, &mut target)\n            .context(\"failed to write and preprocess bytes\")?;\n\n        ensure!(n != 0, \"add_piece: read 0 bytes before EOF from source\");\n        let n = PaddedBytesAmount(n as u64);\n        let n: UnpaddedBytesAmount = n.into();\n\n        ensure!(n == piece_size, \"add_piece: invalid bytes amount written\");\n\n        // write right alignment\n        for _ in 
0..usize::from(PaddedBytesAmount::from(piece_alignment.right_bytes)) {\n            target.write_all(&[0u8][..])?;\n        }\n\n        let commitment = commitment_reader.finish()?;\n        let mut comm = [0u8; 32];\n        comm.copy_from_slice(commitment.as_ref());\n\n        let written = piece_alignment.left_bytes + piece_alignment.right_bytes + piece_size;\n\n        Ok((PieceInfo::new(comm, n)?, written))\n    });\n\n    info!(\"add_piece:finish\");\n    result\n}\n\nfn ensure_piece_size(piece_size: UnpaddedBytesAmount) -> Result<()> {\n    ensure!(\n        piece_size >= UnpaddedBytesAmount(MINIMUM_PIECE_SIZE),\n        \"Piece must be at least {} bytes\",\n        MINIMUM_PIECE_SIZE\n    );\n\n    let padded_piece_size: PaddedBytesAmount = piece_size.into();\n    ensure!(\n        u64::from(padded_piece_size).is_power_of_two(),\n        \"Bit-padded piece size must be a power of 2 ({:?})\",\n        padded_piece_size,\n    );\n\n    Ok(())\n}\n\n/// Writes bytes from `source` to `target`, adding bit-padding (\"preprocessing\")\n/// as needed. Returns a tuple containing the number of bytes written to\n/// `target` and the commitment.\n///\n/// WARNING: This function neither prepends nor appends alignment bytes to the\n/// `target`; it is the caller's responsibility to ensure properly sized\n/// and ordered writes to `target` such that `source`-bytes occupy whole\n/// subtrees of the final merkle tree built over `target`.\n///\n/// # Arguments\n///\n/// * `source` - a readable source of unprocessed piece bytes.\n/// * `target` - a writer where we will write the processed piece bytes.\n/// * `piece_size` - the number of unpadded user-bytes which can be read from source before EOF.\npub fn write_and_preprocess<R, W>(\n    source: R,\n    target: W,\n    piece_size: UnpaddedBytesAmount,\n) -> Result<(PieceInfo, UnpaddedBytesAmount)>\nwhere\n    R: Read,\n    W: Write,\n{\n    add_piece(source, target, piece_size, Default::default())\n}\n\n// Verifies if a DiskStore specified by a config (or set of 'required_configs') is consistent.\nfn verify_store(config: &StoreConfig, arity: usize, required_configs: usize) -> Result<()> {\n    let store_path = StoreConfig::data_path(&config.path, &config.id);\n    if !Path::new(&store_path).exists() {\n        // Configs may have split due to sector size, so we need to\n        // check deterministic paths from here.\n        let orig_path = store_path\n            .clone()\n            .into_os_string()\n            .into_string()\n            .expect(\"failed to convert store_path to string\");\n        let mut configs: Vec<StoreConfig> = Vec::with_capacity(required_configs);\n        for i in 0..required_configs {\n            let cur_path = orig_path\n                .clone()\n                .replace(\".dat\", format!(\"-{}.dat\", i).as_str());\n\n            if Path::new(&cur_path).exists() {\n                let path_str = cur_path.as_str();\n                let tree_names = vec![\"tree-d\", \"tree-c\", \"tree-r-last\"];\n                for name in tree_names {\n                    if path_str.contains(name) {\n                        configs.push(StoreConfig::from_config(\n                            config,\n                            format!(\"{}-{}\", name, i),\n                            None,\n                        ));\n                        break;\n                    }\n                }\n            }\n        }\n\n        ensure!(\n            configs.len() == required_configs,\n            \"Missing store file (or associated 
split paths): {}\",\n            store_path.display()\n        );\n\n        let store_len = config.size.expect(\"disk store size not configured\");\n        for config in &configs {\n            ensure!(\n                DiskStore::<DefaultPieceDomain>::is_consistent(store_len, arity, &config,)?,\n                \"Store is inconsistent: {:?}\",\n                StoreConfig::data_path(&config.path, &config.id)\n            );\n        }\n    } else {\n        ensure!(\n            DiskStore::<DefaultPieceDomain>::is_consistent(\n                config.size.expect(\"disk store size not configured\"),\n                arity,\n                &config,\n            )?,\n            \"Store is inconsistent: {:?}\",\n            store_path\n        );\n    }\n\n    Ok(())\n}\n\n// Verifies if a LevelCacheStore specified by a config is consistent.\nfn verify_level_cache_store<Tree: MerkleTreeTrait>(config: &StoreConfig) -> Result<()> {\n    let store_path = StoreConfig::data_path(&config.path, &config.id);\n    if !Path::new(&store_path).exists() {\n        let required_configs = get_base_tree_count::<Tree>();\n\n        // Configs may have split due to sector size, so we need to\n        // check deterministic paths from here.\n        let orig_path = store_path\n            .clone()\n            .into_os_string()\n            .into_string()\n            .expect(\"failed to convert store_path to string\");\n        let mut configs: Vec<StoreConfig> = Vec::with_capacity(required_configs);\n        for i in 0..required_configs {\n            let cur_path = orig_path\n                .clone()\n                .replace(\".dat\", format!(\"-{}.dat\", i).as_str());\n\n            if Path::new(&cur_path).exists() {\n                let path_str = cur_path.as_str();\n                let tree_names = vec![\"tree-d\", \"tree-c\", \"tree-r-last\"];\n                for name in tree_names {\n                    if path_str.contains(name) {\n                        configs.push(StoreConfig::from_config(\n                            config,\n                            format!(\"{}-{}\", name, i),\n                            None,\n                        ));\n                        break;\n                    }\n                }\n            }\n        }\n\n        ensure!(\n            configs.len() == required_configs,\n            \"Missing store file (or associated split paths): {}\",\n            store_path.display()\n        );\n\n        let store_len = config.size.expect(\"disk store size not configured\");\n        for config in &configs {\n            ensure!(\n                LevelCacheStore::<DefaultPieceDomain, File>::is_consistent(\n                    store_len,\n                    Tree::Arity::to_usize(),\n                    &config,\n                )?,\n                \"Store is inconsistent: {:?}\",\n                StoreConfig::data_path(&config.path, &config.id)\n            );\n        }\n    } else {\n        ensure!(\n            LevelCacheStore::<DefaultPieceDomain, File>::is_consistent(\n                config.size.expect(\"disk store size not configured\"),\n                Tree::Arity::to_usize(),\n                &config,\n            )?,\n            \"Store is inconsistent: {:?}\",\n            store_path\n        );\n    }\n\n    Ok(())\n}\n\n// Checks for the existence of the tree d store, the replica, and all generated labels.\npub fn validate_cache_for_precommit_phase2<R, T, Tree: MerkleTreeTrait>(\n    cache_path: R,\n    replica_path: T,\n    
seal_precommit_phase1_output: &SealPreCommitPhase1Output<Tree>,\n) -> Result<()>\nwhere\n    R: AsRef<Path>,\n    T: AsRef<Path>,\n{\n    info!(\"validate_cache_for_precommit_phase2:start\");\n\n    ensure!(\n        replica_path.as_ref().exists(),\n        \"Missing replica: {}\",\n        replica_path.as_ref().to_path_buf().display()\n    );\n\n    // Verify all stores/labels within the Labels object, but\n    // respecting the current cache_path.\n    let cache = cache_path.as_ref().to_path_buf();\n    seal_precommit_phase1_output\n        .labels\n        .verify_stores(verify_store, &cache)?;\n\n    // Update the previous phase store path to the current cache_path.\n    let mut config = StoreConfig::from_config(\n        &seal_precommit_phase1_output.config,\n        &seal_precommit_phase1_output.config.id,\n        seal_precommit_phase1_output.config.size,\n    );\n    config.path = cache_path.as_ref().into();\n\n    let result = verify_store(\n        &config,\n        <DefaultBinaryTree as MerkleTreeTrait>::Arity::to_usize(),\n        get_base_tree_count::<Tree>(),\n    );\n\n    info!(\"validate_cache_for_precommit_phase2:finish\");\n    result\n}\n\n// Checks for the existence of the replica data and t_aux, which in\n// turn allows us to verify the tree d, tree r, tree c, and the\n// labels.\npub fn validate_cache_for_commit<R, T, Tree: MerkleTreeTrait>(\n    cache_path: R,\n    replica_path: T,\n) -> Result<()>\nwhere\n    R: AsRef<Path>,\n    T: AsRef<Path>,\n{\n    info!(\"validate_cache_for_commit:start\");\n\n    // Verify that the replica exists and is not empty.\n    ensure!(\n        replica_path.as_ref().exists(),\n        \"Missing replica: {}\",\n        replica_path.as_ref().to_path_buf().display()\n    );\n\n    let metadata = File::open(&replica_path)?.metadata()?;\n    ensure!(\n        metadata.len() > 0,\n        \"Replica {} exists, but is empty!\",\n        replica_path.as_ref().to_path_buf().display()\n    );\n\n    let cache = &cache_path.as_ref();\n\n    // Make sure p_aux exists and is valid.\n    let p_aux_path = cache.join(CacheKey::PAux.to_string());\n    let p_aux_bytes = fs::read(&p_aux_path)\n        .with_context(|| format!(\"could not read file p_aux={:?}\", p_aux_path))?;\n\n    let _: PersistentAux<<Tree::Hasher as Hasher>::Domain> = deserialize(&p_aux_bytes)?;\n    drop(p_aux_bytes);\n\n    // Make sure t_aux exists and is valid.\n    let t_aux = {\n        let t_aux_path = cache.join(CacheKey::TAux.to_string());\n        let t_aux_bytes = fs::read(&t_aux_path)\n            .with_context(|| format!(\"could not read file t_aux={:?}\", t_aux_path))?;\n\n        let mut res: TemporaryAux<Tree, DefaultPieceHasher> = deserialize(&t_aux_bytes)?;\n\n        // Switch t_aux to the passed in cache_path\n        res.set_cache_path(&cache_path);\n        res\n    };\n\n    // Verify all stores/labels within the Labels object.\n    let cache = cache_path.as_ref().to_path_buf();\n    t_aux.labels.verify_stores(verify_store, &cache)?;\n\n    // Verify each tree disk store.\n    verify_store(\n        &t_aux.tree_d_config,\n        <DefaultBinaryTree as MerkleTreeTrait>::Arity::to_usize(),\n        get_base_tree_count::<Tree>(),\n    )?;\n    verify_store(\n        &t_aux.tree_c_config,\n        <DefaultOctTree as MerkleTreeTrait>::Arity::to_usize(),\n        get_base_tree_count::<Tree>(),\n    )?;\n    verify_level_cache_store::<DefaultOctTree>(&t_aux.tree_r_last_config)?;\n\n    info!(\"validate_cache_for_commit:finish\");\n    Ok(())\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/post.rs",
    "content": "use std::collections::BTreeMap;\nuse std::hash::{Hash, Hasher as StdHasher};\nuse std::marker::PhantomData;\nuse std::path::{Path, PathBuf};\n\nuse anyhow::{ensure, Context, Result};\nuse bincode::deserialize;\nuse generic_array::typenum::Unsigned;\nuse log::{info, trace};\nuse merkletree::store::StoreConfig;\nuse storage_proofs::cache_key::CacheKey;\nuse storage_proofs::compound_proof::{self, CompoundProof};\nuse storage_proofs::hasher::{Domain, Hasher};\nuse storage_proofs::merkle::{\n    create_tree, get_base_tree_count, split_config_and_replica, MerkleTreeTrait, MerkleTreeWrapper,\n};\nuse storage_proofs::multi_proof::MultiProof;\nuse storage_proofs::post::fallback;\nuse storage_proofs::sector::*;\nuse storage_proofs::util::default_rows_to_discard;\n\nuse crate::api::util::{as_safe_commitment, get_base_tree_leafs, get_base_tree_size};\nuse crate::caches::{get_post_params, get_post_verifying_key};\nuse crate::constants::*;\nuse crate::parameters::{window_post_setup_params, winning_post_setup_params};\nuse crate::types::{\n    ChallengeSeed, Commitment, PersistentAux, PoStConfig, ProverId, SectorSize, TemporaryAux,\n};\nuse crate::PoStType;\n\n/// The minimal information required about a replica, in order to be able to generate\n/// a PoSt over it.\n#[derive(Debug)]\npub struct PrivateReplicaInfo<Tree: MerkleTreeTrait> {\n    /// Path to the replica.\n    replica: PathBuf,\n    /// The replica commitment.\n    comm_r: Commitment,\n    /// Persistent Aux.\n    aux: PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n    /// Contains sector-specific (e.g. merkle trees) assets\n    cache_dir: PathBuf,\n\n    _t: PhantomData<Tree>,\n}\n\nimpl<Tree: MerkleTreeTrait> Clone for PrivateReplicaInfo<Tree> {\n    fn clone(&self) -> Self {\n        Self {\n            replica: self.replica.clone(),\n            comm_r: self.comm_r,\n            aux: self.aux.clone(),\n            cache_dir: self.cache_dir.clone(),\n            _t: Default::default(),\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> std::cmp::PartialEq for PrivateReplicaInfo<Tree> {\n    fn eq(&self, other: &Self) -> bool {\n        self.replica == other.replica\n            && self.comm_r == other.comm_r\n            && self.aux == other.aux\n            && self.cache_dir == other.cache_dir\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> Hash for PrivateReplicaInfo<Tree> {\n    fn hash<H: StdHasher>(&self, state: &mut H) {\n        self.replica.hash(state);\n        self.comm_r.hash(state);\n        self.aux.hash(state);\n        self.cache_dir.hash(state);\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> std::cmp::Eq for PrivateReplicaInfo<Tree> {}\n\nimpl<Tree: MerkleTreeTrait> std::cmp::Ord for PrivateReplicaInfo<Tree> {\n    fn cmp(&self, other: &Self) -> std::cmp::Ordering {\n        self.comm_r.as_ref().cmp(other.comm_r.as_ref())\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> std::cmp::PartialOrd for PrivateReplicaInfo<Tree> {\n    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {\n        self.comm_r.as_ref().partial_cmp(other.comm_r.as_ref())\n    }\n}\n\nimpl<Tree: 'static + MerkleTreeTrait> PrivateReplicaInfo<Tree> {\n    pub fn new(replica: PathBuf, comm_r: Commitment, cache_dir: PathBuf) -> Result<Self> {\n        ensure!(comm_r != [0; 32], \"Invalid all zero commitment (comm_r)\");\n\n        let aux = {\n            let f_aux_path = cache_dir.join(CacheKey::PAux.to_string());\n            let aux_bytes = std::fs::read(&f_aux_path)\n                .with_context(|| format!(\"could not read from 
path={:?}\", f_aux_path))?;\n\n            deserialize(&aux_bytes)\n        }?;\n\n        ensure!(replica.exists(), \"Sealed replica does not exist\");\n\n        Ok(PrivateReplicaInfo {\n            replica,\n            comm_r,\n            aux,\n            cache_dir,\n            _t: Default::default(),\n        })\n    }\n\n    pub fn cache_dir_path(&self) -> &Path {\n        self.cache_dir.as_path()\n    }\n\n    pub fn replica_path(&self) -> &Path {\n        self.replica.as_path()\n    }\n\n    pub fn safe_comm_r(&self) -> Result<<Tree::Hasher as Hasher>::Domain> {\n        as_safe_commitment(&self.comm_r, \"comm_r\")\n    }\n\n    pub fn safe_comm_c(&self) -> Result<<Tree::Hasher as Hasher>::Domain> {\n        Ok(self.aux.comm_c)\n    }\n\n    pub fn safe_comm_r_last(&self) -> Result<<Tree::Hasher as Hasher>::Domain> {\n        Ok(self.aux.comm_r_last)\n    }\n\n    /// Generate the merkle tree of this particular replica.\n    pub fn merkle_tree(\n        &self,\n        sector_size: SectorSize,\n    ) -> Result<\n        MerkleTreeWrapper<\n            Tree::Hasher,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >,\n    > {\n        let base_tree_size = get_base_tree_size::<Tree>(sector_size)?;\n        let base_tree_leafs = get_base_tree_leafs::<Tree>(base_tree_size)?;\n        trace!(\n            \"post: base tree size {}, base tree leafs {}, rows_to_discard {}, arities [{}, {}, {}]\",\n            base_tree_size,\n            base_tree_leafs,\n            default_rows_to_discard(base_tree_leafs, Tree::Arity::to_usize()),\n            Tree::Arity::to_usize(),\n            Tree::SubTreeArity::to_usize(),\n            Tree::TopTreeArity::to_usize(),\n        );\n\n        let mut config = StoreConfig::new(\n            self.cache_dir_path(),\n            CacheKey::CommRLastTree.to_string(),\n            default_rows_to_discard(base_tree_leafs, Tree::Arity::to_usize()),\n        );\n        config.size = Some(base_tree_size);\n\n        let tree_count = get_base_tree_count::<Tree>();\n        let (configs, replica_config) = split_config_and_replica(\n            config,\n            self.replica_path().to_path_buf(),\n            base_tree_leafs,\n            tree_count,\n        )?;\n\n        create_tree::<Tree>(base_tree_size, &configs, Some(&replica_config))\n    }\n}\n\n/// The minimal information required about a replica, in order to be able to verify\n/// a PoSt over it.\n#[derive(Clone, Debug, PartialEq, Eq, Hash)]\npub struct PublicReplicaInfo {\n    /// The replica commitment.\n    comm_r: Commitment,\n}\n\nimpl std::cmp::Ord for PublicReplicaInfo {\n    fn cmp(&self, other: &Self) -> std::cmp::Ordering {\n        self.comm_r.as_ref().cmp(other.comm_r.as_ref())\n    }\n}\n\nimpl std::cmp::PartialOrd for PublicReplicaInfo {\n    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl PublicReplicaInfo {\n    pub fn new(comm_r: Commitment) -> Result<Self> {\n        ensure!(comm_r != [0; 32], \"Invalid all zero commitment (comm_r)\");\n        Ok(PublicReplicaInfo { comm_r })\n    }\n\n    pub fn safe_comm_r<T: Domain>(&self) -> Result<T> {\n        as_safe_commitment(&self.comm_r, \"comm_r\")\n    }\n}\n\n// Ensure that any associated cached data persisted is discarded.\npub fn clear_cache<Tree: MerkleTreeTrait>(cache_dir: &Path) -> Result<()> {\n    let t_aux = {\n        let f_aux_path = 
cache_dir.to_path_buf().join(CacheKey::TAux.to_string());\n        let aux_bytes = std::fs::read(&f_aux_path)\n            .with_context(|| format!(\"could not read from path={:?}\", f_aux_path))?;\n\n        deserialize(&aux_bytes)\n    }?;\n\n    TemporaryAux::<Tree, DefaultPieceHasher>::clear_temp(t_aux)\n}\n\n// Ensure that any associated cached data persisted is discarded.\npub fn clear_caches<Tree: MerkleTreeTrait>(\n    replicas: &BTreeMap<SectorId, PrivateReplicaInfo<Tree>>,\n) -> Result<()> {\n    for replica in replicas.values() {\n        clear_cache::<Tree>(&replica.cache_dir.as_path())?;\n    }\n\n    Ok(())\n}\n\npub type SnarkProof = Vec<u8>;\n\n/// Generates a Winning proof-of-spacetime.\npub fn generate_winning_post<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    replicas: &[(SectorId, PrivateReplicaInfo<Tree>)],\n    prover_id: ProverId,\n) -> Result<SnarkProof> {\n    info!(\"generate_winning_post:start\");\n    ensure!(\n        post_config.typ == PoStType::Winning,\n        \"invalid post config type\"\n    );\n\n    ensure!(\n        replicas.len() == post_config.sector_count,\n        \"invalid amount of replicas\"\n    );\n\n    let randomness_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(randomness, \"randomness\")?;\n    let prover_id_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(&prover_id, \"prover_id\")?;\n\n    let vanilla_params = winning_post_setup_params(&post_config)?;\n    let param_sector_count = vanilla_params.sector_count;\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params,\n        partitions: None,\n        priority: post_config.priority,\n    };\n    let pub_params: compound_proof::PublicParams<fallback::FallbackPoSt<Tree>> =\n        fallback::FallbackPoStCompound::setup(&setup_params)?;\n    let groth_params = get_post_params::<Tree>(&post_config)?;\n\n    let trees = replicas\n        .iter()\n        .map(|(_, replica)| replica.merkle_tree(post_config.sector_size))\n        .collect::<Result<Vec<_>>>()?;\n\n    let mut pub_sectors = Vec::with_capacity(param_sector_count);\n    let mut priv_sectors = Vec::with_capacity(param_sector_count);\n\n    for _ in 0..param_sector_count {\n        for ((id, replica), tree) in replicas.iter().zip(trees.iter()) {\n            let comm_r = replica.safe_comm_r()?;\n            let comm_c = replica.safe_comm_c()?;\n            let comm_r_last = replica.safe_comm_r_last()?;\n\n            pub_sectors.push(fallback::PublicSector::<<Tree::Hasher as Hasher>::Domain> {\n                id: *id,\n                comm_r,\n            });\n            priv_sectors.push(fallback::PrivateSector {\n                tree,\n                comm_c,\n                comm_r_last,\n            });\n        }\n    }\n\n    let pub_inputs = fallback::PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n        randomness: randomness_safe,\n        prover_id: prover_id_safe,\n        sectors: &pub_sectors,\n        k: None,\n    };\n\n    let priv_inputs = fallback::PrivateInputs::<Tree> {\n        sectors: &priv_sectors,\n    };\n\n    let proof = fallback::FallbackPoStCompound::<Tree>::prove(\n        &pub_params,\n        &pub_inputs,\n        &priv_inputs,\n        &groth_params,\n    )?;\n    let proof = proof.to_vec()?;\n\n    info!(\"generate_winning_post:finish\");\n\n    Ok(proof)\n}\n\n/// Given some randomness and the length of available sectors, generates the challenged sectors.\n///\n/// The 
returned values are indices in the range of `0..sector_set_size`, requiring the caller\n/// to match the index to the correct sector.\npub fn generate_winning_post_sector_challenge<Tree: MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    sector_set_size: u64,\n    prover_id: Commitment,\n) -> Result<Vec<u64>> {\n    ensure!(sector_set_size != 0, \"empty sector set is invalid\");\n    ensure!(\n        post_config.typ == PoStType::Winning,\n        \"invalid post config type\"\n    );\n\n    let prover_id_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(&prover_id, \"prover_id\")?;\n\n    let randomness_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(randomness, \"randomness\")?;\n    fallback::generate_sector_challenges(\n        randomness_safe,\n        post_config.sector_count,\n        sector_set_size,\n        prover_id_safe,\n    )\n}\n\n/// Verifies a winning proof-of-spacetime.\n///\n/// The provided `replicas` must be the same ones as passed to `generate_winning_post`, and be based on\n/// the indices generated by `generate_winning_post_sector_challenge`. It is the responsibility of the\n/// caller to ensure this.\npub fn verify_winning_post<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    replicas: &[(SectorId, PublicReplicaInfo)],\n    prover_id: ProverId,\n    proof: &[u8],\n) -> Result<bool> {\n    info!(\"verify_winning_post:start\");\n\n    ensure!(\n        post_config.typ == PoStType::Winning,\n        \"invalid post config type\"\n    );\n    ensure!(\n        post_config.sector_count == replicas.len(),\n        \"invalid amount of replicas provided\"\n    );\n\n    let randomness_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(randomness, \"randomness\")?;\n    let prover_id_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(&prover_id, \"prover_id\")?;\n\n    let vanilla_params = winning_post_setup_params(&post_config)?;\n    let param_sector_count = vanilla_params.sector_count;\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params,\n        partitions: None,\n        priority: false,\n    };\n    let pub_params: compound_proof::PublicParams<fallback::FallbackPoSt<Tree>> =\n        fallback::FallbackPoStCompound::setup(&setup_params)?;\n\n    let verifying_key = get_post_verifying_key::<Tree>(&post_config)?;\n\n    let proof = MultiProof::new_from_reader(None, &proof[..], &verifying_key)?;\n    if proof.len() != 1 {\n        return Ok(false);\n    }\n\n    let mut pub_sectors = Vec::with_capacity(param_sector_count);\n    for _ in 0..param_sector_count {\n        for (id, replica) in replicas.iter() {\n            let comm_r = replica.safe_comm_r()?;\n            pub_sectors.push(fallback::PublicSector { id: *id, comm_r });\n        }\n    }\n\n    let pub_inputs = fallback::PublicInputs {\n        randomness: randomness_safe,\n        prover_id: prover_id_safe,\n        sectors: &pub_sectors,\n        k: None,\n    };\n\n    let is_valid = fallback::FallbackPoStCompound::verify(\n        &pub_params,\n        &pub_inputs,\n        &proof,\n        &fallback::ChallengeRequirements {\n            minimum_challenge_count: post_config.challenge_count * post_config.sector_count,\n        },\n    )?;\n\n    if !is_valid {\n        return Ok(false);\n    }\n\n    info!(\"verify_winning_post:finish\");\n\n    Ok(true)\n}\n\n/// Generates a Window proof-of-spacetime.\npub fn 
generate_window_post<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    replicas: &BTreeMap<SectorId, PrivateReplicaInfo<Tree>>,\n    prover_id: ProverId,\n) -> Result<SnarkProof> {\n    info!(\"generate_window_post:start\");\n    ensure!(\n        post_config.typ == PoStType::Window,\n        \"invalid post config type\"\n    );\n\n    let randomness_safe = as_safe_commitment(randomness, \"randomness\")?;\n    let prover_id_safe = as_safe_commitment(&prover_id, \"prover_id\")?;\n\n    let vanilla_params = window_post_setup_params(&post_config);\n    let partitions = get_partitions_for_window_post(replicas.len(), &post_config);\n\n    let sector_count = vanilla_params.sector_count;\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params,\n        partitions,\n        priority: post_config.priority,\n    };\n\n    let pub_params: compound_proof::PublicParams<fallback::FallbackPoSt<Tree>> =\n        fallback::FallbackPoStCompound::setup(&setup_params)?;\n    let groth_params = get_post_params::<Tree>(&post_config)?;\n\n    let trees: Vec<_> = replicas\n        .iter()\n        .map(|(_id, replica)| replica.merkle_tree(post_config.sector_size))\n        .collect::<Result<_>>()?;\n\n    let mut pub_sectors = Vec::with_capacity(sector_count);\n    let mut priv_sectors = Vec::with_capacity(sector_count);\n\n    for ((sector_id, replica), tree) in replicas.iter().zip(trees.iter()) {\n        let comm_r = replica.safe_comm_r()?;\n        let comm_c = replica.safe_comm_c()?;\n        let comm_r_last = replica.safe_comm_r_last()?;\n\n        pub_sectors.push(fallback::PublicSector {\n            id: *sector_id,\n            comm_r,\n        });\n        priv_sectors.push(fallback::PrivateSector {\n            tree,\n            comm_c,\n            comm_r_last,\n        });\n    }\n\n    let pub_inputs = fallback::PublicInputs {\n        randomness: randomness_safe,\n        prover_id: prover_id_safe,\n        sectors: &pub_sectors,\n        k: None,\n    };\n\n    let priv_inputs = fallback::PrivateInputs::<Tree> {\n        sectors: &priv_sectors,\n    };\n\n    let proof = fallback::FallbackPoStCompound::prove(\n        &pub_params,\n        &pub_inputs,\n        &priv_inputs,\n        &groth_params,\n    )?;\n\n    info!(\"generate_window_post:finish\");\n\n    Ok(proof.to_vec()?)\n}\n\n/// Verifies a window proof-of-spacetime.\npub fn verify_window_post<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    replicas: &BTreeMap<SectorId, PublicReplicaInfo>,\n    prover_id: ProverId,\n    proof: &[u8],\n) -> Result<bool> {\n    info!(\"verify_window_post:start\");\n\n    ensure!(\n        post_config.typ == PoStType::Window,\n        \"invalid post config type\"\n    );\n\n    let randomness_safe = as_safe_commitment(randomness, \"randomness\")?;\n    let prover_id_safe = as_safe_commitment(&prover_id, \"prover_id\")?;\n\n    let vanilla_params = window_post_setup_params(&post_config);\n    let partitions = get_partitions_for_window_post(replicas.len(), &post_config);\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params,\n        partitions,\n        priority: false,\n    };\n    let pub_params: compound_proof::PublicParams<fallback::FallbackPoSt<Tree>> =\n        fallback::FallbackPoStCompound::setup(&setup_params)?;\n\n    let verifying_key = get_post_verifying_key::<Tree>(&post_config)?;\n\n    let proof = MultiProof::new_from_reader(partitions, 
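// note: must match the partition count used when the proof was generated\n        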
&proof[..], &verifying_key)?;\n\n    let pub_sectors: Vec<_> = replicas\n        .iter()\n        .map(|(sector_id, replica)| {\n            let comm_r = replica.safe_comm_r()?;\n            Ok(fallback::PublicSector {\n                id: *sector_id,\n                comm_r,\n            })\n        })\n        .collect::<Result<_>>()?;\n\n    let pub_inputs = fallback::PublicInputs {\n        randomness: randomness_safe,\n        prover_id: prover_id_safe,\n        sectors: &pub_sectors,\n        k: None,\n    };\n\n    let is_valid = fallback::FallbackPoStCompound::verify(\n        &pub_params,\n        &pub_inputs,\n        &proof,\n        &fallback::ChallengeRequirements {\n            minimum_challenge_count: post_config.challenge_count * post_config.sector_count,\n        },\n    )?;\n\n    if !is_valid {\n        return Ok(false);\n    }\n\n    info!(\"verify_window_post:finish\");\n\n    Ok(true)\n}\n\nfn get_partitions_for_window_post(\n    total_sector_count: usize,\n    post_config: &PoStConfig,\n) -> Option<usize> {\n    let partitions = (total_sector_count as f32 / post_config.sector_count as f32).ceil() as usize;\n\n    if partitions > 1 {\n        Some(partitions)\n    } else {\n        None\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/post_util.rs",
    "content": "use std::collections::BTreeMap;\nuse std::fs;\nuse std::path::Path;\n\nuse anyhow::{anyhow, ensure, Context, Result};\nuse bincode::deserialize;\nuse filecoin_hashers::Hasher;\nuse log::{info, trace};\nuse storage_proofs_core::{\n    cache_key::CacheKey, merkle::MerkleTreeTrait, proof::ProofScheme, sector::SectorId,\n};\nuse storage_proofs_post::fallback::{self, generate_leaf_challenge, FallbackPoSt, SectorProof};\n\nuse crate::{\n    api::as_safe_commitment,\n    constants::DefaultPieceHasher,\n    types::{\n        ChallengeSeed, FallbackPoStSectorProof, PoStConfig, PrivateReplicaInfo, ProverId,\n        TemporaryAux, VanillaProof,\n    },\n    PoStType,\n};\n\n// Ensure that any associated cached data persisted is discarded.\npub fn clear_cache<Tree: MerkleTreeTrait>(cache_dir: &Path) -> Result<()> {\n    info!(\"clear_cache:start\");\n\n    let t_aux = {\n        let f_aux_path = cache_dir.to_path_buf().join(CacheKey::TAux.to_string());\n        let aux_bytes = fs::read(&f_aux_path)\n            .with_context(|| format!(\"could not read from path={:?}\", f_aux_path))?;\n\n        deserialize(&aux_bytes)\n    }?;\n\n    let result = TemporaryAux::<Tree, DefaultPieceHasher>::clear_temp(t_aux);\n\n    info!(\"clear_cache:finish\");\n\n    result\n}\n\n// Ensure that any associated cached data persisted is discarded.\npub fn clear_caches<Tree: MerkleTreeTrait>(\n    replicas: &BTreeMap<SectorId, PrivateReplicaInfo<Tree>>,\n) -> Result<()> {\n    info!(\"clear_caches:start\");\n\n    for replica in replicas.values() {\n        clear_cache::<Tree>(&replica.cache_dir.as_path())?;\n    }\n\n    info!(\"clear_caches:finish\");\n\n    Ok(())\n}\n\n/// Generates the challenges per SectorId required for either a Window\n/// proof-of-spacetime or a Winning proof-of-spacetime.\npub fn generate_fallback_sector_challenges<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    pub_sectors: &[SectorId],\n    _prover_id: ProverId,\n) -> Result<BTreeMap<SectorId, Vec<u64>>> {\n    info!(\"generate_sector_challenges:start\");\n    ensure!(\n        post_config.typ == PoStType::Window || post_config.typ == PoStType::Winning,\n        \"invalid post config type\"\n    );\n\n    let randomness_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(randomness, \"randomness\")?;\n\n    let public_params = fallback::PublicParams {\n        sector_size: u64::from(post_config.sector_size),\n        challenge_count: post_config.challenge_count,\n        sector_count: post_config.sector_count,\n        api_version: post_config.api_version,\n    };\n\n    let mut sector_challenges: BTreeMap<SectorId, Vec<u64>> = BTreeMap::new();\n\n    let num_sectors_per_chunk = post_config.sector_count;\n    let partitions = match post_config.typ {\n        PoStType::Window => {\n            get_partitions_for_window_post(pub_sectors.len(), &post_config).unwrap_or(1)\n        }\n        PoStType::Winning => 1,\n    };\n\n    for partition_index in 0..partitions {\n        let sectors = pub_sectors\n            .chunks(num_sectors_per_chunk)\n            .nth(partition_index)\n            .ok_or_else(|| anyhow!(\"invalid number of sectors/partition index\"))?;\n\n        for (i, sector) in sectors.iter().enumerate() {\n            let mut challenges = Vec::new();\n\n            for n in 0..post_config.challenge_count {\n                let challenge_index = ((partition_index * post_config.sector_count + i)\n                    * 
post_config.challenge_count\n                    + n) as u64;\n                let challenged_leaf = generate_leaf_challenge(\n                    &public_params,\n                    randomness_safe,\n                    u64::from(*sector),\n                    challenge_index,\n                );\n                challenges.push(challenged_leaf);\n            }\n\n            sector_challenges.insert(*sector, challenges);\n        }\n    }\n\n    info!(\"generate_sector_challenges:finish\");\n\n    Ok(sector_challenges)\n}\n\n/// Generates a single vanilla proof required for either Window proof-of-spacetime\n/// or Winning proof-of-spacetime.\npub fn generate_single_vanilla_proof<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    sector_id: SectorId,\n    replica: &PrivateReplicaInfo<Tree>,\n    challenges: &[u64],\n) -> Result<FallbackPoStSectorProof<Tree>> {\n    info!(\"generate_single_vanilla_proof:start: {:?}\", sector_id);\n\n    let tree = &replica\n        .merkle_tree(post_config.sector_size)\n        .with_context(|| {\n            format!(\n                \"generate_single_vanilla_proof: merkle_tree failed: {:?}\",\n                sector_id\n            )\n        })?;\n    let comm_r = replica.safe_comm_r().with_context(|| {\n        format!(\n            \"generate_single_vanilla_proof: safe_comm_r failed: {:?}\",\n            sector_id\n        )\n    })?;\n    let comm_c = replica.safe_comm_c();\n    let comm_r_last = replica.safe_comm_r_last();\n\n    let priv_sectors = vec![fallback::PrivateSector {\n        tree,\n        comm_c,\n        comm_r_last,\n    }];\n\n    let priv_inputs = fallback::PrivateInputs::<Tree> {\n        sectors: &priv_sectors,\n    };\n\n    let vanilla_proof =\n        fallback::vanilla_proof(sector_id, &priv_inputs, challenges).with_context(|| {\n            format!(\n                \"generate_single_vanilla_proof: vanilla_proof failed: {:?}\",\n                sector_id\n            )\n        })?;\n\n    info!(\"generate_single_vanilla_proof:finish: {:?}\", sector_id);\n\n    Ok(FallbackPoStSectorProof {\n        sector_id,\n        comm_r,\n        vanilla_proof,\n    })\n}\n\n// Partition a flat vector of vanilla sector proofs.  
The post_config\n// (PoSt) type is required in order to determine the proper shape of\n// the returned partitioned proofs.\npub fn partition_vanilla_proofs<Tree: MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    pub_params: &fallback::PublicParams,\n    pub_inputs: &fallback::PublicInputs<<Tree::Hasher as Hasher>::Domain>,\n    partition_count: usize,\n    vanilla_proofs: &[FallbackPoStSectorProof<Tree>],\n) -> Result<Vec<VanillaProof<Tree>>> {\n    info!(\"partition_vanilla_proofs:start\");\n    ensure!(\n        post_config.typ == PoStType::Window || post_config.typ == PoStType::Winning,\n        \"invalid post config type\"\n    );\n\n    let num_sectors_per_chunk = pub_params.sector_count;\n    let num_sectors = pub_inputs.sectors.len();\n\n    ensure!(\n        num_sectors <= partition_count * num_sectors_per_chunk,\n        \"cannot prove the provided number of sectors: {} > {} * {}\",\n        num_sectors,\n        partition_count,\n        num_sectors_per_chunk,\n    );\n\n    let mut partition_proofs = Vec::new();\n\n    // Note that the partition proofs returned are shaped differently\n    // based on which type of PoSt is being considered.\n    match post_config.typ {\n        PoStType::Window => {\n            for (j, sectors_chunk) in pub_inputs.sectors.chunks(num_sectors_per_chunk).enumerate() {\n                trace!(\"processing partition {}\", j);\n\n                let mut sector_proofs = Vec::with_capacity(num_sectors_per_chunk);\n\n                for pub_sector in sectors_chunk.iter() {\n                    let cur_proof = vanilla_proofs\n                        .iter()\n                        .find(|&proof| proof.sector_id == pub_sector.id)\n                        .expect(\"failed to locate sector proof\");\n\n                    // Note: Window post requires all inclusion proofs (based on the challenge\n                    // count per sector) per sector proof.\n                    sector_proofs.extend(cur_proof.vanilla_proof.sectors.clone());\n                }\n\n                // If fewer than the required number of sectors were provided, we duplicate the last one\n                // to pad the proof out, such that it works in the circuit part.\n                while sector_proofs.len() < num_sectors_per_chunk {\n                    sector_proofs.push(sector_proofs[sector_proofs.len() - 1].clone());\n                }\n\n                partition_proofs.push(fallback::Proof::<<Tree as MerkleTreeTrait>::Proof> {\n                    sectors: sector_proofs,\n                });\n            }\n        }\n        PoStType::Winning => {\n            for (j, sectors_chunk) in vanilla_proofs.chunks(num_sectors_per_chunk).enumerate() {\n                trace!(\"processing partition {}\", j);\n\n                // Sanity check incoming structure\n                ensure!(\n                    sectors_chunk.len() == 1,\n                    \"Invalid sector chunk for Winning PoSt\"\n                );\n                ensure!(\n                    sectors_chunk[0].vanilla_proof.sectors.len() == 1,\n                    \"Invalid sector count for Winning PoSt chunk\"\n                );\n\n                // Winning post sector_count is winning post challenges per sector\n                ensure!(\n                    post_config.sector_count == sectors_chunk[0].vanilla_proof.sectors.len(),\n                    \"invalid number of sector proofs for Winning PoSt\"\n                );\n\n                let mut sector_proofs = 
Vec::with_capacity(post_config.challenge_count);\n                let cur_sector_proof = &sectors_chunk[0].vanilla_proof.sectors[0];\n\n                // Unroll inclusion proofs from the single provided sector_proof (per partition)\n                // into individual sector proofs, required for winning post.\n                for cur_inclusion_proof in cur_sector_proof.inclusion_proofs() {\n                    sector_proofs.push(SectorProof {\n                        inclusion_proofs: vec![cur_inclusion_proof.clone()],\n                        comm_c: cur_sector_proof.comm_c,\n                        comm_r_last: cur_sector_proof.comm_r_last,\n                    });\n                }\n\n                // If fewer than the required number of sectors were provided, we duplicate the last one\n                // to pad the proof out, such that it works in the circuit part.\n                while sector_proofs.len() < num_sectors_per_chunk {\n                    sector_proofs.push(sector_proofs[sector_proofs.len() - 1].clone());\n                }\n\n                // Winning post challenge count is the total winning post challenges\n                ensure!(\n                    sector_proofs.len() == post_config.challenge_count,\n                    \"invalid number of partition proofs based on Winning PoSt challenges\"\n                );\n\n                partition_proofs.push(fallback::Proof::<<Tree as MerkleTreeTrait>::Proof> {\n                    sectors: sector_proofs,\n                });\n            }\n        }\n    }\n\n    info!(\"partition_vanilla_proofs:finish\");\n\n    ensure!(\n        FallbackPoSt::<Tree>::verify_all_partitions(pub_params, pub_inputs, &partition_proofs)?,\n        \"partitioned vanilla proofs failed to verify\"\n    );\n\n    Ok(partition_proofs)\n}\n\npub(crate) fn get_partitions_for_window_post(\n    total_sector_count: usize,\n    post_config: &PoStConfig,\n) -> Option<usize> {\n    let partitions = (total_sector_count as f32 / post_config.sector_count as f32).ceil() as usize;\n\n    if partitions > 1 {\n        Some(partitions)\n    } else {\n        None\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/responses.rs",
    "content": "use crate::api::sector_builder::errors::SectorBuilderErr;\nuse crate::api::sector_builder::SectorBuilder;\nuse crate::api::{API_POREP_PROOF_BYTES, API_POST_PROOF_BYTES};\nuse failure::Error;\nuse ffi_toolkit::free_c_str;\nuse libc;\nuse sector_base::api::errors::SectorManagerErr;\nuse std::ffi::CString;\nuse std::mem;\nuse std::ptr;\n\n#[repr(C)]\n#[derive(PartialEq, Debug)]\npub enum FCPResponseStatus {\n    // Don't use FCPSuccess, since that complicates description of 'successful' verification.\n    FCPNoError = 0,\n    FCPUnclassifiedError = 1,\n    FCPCallerError = 2,\n    FCPReceiverError = 3,\n}\n\n#[repr(C)]\n#[derive(PartialEq, Debug)]\npub enum FFISealStatus {\n    Sealed = 0,\n    Pending = 1,\n    Failed = 2,\n    Sealing = 3,\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// VerifySealResponse\n//////////////////////\n\n#[repr(C)]\npub struct VerifySealResponse {\n    pub status_code: FCPResponseStatus,\n    pub error_msg: *const libc::c_char,\n    pub is_valid: bool,\n}\n\nimpl Default for VerifySealResponse {\n    fn default() -> VerifySealResponse {\n        VerifySealResponse {\n            status_code: FCPResponseStatus::FCPNoError,\n            error_msg: ptr::null(),\n            is_valid: false,\n        }\n    }\n}\n\nimpl Drop for VerifySealResponse {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.error_msg as *mut libc::c_char);\n        };\n    }\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_verify_seal_response(ptr: *mut VerifySealResponse) {\n    let _ = Box::from_raw(ptr);\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// GeneratePoSTResult\n//////////////////////\n\n#[repr(C)]\npub struct GeneratePoSTResponse {\n    pub status_code: FCPResponseStatus,\n    pub error_msg: *const libc::c_char,\n    pub faults_len: libc::size_t,\n    pub faults_ptr: *const u64,\n    pub proof: [u8; API_POST_PROOF_BYTES],\n}\n\nimpl Default for GeneratePoSTResponse {\n    fn default() -> GeneratePoSTResponse {\n        GeneratePoSTResponse {\n            status_code: FCPResponseStatus::FCPNoError,\n            error_msg: ptr::null(),\n            faults_len: 0,\n            faults_ptr: ptr::null(),\n            proof: [0; API_POST_PROOF_BYTES],\n        }\n    }\n}\n\nimpl Drop for GeneratePoSTResponse {\n    fn drop(&mut self) {\n        unsafe {\n            drop(Vec::from_raw_parts(\n                self.faults_ptr as *mut u8,\n                self.faults_len,\n                self.faults_len,\n            ));\n\n            free_c_str(self.error_msg as *mut libc::c_char);\n        };\n    }\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_generate_post_response(ptr: *mut GeneratePoSTResponse) {\n    let _ = Box::from_raw(ptr);\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// VerifyPoSTResult\n////////////////////\n\n#[repr(C)]\npub struct VerifyPoSTResponse {\n    pub status_code: FCPResponseStatus,\n    pub error_msg: *const libc::c_char,\n    pub is_valid: bool,\n}\n\nimpl Default for VerifyPoSTResponse {\n    fn default() -> VerifyPoSTResponse {\n        VerifyPoSTResponse {\n            status_code: FCPResponseStatus::FCPNoError,\n            error_msg: ptr::null(),\n            is_valid: false,\n        }\n    }\n}\n\nimpl Drop for VerifyPoSTResponse {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.error_msg as *mut libc::c_char);\n        };\n    
}\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_verify_post_response(ptr: *mut VerifyPoSTResponse) {\n    let _ = Box::from_raw(ptr);\n}\n\n// err_code_and_msg accepts an Error struct and produces a tuple of response\n// status code and a pointer to a C string, both of which can be used to set\n// fields in a response struct to be returned from an FFI call.\npub fn err_code_and_msg(err: &Error) -> (FCPResponseStatus, *const libc::c_char) {\n    use crate::api::responses::FCPResponseStatus::*;\n\n    let msg = CString::new(format!(\"{}\", err)).unwrap();\n    let ptr = msg.as_ptr();\n    mem::forget(msg);\n\n    match err.downcast_ref() {\n        Some(SectorBuilderErr::OverflowError { .. }) => return (FCPCallerError, ptr),\n        Some(SectorBuilderErr::IncompleteWriteError { .. }) => return (FCPReceiverError, ptr),\n        Some(SectorBuilderErr::Unrecoverable(_, _)) => return (FCPReceiverError, ptr),\n        Some(SectorBuilderErr::PieceNotFound(_)) => return (FCPCallerError, ptr),\n        None => (),\n    }\n\n    match err.downcast_ref() {\n        Some(SectorManagerErr::UnclassifiedError(_)) => return (FCPUnclassifiedError, ptr),\n        Some(SectorManagerErr::CallerError(_)) => return (FCPCallerError, ptr),\n        Some(SectorManagerErr::ReceiverError(_)) => return (FCPReceiverError, ptr),\n        None => (),\n    }\n\n    (FCPUnclassifiedError, ptr)\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// InitSectorBuilderResponse\n/////////////////////////////\n\n#[repr(C)]\npub struct InitSectorBuilderResponse {\n    pub status_code: FCPResponseStatus,\n    pub error_msg: *const libc::c_char,\n    pub sector_builder: *mut SectorBuilder,\n}\n\nimpl Default for InitSectorBuilderResponse {\n    fn default() -> InitSectorBuilderResponse {\n        InitSectorBuilderResponse {\n            status_code: FCPResponseStatus::FCPNoError,\n            error_msg: ptr::null(),\n            sector_builder: ptr::null_mut(),\n        }\n    }\n}\n\nimpl Drop for InitSectorBuilderResponse {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.error_msg as *mut libc::c_char);\n        };\n    }\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_init_sector_builder_response(ptr: *mut InitSectorBuilderResponse) {\n    let _ = Box::from_raw(ptr);\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// AddPieceResponse\n////////////////////\n\n#[repr(C)]\npub struct AddPieceResponse {\n    pub status_code: FCPResponseStatus,\n    pub error_msg: *const libc::c_char,\n    pub sector_id: u64,\n}\n\nimpl Default for AddPieceResponse {\n    fn default() -> AddPieceResponse {\n        AddPieceResponse {\n            status_code: FCPResponseStatus::FCPNoError,\n            error_msg: ptr::null(),\n            sector_id: 0,\n        }\n    }\n}\n\nimpl Drop for AddPieceResponse {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.error_msg as *mut libc::c_char);\n        };\n    }\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_add_piece_response(ptr: *mut AddPieceResponse) {\n    let _ = Box::from_raw(ptr);\n}\n\n////////////////////////////////////////////////////////////////////////////////\n/// ReadPieceFromSealedSectorResponse\n/////////////////////////////////////\n\n#[repr(C)]\npub struct ReadPieceFromSealedSectorResponse {\n    pub status_code: FCPResponseStatus,\n    pub error_msg: *const libc::c_char,\n    pub data_len: libc::size_t,\n    pub data_ptr: *const 
u8,\n}\n\nimpl Default for ReadPieceFromSealedSectorResponse {\n    fn default() -> ReadPieceFromSealedSectorResponse {\n        ReadPieceFromSealedSectorResponse {\n            status_code: FCPResponseStatus::FCPNoError,\n            error_msg: ptr::null(),\n            data_len: 0,\n            data_ptr: ptr::null(),\n        }\n    }\n}\n\nimpl Drop for ReadPieceFromSealedSectorResponse {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.error_msg as *mut libc::c_char);\n\n            drop(Vec::from_raw_parts(\n                self.data_ptr as *mut u8,\n                self.data_len,\n                self.data_len,\n            ));\n        };\n    }\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_read_piece_from_sealed_sector_response(\n    ptr: *mut ReadPieceFromSealedSectorResponse,\n) {\n    let _ = Box::from_raw(ptr);\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// SealAllStagedSectorsResponse\n////////////////////////////////\n\n#[repr(C)]\npub struct SealAllStagedSectorsResponse {\n    pub status_code: FCPResponseStatus,\n    pub error_msg: *const libc::c_char,\n}\n\nimpl Default for SealAllStagedSectorsResponse {\n    fn default() -> SealAllStagedSectorsResponse {\n        SealAllStagedSectorsResponse {\n            status_code: FCPResponseStatus::FCPNoError,\n            error_msg: ptr::null(),\n        }\n    }\n}\n\nimpl Drop for SealAllStagedSectorsResponse {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.error_msg as *mut libc::c_char);\n        };\n    }\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_seal_all_staged_sectors_response(\n    ptr: *mut SealAllStagedSectorsResponse,\n) {\n    let _ = Box::from_raw(ptr);\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// GetMaxStagedBytesPerSector\n//////////////////////////////\n\n#[repr(C)]\npub struct GetMaxStagedBytesPerSector {\n    pub status_code: FCPResponseStatus,\n    pub error_msg: *const libc::c_char,\n    pub max_staged_bytes_per_sector: u64,\n}\n\nimpl Default for GetMaxStagedBytesPerSector {\n    fn default() -> GetMaxStagedBytesPerSector {\n        GetMaxStagedBytesPerSector {\n            status_code: FCPResponseStatus::FCPNoError,\n            error_msg: ptr::null(),\n            max_staged_bytes_per_sector: 0,\n        }\n    }\n}\n\nimpl Drop for GetMaxStagedBytesPerSector {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.error_msg as *mut libc::c_char);\n        };\n    }\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_get_max_user_bytes_per_staged_sector_response(\n    ptr: *mut GetMaxStagedBytesPerSector,\n) {\n    let _ = Box::from_raw(ptr);\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// GetSealStatusResponse\n/////////////////////////\n\n#[repr(C)]\npub struct GetSealStatusResponse {\n    pub status_code: FCPResponseStatus,\n    pub error_msg: *const libc::c_char,\n\n    pub seal_status_code: FFISealStatus,\n\n    // sealing failed - here's the error\n    pub seal_error_msg: *const libc::c_char,\n\n    // sealed sector metadata\n    pub comm_d: [u8; 32],\n    pub comm_r: [u8; 32],\n    pub comm_r_star: [u8; 32],\n    pub sector_access: *const libc::c_char,\n    pub sector_id: u64,\n    pub snark_proof: [u8; API_POREP_PROOF_BYTES],\n    pub pieces_len: libc::size_t,\n    pub pieces_ptr: *const FFIPieceMetadata,\n}\n\n#[repr(C)]\npub struct FFIPieceMetadata {\n    pub piece_key: *const 
libc::c_char,\n    pub num_bytes: u64,\n}\n\nimpl Drop for FFIPieceMetadata {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.piece_key as *mut libc::c_char);\n        }\n    }\n}\n\nimpl Default for GetSealStatusResponse {\n    fn default() -> GetSealStatusResponse {\n        GetSealStatusResponse {\n            status_code: FCPResponseStatus::FCPNoError,\n            error_msg: ptr::null(),\n\n            seal_status_code: FFISealStatus::Failed,\n\n            seal_error_msg: ptr::null(),\n\n            comm_d: Default::default(),\n            comm_r: Default::default(),\n            comm_r_star: Default::default(),\n            pieces_len: 0,\n            pieces_ptr: ptr::null(),\n            sector_access: ptr::null(),\n            sector_id: 0,\n            snark_proof: [0; API_POREP_PROOF_BYTES],\n        }\n    }\n}\n\nimpl Drop for GetSealStatusResponse {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.error_msg as *mut libc::c_char);\n            free_c_str(self.seal_error_msg as *mut libc::c_char);\n            free_c_str(self.sector_access as *mut libc::c_char);\n            drop(Vec::from_raw_parts(\n                self.pieces_ptr as *mut FFIPieceMetadata,\n                self.pieces_len,\n                self.pieces_len,\n            ));\n        };\n    }\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_get_seal_status_response(ptr: *mut GetSealStatusResponse) {\n    let _ = Box::from_raw(ptr);\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// FFIStagedSectorMetadata\n///////////////////////////\n\n#[repr(C)]\npub struct FFIStagedSectorMetadata {\n    pub sector_access: *const libc::c_char,\n    pub sector_id: u64,\n    pub pieces_len: libc::size_t,\n    pub pieces_ptr: *const FFIPieceMetadata,\n\n    // must be one of: Pending, Failed, Sealing\n    pub seal_status_code: FFISealStatus,\n\n    // if sealing failed - here's the error\n    pub seal_error_msg: *const libc::c_char,\n}\n\nimpl Drop for FFIStagedSectorMetadata {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.sector_access as *mut libc::c_char);\n            free_c_str(self.seal_error_msg as *mut libc::c_char);\n            drop(Vec::from_raw_parts(\n                self.pieces_ptr as *mut FFIPieceMetadata,\n                self.pieces_len,\n                self.pieces_len,\n            ));\n        }\n    }\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// FFISealedSectorMetadata\n///////////////////////////\n\n#[repr(C)]\npub struct FFISealedSectorMetadata {\n    pub comm_d: [u8; 32],\n    pub comm_r: [u8; 32],\n    pub comm_r_star: [u8; 32],\n    pub sector_access: *const libc::c_char,\n    pub sector_id: u64,\n    pub snark_proof: [u8; API_POREP_PROOF_BYTES],\n    pub pieces_len: libc::size_t,\n    pub pieces_ptr: *const FFIPieceMetadata,\n}\n\nimpl Drop for FFISealedSectorMetadata {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.sector_access as *mut libc::c_char);\n            drop(Vec::from_raw_parts(\n                self.pieces_ptr as *mut FFIPieceMetadata,\n                self.pieces_len,\n                self.pieces_len,\n            ));\n        }\n    }\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// GetSealedSectorsResponse\n////////////////////////////\n\n#[repr(C)]\npub struct GetSealedSectorsResponse {\n    pub status_code: FCPResponseStatus,\n    pub error_msg: *const libc::c_char,\n\n 
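   // sectors_len/sectors_ptr describe a heap-allocated array of sealed-sector\n    // metadata; ownership is reclaimed and the memory freed in Drop below\n 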
   pub sectors_len: libc::size_t,\n    pub sectors_ptr: *const FFISealedSectorMetadata,\n}\n\nimpl Default for GetSealedSectorsResponse {\n    fn default() -> GetSealedSectorsResponse {\n        GetSealedSectorsResponse {\n            status_code: FCPResponseStatus::FCPNoError,\n            error_msg: ptr::null(),\n            sectors_len: 0,\n            sectors_ptr: ptr::null(),\n        }\n    }\n}\n\nimpl Drop for GetSealedSectorsResponse {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.error_msg as *mut libc::c_char);\n            drop(Vec::from_raw_parts(\n                self.sectors_ptr as *mut FFISealedSectorMetadata,\n                self.sectors_len,\n                self.sectors_len,\n            ));\n        }\n    }\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_get_sealed_sectors_response(ptr: *mut GetSealedSectorsResponse) {\n    let _ = Box::from_raw(ptr);\n}\n\n///////////////////////////////////////////////////////////////////////////////\n/// GetStagedSectorsResponse\n////////////////////////////\n\n#[repr(C)]\npub struct GetStagedSectorsResponse {\n    pub status_code: FCPResponseStatus,\n    pub error_msg: *const libc::c_char,\n\n    pub sectors_len: libc::size_t,\n    pub sectors_ptr: *const FFIStagedSectorMetadata,\n}\n\nimpl Default for GetStagedSectorsResponse {\n    fn default() -> GetStagedSectorsResponse {\n        GetStagedSectorsResponse {\n            status_code: FCPResponseStatus::FCPNoError,\n            error_msg: ptr::null(),\n            sectors_len: 0,\n            sectors_ptr: ptr::null(),\n        }\n    }\n}\n\nimpl Drop for GetStagedSectorsResponse {\n    fn drop(&mut self) {\n        unsafe {\n            free_c_str(self.error_msg as *mut libc::c_char);\n            drop(Vec::from_raw_parts(\n                self.sectors_ptr as *mut FFIStagedSectorMetadata,\n                self.sectors_len,\n                self.sectors_len,\n            ));\n        }\n    }\n}\n\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_get_staged_sectors_response(ptr: *mut GetStagedSectorsResponse) {\n    let _ = Box::from_raw(ptr);\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/seal.rs",
    "content": "use std::fs::{self, metadata, File, OpenOptions};\nuse std::io::Write;\nuse std::path::{Path, PathBuf};\n\nuse anyhow::{ensure, Context, Result};\nuse bellperson::bls::Fr;\nuse bincode::{deserialize, serialize};\nuse filecoin_hashers::{Domain, Hasher};\nuse log::{info, trace};\nuse memmap::MmapOptions;\nuse merkletree::store::{DiskStore, Store, StoreConfig};\nuse storage_proofs_core::{\n    cache_key::CacheKey,\n    compound_proof::{self, CompoundProof},\n    drgraph::Graph,\n    measurements::{measure_op, Operation},\n    merkle::{create_base_merkle_tree, BinaryMerkleTree, MerkleTreeTrait},\n    multi_proof::MultiProof,\n    proof::ProofScheme,\n    sector::SectorId,\n    util::default_rows_to_discard,\n    Data,\n};\nuse storage_proofs_porep::stacked::{\n    self, generate_replica_id, ChallengeRequirements, StackedCompound, StackedDrg, Tau,\n    TemporaryAux, TemporaryAuxCache,\n};\n\nuse crate::{\n    api::{as_safe_commitment, commitment_from_fr, get_base_tree_leafs, get_base_tree_size},\n    caches::{get_stacked_params, get_stacked_verifying_key},\n    constants::{\n        DefaultBinaryTree, DefaultPieceDomain, DefaultPieceHasher, POREP_MINIMUM_CHALLENGES,\n        SINGLE_PARTITION_PROOF_LEN,\n    },\n    parameters::setup_params,\n    pieces::{self, verify_pieces},\n    types::{\n        Commitment, PaddedBytesAmount, PieceInfo, PoRepConfig, PoRepProofPartitions, ProverId,\n        SealCommitOutput, SealCommitPhase1Output, SealPreCommitOutput, SealPreCommitPhase1Output,\n        SectorSize, Ticket, BINARY_ARITY,\n    },\n};\n\n#[allow(clippy::too_many_arguments)]\npub fn seal_pre_commit_phase1<R, S, T, Tree: 'static + MerkleTreeTrait>(\n    porep_config: PoRepConfig,\n    cache_path: R,\n    in_path: S,\n    out_path: T,\n    prover_id: ProverId,\n    sector_id: SectorId,\n    ticket: Ticket,\n    piece_infos: &[PieceInfo],\n) -> Result<SealPreCommitPhase1Output<Tree>>\nwhere\n    R: AsRef<Path>,\n    S: AsRef<Path>,\n    T: AsRef<Path>,\n{\n    info!(\"seal_pre_commit_phase1:start: {:?}\", sector_id);\n\n    // Sanity check all input path types.\n    ensure!(\n        metadata(in_path.as_ref())?.is_file(),\n        \"in_path must be a file\"\n    );\n    ensure!(\n        metadata(out_path.as_ref())?.is_file(),\n        \"out_path must be a file\"\n    );\n    ensure!(\n        metadata(cache_path.as_ref())?.is_dir(),\n        \"cache_path must be a directory\"\n    );\n\n    let sector_bytes = usize::from(PaddedBytesAmount::from(porep_config));\n    fs::metadata(&in_path)\n        .with_context(|| format!(\"could not read in_path={:?})\", in_path.as_ref().display()))?;\n\n    fs::metadata(&out_path)\n        .with_context(|| format!(\"could not read out_path={:?}\", out_path.as_ref().display()))?;\n\n    // Copy unsealed data to output location, where it will be sealed in place.\n    fs::copy(&in_path, &out_path).with_context(|| {\n        format!(\n            \"could not copy in_path={:?} to out_path={:?}\",\n            in_path.as_ref().display(),\n            out_path.as_ref().display()\n        )\n    })?;\n\n    let f_data = OpenOptions::new()\n        .read(true)\n        .write(true)\n        .open(&out_path)\n        .with_context(|| format!(\"could not open out_path={:?}\", out_path.as_ref().display()))?;\n\n    // Zero-pad the data to the requested size by extending the underlying file if needed.\n    f_data.set_len(sector_bytes as u64)?;\n\n    let data = unsafe {\n        MmapOptions::new()\n            .map_mut(&f_data)\n            .with_context(|| 
format!(\"could not mmap out_path={:?}\", out_path.as_ref().display()))?\n    };\n\n    let compound_setup_params = compound_proof::SetupParams {\n        vanilla_params: setup_params(\n            PaddedBytesAmount::from(porep_config),\n            usize::from(PoRepProofPartitions::from(porep_config)),\n            porep_config.porep_id,\n            porep_config.api_version,\n        )?,\n        partitions: Some(usize::from(PoRepProofPartitions::from(porep_config))),\n        priority: false,\n    };\n\n    let compound_public_params = <StackedCompound<Tree, DefaultPieceHasher> as CompoundProof<\n        StackedDrg<'_, Tree, DefaultPieceHasher>,\n        _,\n    >>::setup(&compound_setup_params)?;\n\n    info!(\"building merkle tree for the original data\");\n    let (config, comm_d) = measure_op(Operation::CommD, || -> Result<_> {\n        let base_tree_size = get_base_tree_size::<DefaultBinaryTree>(porep_config.sector_size)?;\n        let base_tree_leafs = get_base_tree_leafs::<DefaultBinaryTree>(base_tree_size)?;\n        ensure!(\n            compound_public_params.vanilla_params.graph.size() == base_tree_leafs,\n            \"graph size and leaf size don't match\"\n        );\n\n        trace!(\n            \"seal phase 1: sector_size {}, base tree size {}, base tree leafs {}\",\n            u64::from(porep_config.sector_size),\n            base_tree_size,\n            base_tree_leafs,\n        );\n\n        let mut config = StoreConfig::new(\n            cache_path.as_ref(),\n            CacheKey::CommDTree.to_string(),\n            default_rows_to_discard(base_tree_leafs, BINARY_ARITY),\n        );\n\n        let data_tree = create_base_merkle_tree::<BinaryMerkleTree<DefaultPieceHasher>>(\n            Some(config.clone()),\n            base_tree_leafs,\n            &data,\n        )?;\n        drop(data);\n\n        config.size = Some(data_tree.len());\n        let comm_d_root: Fr = data_tree.root().into();\n        let comm_d = commitment_from_fr(comm_d_root);\n\n        drop(data_tree);\n\n        Ok((config, comm_d))\n    })?;\n\n    info!(\"verifying pieces\");\n\n    ensure!(\n        verify_pieces(&comm_d, piece_infos, porep_config.into())?,\n        \"pieces and comm_d do not match\"\n    );\n\n    let replica_id = generate_replica_id::<Tree::Hasher, _>(\n        &prover_id,\n        sector_id.into(),\n        &ticket,\n        comm_d,\n        &porep_config.porep_id,\n    );\n\n    let labels = StackedDrg::<Tree, DefaultPieceHasher>::replicate_phase1(\n        &compound_public_params.vanilla_params,\n        &replica_id,\n        config.clone(),\n    )?;\n\n    let out = SealPreCommitPhase1Output {\n        labels,\n        config,\n        comm_d,\n    };\n\n    info!(\"seal_pre_commit_phase1:finish: {:?}\", sector_id);\n    Ok(out)\n}\n\n#[allow(clippy::too_many_arguments)]\npub fn seal_pre_commit_phase2<R, S, Tree: 'static + MerkleTreeTrait>(\n    porep_config: PoRepConfig,\n    phase1_output: SealPreCommitPhase1Output<Tree>,\n    cache_path: S,\n    replica_path: R,\n) -> Result<SealPreCommitOutput>\nwhere\n    R: AsRef<Path>,\n    S: AsRef<Path>,\n{\n    info!(\"seal_pre_commit_phase2:start\");\n\n    // Sanity check all input path types.\n    ensure!(\n        metadata(cache_path.as_ref())?.is_dir(),\n        \"cache_path must be a directory\"\n    );\n    ensure!(\n        metadata(replica_path.as_ref())?.is_file(),\n        \"replica_path must be a file\"\n    );\n\n    let SealPreCommitPhase1Output {\n        mut labels,\n        mut config,\n        comm_d,\n    
    ..\n    } = phase1_output;\n\n    labels.update_root(cache_path.as_ref());\n    config.path = cache_path.as_ref().into();\n\n    let f_data = OpenOptions::new()\n        .read(true)\n        .write(true)\n        .open(&replica_path)\n        .with_context(|| {\n            format!(\n                \"could not open replica_path={:?}\",\n                replica_path.as_ref().display()\n            )\n        })?;\n    let data = unsafe {\n        MmapOptions::new().map_mut(&f_data).with_context(|| {\n            format!(\n                \"could not mmap replica_path={:?}\",\n                replica_path.as_ref().display()\n            )\n        })?\n    };\n    let data: Data<'_> = (data, PathBuf::from(replica_path.as_ref())).into();\n\n    // Load data tree from disk\n    let data_tree = {\n        let base_tree_size = get_base_tree_size::<DefaultBinaryTree>(porep_config.sector_size)?;\n        let base_tree_leafs = get_base_tree_leafs::<DefaultBinaryTree>(base_tree_size)?;\n\n        trace!(\n            \"seal phase 2: base tree size {}, base tree leafs {}, rows to discard {}\",\n            base_tree_size,\n            base_tree_leafs,\n            default_rows_to_discard(base_tree_leafs, BINARY_ARITY)\n        );\n        ensure!(\n            config.rows_to_discard == default_rows_to_discard(base_tree_leafs, BINARY_ARITY),\n            \"Invalid cache size specified\"\n        );\n\n        let store: DiskStore<DefaultPieceDomain> =\n            DiskStore::new_from_disk(base_tree_size, BINARY_ARITY, &config)?;\n        BinaryMerkleTree::<DefaultPieceHasher>::from_data_store(store, base_tree_leafs)?\n    };\n\n    let compound_setup_params = compound_proof::SetupParams {\n        vanilla_params: setup_params(\n            PaddedBytesAmount::from(porep_config),\n            usize::from(PoRepProofPartitions::from(porep_config)),\n            porep_config.porep_id,\n            porep_config.api_version,\n        )?,\n        partitions: Some(usize::from(PoRepProofPartitions::from(porep_config))),\n        priority: false,\n    };\n\n    let compound_public_params = <StackedCompound<Tree, DefaultPieceHasher> as CompoundProof<\n        StackedDrg<'_, Tree, DefaultPieceHasher>,\n        _,\n    >>::setup(&compound_setup_params)?;\n\n    let (tau, (p_aux, t_aux)) = StackedDrg::<Tree, DefaultPieceHasher>::replicate_phase2(\n        &compound_public_params.vanilla_params,\n        labels,\n        data,\n        data_tree,\n        config,\n        replica_path.as_ref().to_path_buf(),\n    )?;\n\n    let comm_r = commitment_from_fr(tau.comm_r.into());\n\n    // Persist p_aux and t_aux here\n    let p_aux_path = cache_path.as_ref().join(CacheKey::PAux.to_string());\n    let mut f_p_aux = File::create(&p_aux_path)\n        .with_context(|| format!(\"could not create file p_aux={:?}\", p_aux_path))?;\n    let p_aux_bytes = serialize(&p_aux)?;\n    f_p_aux\n        .write_all(&p_aux_bytes)\n        .with_context(|| format!(\"could not write to file p_aux={:?}\", p_aux_path))?;\n\n    let t_aux_path = cache_path.as_ref().join(CacheKey::TAux.to_string());\n    let mut f_t_aux = File::create(&t_aux_path)\n        .with_context(|| format!(\"could not create file t_aux={:?}\", t_aux_path))?;\n    let t_aux_bytes = serialize(&t_aux)?;\n    f_t_aux\n        .write_all(&t_aux_bytes)\n        .with_context(|| format!(\"could not write to file t_aux={:?}\", t_aux_path))?;\n\n    let out = SealPreCommitOutput { comm_r, comm_d };\n\n    info!(\"seal_pre_commit_phase2:finish\");\n    
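// the comm_r/comm_d returned here are the commitments that\n    // seal_commit_phase1 and verify_seal later take as input\n    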
Ok(out)\n}\n\n#[allow(clippy::too_many_arguments)]\npub fn seal_commit_phase1<T: AsRef<Path>, Tree: 'static + MerkleTreeTrait>(\n    porep_config: PoRepConfig,\n    cache_path: T,\n    replica_path: T,\n    prover_id: ProverId,\n    sector_id: SectorId,\n    ticket: Ticket,\n    seed: Ticket,\n    pre_commit: SealPreCommitOutput,\n    piece_infos: &[PieceInfo],\n) -> Result<SealCommitPhase1Output<Tree>> {\n    info!(\"seal_commit_phase1:start: {:?}\", sector_id);\n\n    // Sanity check all input path types.\n    ensure!(\n        metadata(cache_path.as_ref())?.is_dir(),\n        \"cache_path must be a directory\"\n    );\n    ensure!(\n        metadata(replica_path.as_ref())?.is_file(),\n        \"replica_path must be a file\"\n    );\n\n    let SealPreCommitOutput { comm_d, comm_r } = pre_commit;\n\n    ensure!(comm_d != [0; 32], \"Invalid all zero commitment (comm_d)\");\n    ensure!(comm_r != [0; 32], \"Invalid all zero commitment (comm_r)\");\n    ensure!(\n        verify_pieces(&comm_d, piece_infos, porep_config.into())?,\n        \"pieces and comm_d do not match\"\n    );\n\n    let p_aux = {\n        let p_aux_path = cache_path.as_ref().join(CacheKey::PAux.to_string());\n        let p_aux_bytes = fs::read(&p_aux_path)\n            .with_context(|| format!(\"could not read file p_aux={:?}\", p_aux_path))?;\n\n        deserialize(&p_aux_bytes)\n    }?;\n\n    let t_aux = {\n        let t_aux_path = cache_path.as_ref().join(CacheKey::TAux.to_string());\n        let t_aux_bytes = fs::read(&t_aux_path)\n            .with_context(|| format!(\"could not read file t_aux={:?}\", t_aux_path))?;\n\n        let mut res: TemporaryAux<_, _> = deserialize(&t_aux_bytes)?;\n\n        // Switch t_aux to the passed in cache_path\n        res.set_cache_path(cache_path);\n        res\n    };\n\n    // Convert TemporaryAux to TemporaryAuxCache, which instantiates all\n    // elements based on the configs stored in TemporaryAux.\n    let t_aux_cache: TemporaryAuxCache<Tree, DefaultPieceHasher> =\n        TemporaryAuxCache::new(&t_aux, replica_path.as_ref().to_path_buf())\n            .context(\"failed to restore contents of t_aux\")?;\n\n    let comm_r_safe = as_safe_commitment(&comm_r, \"comm_r\")?;\n    let comm_d_safe = DefaultPieceDomain::try_from_bytes(&comm_d)?;\n\n    let replica_id = generate_replica_id::<Tree::Hasher, _>(\n        &prover_id,\n        sector_id.into(),\n        &ticket,\n        comm_d_safe,\n        &porep_config.porep_id,\n    );\n\n    let public_inputs = stacked::PublicInputs {\n        replica_id,\n        tau: Some(stacked::Tau {\n            comm_d: comm_d_safe,\n            comm_r: comm_r_safe,\n        }),\n        k: None,\n        seed,\n    };\n\n    let private_inputs = stacked::PrivateInputs::<Tree, DefaultPieceHasher> {\n        p_aux,\n        t_aux: t_aux_cache,\n    };\n\n    let compound_setup_params = compound_proof::SetupParams {\n        vanilla_params: setup_params(\n            PaddedBytesAmount::from(porep_config),\n            usize::from(PoRepProofPartitions::from(porep_config)),\n            porep_config.porep_id,\n            porep_config.api_version,\n        )?,\n        partitions: Some(usize::from(PoRepProofPartitions::from(porep_config))),\n        priority: false,\n    };\n\n    let compound_public_params = <StackedCompound<Tree, DefaultPieceHasher> as CompoundProof<\n        StackedDrg<'_, Tree, DefaultPieceHasher>,\n        _,\n    >>::setup(&compound_setup_params)?;\n\n    let vanilla_proofs = StackedDrg::prove_all_partitions(\n        
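// one vanilla (non-circuit) proof per partition; these are sanity-checked\n        // below before being passed on to the circuit prover in phase 2\n        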
&compound_public_params.vanilla_params,\n        &public_inputs,\n        &private_inputs,\n        StackedCompound::partition_count(&compound_public_params),\n    )?;\n\n    let sanity_check = StackedDrg::<Tree, DefaultPieceHasher>::verify_all_partitions(\n        &compound_public_params.vanilla_params,\n        &public_inputs,\n        &vanilla_proofs,\n    )?;\n    ensure!(sanity_check, \"Invalid vanilla proof generated\");\n\n    let out = SealCommitPhase1Output {\n        vanilla_proofs,\n        comm_r,\n        comm_d,\n        replica_id,\n        seed,\n        ticket,\n    };\n\n    info!(\"seal_commit_phase1:finish: {:?}\", sector_id);\n    Ok(out)\n}\n\n#[allow(clippy::too_many_arguments)]\npub fn seal_commit_phase2<Tree: 'static + MerkleTreeTrait>(\n    porep_config: PoRepConfig,\n    phase1_output: SealCommitPhase1Output<Tree>,\n    prover_id: ProverId,\n    sector_id: SectorId,\n) -> Result<SealCommitOutput> {\n    info!(\"seal_commit_phase2:start: {:?}\", sector_id);\n\n    let SealCommitPhase1Output {\n        vanilla_proofs,\n        comm_d,\n        comm_r,\n        replica_id,\n        seed,\n        ticket,\n    } = phase1_output;\n\n    ensure!(comm_d != [0; 32], \"Invalid all zero commitment (comm_d)\");\n    ensure!(comm_r != [0; 32], \"Invalid all zero commitment (comm_r)\");\n\n    let comm_r_safe = as_safe_commitment(&comm_r, \"comm_r\")?;\n    let comm_d_safe = DefaultPieceDomain::try_from_bytes(&comm_d)?;\n\n    let public_inputs = stacked::PublicInputs {\n        replica_id,\n        tau: Some(stacked::Tau {\n            comm_d: comm_d_safe,\n            comm_r: comm_r_safe,\n        }),\n        k: None,\n        seed,\n    };\n\n    let groth_params = get_stacked_params::<Tree>(porep_config)?;\n\n    info!(\n        \"got groth params ({}) while sealing\",\n        u64::from(PaddedBytesAmount::from(porep_config))\n    );\n\n    let compound_setup_params = compound_proof::SetupParams {\n        vanilla_params: setup_params(\n            PaddedBytesAmount::from(porep_config),\n            usize::from(PoRepProofPartitions::from(porep_config)),\n            porep_config.porep_id,\n            porep_config.api_version,\n        )?,\n        partitions: Some(usize::from(PoRepProofPartitions::from(porep_config))),\n        priority: false,\n    };\n\n    let compound_public_params = <StackedCompound<Tree, DefaultPieceHasher> as CompoundProof<\n        StackedDrg<'_, Tree, DefaultPieceHasher>,\n        _,\n    >>::setup(&compound_setup_params)?;\n\n    info!(\"snark_proof:start\");\n    let groth_proofs = StackedCompound::<Tree, DefaultPieceHasher>::circuit_proofs(\n        &public_inputs,\n        vanilla_proofs,\n        &compound_public_params.vanilla_params,\n        &groth_params,\n        compound_public_params.priority,\n    )?;\n    info!(\"snark_proof:finish\");\n\n    let proof = MultiProof::new(groth_proofs, &groth_params.pvk);\n\n    let mut buf = Vec::with_capacity(\n        SINGLE_PARTITION_PROOF_LEN * usize::from(PoRepProofPartitions::from(porep_config)),\n    );\n\n    proof.write(&mut buf)?;\n\n    // Verification is cheap when parameters are cached,\n    // and it is never correct to return a proof which does not verify.\n    verify_seal::<Tree>(\n        porep_config,\n        comm_r,\n        comm_d,\n        prover_id,\n        sector_id,\n        ticket,\n        seed,\n        &buf,\n    )\n    .context(\"post-seal verification sanity check failed\")?;\n\n    let out = SealCommitOutput { proof: buf };\n\n    info!(\"seal_commit_phase2:finish: 
{:?}\", sector_id);\n    Ok(out)\n}\n\n/// Computes a sectors's `comm_d` given its pieces.\n///\n/// # Arguments\n///\n/// * `porep_config` - this sector's porep config that contains the number of bytes in the sector.\n/// * `piece_infos` - the piece info (commitment and byte length) for each piece in this sector.\npub fn compute_comm_d(sector_size: SectorSize, piece_infos: &[PieceInfo]) -> Result<Commitment> {\n    info!(\"compute_comm_d:start\");\n\n    let result = pieces::compute_comm_d(sector_size, piece_infos);\n\n    info!(\"compute_comm_d:finish\");\n    result\n}\n\n/// Verifies the output of some previously-run seal operation.\n///\n/// # Arguments\n///\n/// * `porep_config` - this sector's porep config that contains the number of bytes in this sector.\n/// * `comm_r_in` - commitment to the sector's replica (`comm_r`).\n/// * `comm_d_in` - commitment to the sector's data (`comm_d`).\n/// * `prover_id` - the prover-id that sealed this sector.\n/// * `sector_id` - this sector's sector-id.\n/// * `ticket` - the ticket that was used to generate this sector's replica-id.\n/// * `seed` - the seed used to derive the porep challenges.\n/// * `proof_vec` - the porep circuit proof serialized into a vector of bytes.\n#[allow(clippy::too_many_arguments)]\npub fn verify_seal<Tree: 'static + MerkleTreeTrait>(\n    porep_config: PoRepConfig,\n    comm_r_in: Commitment,\n    comm_d_in: Commitment,\n    prover_id: ProverId,\n    sector_id: SectorId,\n    ticket: Ticket,\n    seed: Ticket,\n    proof_vec: &[u8],\n) -> Result<bool> {\n    info!(\"verify_seal:start: {:?}\", sector_id);\n    ensure!(comm_d_in != [0; 32], \"Invalid all zero commitment (comm_d)\");\n    ensure!(comm_r_in != [0; 32], \"Invalid all zero commitment (comm_r)\");\n\n    let comm_r: <Tree::Hasher as Hasher>::Domain = as_safe_commitment(&comm_r_in, \"comm_r\")?;\n    let comm_d: DefaultPieceDomain = as_safe_commitment(&comm_d_in, \"comm_d\")?;\n\n    let replica_id = generate_replica_id::<Tree::Hasher, _>(\n        &prover_id,\n        sector_id.into(),\n        &ticket,\n        comm_d,\n        &porep_config.porep_id,\n    );\n\n    let compound_setup_params = compound_proof::SetupParams {\n        vanilla_params: setup_params(\n            PaddedBytesAmount::from(porep_config),\n            usize::from(PoRepProofPartitions::from(porep_config)),\n            porep_config.porep_id,\n            porep_config.api_version,\n        )?,\n        partitions: Some(usize::from(PoRepProofPartitions::from(porep_config))),\n        priority: false,\n    };\n\n    let compound_public_params: compound_proof::PublicParams<\n        '_,\n        StackedDrg<'_, Tree, DefaultPieceHasher>,\n    > = StackedCompound::setup(&compound_setup_params)?;\n\n    let public_inputs =\n        stacked::PublicInputs::<<Tree::Hasher as Hasher>::Domain, DefaultPieceDomain> {\n            replica_id,\n            tau: Some(Tau { comm_r, comm_d }),\n            seed,\n            k: None,\n        };\n\n    let result = {\n        let sector_bytes = PaddedBytesAmount::from(porep_config);\n        let verifying_key = get_stacked_verifying_key::<Tree>(porep_config)?;\n\n        info!(\n            \"got verifying key ({}) while verifying seal\",\n            u64::from(sector_bytes)\n        );\n\n        let proof = MultiProof::new_from_reader(\n            Some(usize::from(PoRepProofPartitions::from(porep_config))),\n            proof_vec,\n            &verifying_key,\n        )?;\n\n        StackedCompound::verify(\n            &compound_public_params,\n     
       &public_inputs,\n            &proof,\n            &ChallengeRequirements {\n                minimum_challenges: *POREP_MINIMUM_CHALLENGES\n                    .read()\n                    .expect(\"POREP_MINIMUM_CHALLENGES poisoned\")\n                    .get(&u64::from(SectorSize::from(porep_config)))\n                    .expect(\"unknown sector size\") as usize,\n            },\n        )\n    };\n\n    info!(\"verify_seal:finish: {:?}\", sector_id);\n    result\n}\n\n/// Verifies a batch of outputs of some previously-run seal operations.\n///\n/// # Arguments\n///\n/// * `porep_config` - this sector's porep config that contains the number of bytes in this sector.\n/// * `[comm_r_ins]` - list of commitments to the sector's replica (`comm_r`).\n/// * `[comm_d_ins]` - list of commitments to the sector's data (`comm_d`).\n/// * `[prover_ids]` - list of prover-ids that sealed this sector.\n/// * `[sector_ids]` - list of the sector's sector-id.\n/// * `[tickets]` - list of tickets that was used to generate this sector's replica-id.\n/// * `[seeds]` - list of seeds used to derive the porep challenges.\n/// * `[proof_vecs]` - list of porep circuit proofs serialized into a vector of bytes.\n#[allow(clippy::too_many_arguments)]\npub fn verify_batch_seal<Tree: 'static + MerkleTreeTrait>(\n    porep_config: PoRepConfig,\n    comm_r_ins: &[Commitment],\n    comm_d_ins: &[Commitment],\n    prover_ids: &[ProverId],\n    sector_ids: &[SectorId],\n    tickets: &[Ticket],\n    seeds: &[Ticket],\n    proof_vecs: &[&[u8]],\n) -> Result<bool> {\n    info!(\"verify_batch_seal:start\");\n    ensure!(!comm_r_ins.is_empty(), \"Cannot prove empty batch\");\n    let l = comm_r_ins.len();\n    ensure!(l == comm_d_ins.len(), \"Inconsistent inputs\");\n    ensure!(l == prover_ids.len(), \"Inconsistent inputs\");\n    ensure!(l == prover_ids.len(), \"Inconsistent inputs\");\n    ensure!(l == sector_ids.len(), \"Inconsistent inputs\");\n    ensure!(l == tickets.len(), \"Inconsistent inputs\");\n    ensure!(l == seeds.len(), \"Inconsistent inputs\");\n    ensure!(l == proof_vecs.len(), \"Inconsistent inputs\");\n\n    for comm_d_in in comm_d_ins {\n        ensure!(\n            comm_d_in != &[0; 32],\n            \"Invalid all zero commitment (comm_d)\"\n        );\n    }\n    for comm_r_in in comm_r_ins {\n        ensure!(\n            comm_r_in != &[0; 32],\n            \"Invalid all zero commitment (comm_r)\"\n        );\n    }\n\n    let sector_bytes = PaddedBytesAmount::from(porep_config);\n\n    let verifying_key = get_stacked_verifying_key::<Tree>(porep_config)?;\n    info!(\n        \"got verifying key ({}) while verifying seal\",\n        u64::from(sector_bytes)\n    );\n\n    let compound_setup_params = compound_proof::SetupParams {\n        vanilla_params: setup_params(\n            PaddedBytesAmount::from(porep_config),\n            usize::from(PoRepProofPartitions::from(porep_config)),\n            porep_config.porep_id,\n            porep_config.api_version,\n        )?,\n        partitions: Some(usize::from(PoRepProofPartitions::from(porep_config))),\n        priority: false,\n    };\n\n    let compound_public_params: compound_proof::PublicParams<\n        '_,\n        StackedDrg<'_, Tree, DefaultPieceHasher>,\n    > = StackedCompound::setup(&compound_setup_params)?;\n\n    let mut public_inputs = Vec::with_capacity(l);\n    let mut proofs = Vec::with_capacity(l);\n\n    for i in 0..l {\n        let comm_r = as_safe_commitment(&comm_r_ins[i], \"comm_r\")?;\n        let comm_d = 
as_safe_commitment(&comm_d_ins[i], \"comm_d\")?;\n\n        let replica_id = generate_replica_id::<Tree::Hasher, _>(\n            &prover_ids[i],\n            sector_ids[i].into(),\n            &tickets[i],\n            comm_d,\n            &porep_config.porep_id,\n        );\n\n        public_inputs.push(stacked::PublicInputs::<\n            <Tree::Hasher as Hasher>::Domain,\n            DefaultPieceDomain,\n        > {\n            replica_id,\n            tau: Some(Tau { comm_r, comm_d }),\n            seed: seeds[i],\n            k: None,\n        });\n        proofs.push(MultiProof::new_from_reader(\n            Some(usize::from(PoRepProofPartitions::from(porep_config))),\n            proof_vecs[i],\n            &verifying_key,\n        )?);\n    }\n\n    let result = StackedCompound::<Tree, DefaultPieceHasher>::batch_verify(\n        &compound_public_params,\n        &public_inputs,\n        &proofs,\n        &ChallengeRequirements {\n            minimum_challenges: *POREP_MINIMUM_CHALLENGES\n                .read()\n                .expect(\"POREP_MINIMUM_CHALLENGES poisoned\")\n                .get(&u64::from(SectorSize::from(porep_config)))\n                .expect(\"unknown sector size\") as usize,\n        },\n    )\n    .map_err(Into::into);\n\n    info!(\"verify_batch_seal:finish\");\n    result\n}\n"
  },
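  {
    "path": "filecoin-proofs/examples/verify_seal_sketch.rs",
    "content": "//! Illustrative sketch, not part of the original tree: a caller-side\n//! wrapper for the seal verification API defined in api/seal.rs. Only the\n//! verify_seal signature is taken from this repo; the import paths below\n//! are assumptions, and every input is expected to come from a completed\n//! seal operation.\n\nuse anyhow::{ensure, Result};\n// Assumed re-exports; adjust to wherever these names actually live.\nuse filecoin_proofs::{verify_seal, Commitment, PoRepConfig, ProverId, SectorId, Ticket};\nuse storage_proofs::merkle::MerkleTreeTrait;\n\n#[allow(clippy::too_many_arguments)]\npub fn check_seal<Tree: 'static + MerkleTreeTrait>(\n    porep_config: PoRepConfig,\n    comm_r: Commitment,\n    comm_d: Commitment,\n    prover_id: ProverId,\n    sector_id: SectorId,\n    ticket: Ticket,\n    seed: Ticket,\n    proof: &[u8],\n) -> Result<()> {\n    // verify_seal returns Ok(false) for a well-formed but invalid proof, so\n    // callers must check the boolean as well as the error.\n    let ok = verify_seal::<Tree>(\n        porep_config, comm_r, comm_d, prover_id, sector_id, ticket, seed, proof,\n    )?;\n    ensure!(ok, \"seal proof did not verify\");\n    Ok(())\n}\n"
  },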
  {
    "path": "filecoin-proofs/src/api/sector_builder/errors.rs",
    "content": "use failure::Backtrace;\nuse std::fmt::Display;\n\n#[derive(Debug, Fail)]\npub enum SectorBuilderErr {\n    #[fail(\n        display = \"number of bytes in piece ({}) exceeds maximum ({})\",\n        num_bytes_in_piece, max_bytes_per_sector\n    )]\n    OverflowError {\n        num_bytes_in_piece: u64,\n        max_bytes_per_sector: u64,\n    },\n\n    #[fail(\n        display = \"number of bytes written ({}) does not match bytes in piece ({})\",\n        num_bytes_written, num_bytes_in_piece\n    )]\n    IncompleteWriteError {\n        num_bytes_written: u64,\n        num_bytes_in_piece: u64,\n    },\n\n    #[fail(display = \"no piece with key {} found\", _0)]\n    PieceNotFound(String),\n\n    #[fail(display = \"unrecoverable error: {}\", _0)]\n    Unrecoverable(String, Backtrace),\n}\n\npub fn err_piecenotfound(piece_key: String) -> SectorBuilderErr {\n    SectorBuilderErr::PieceNotFound(piece_key)\n}\n\npub fn err_unrecov<S: Display>(msg: S) -> SectorBuilderErr {\n    let backtrace = failure::Backtrace::new();\n    SectorBuilderErr::Unrecoverable(format!(\"{}\", msg), backtrace)\n}\n\npub fn err_overflow(num_bytes_in_piece: u64, max_bytes_per_sector: u64) -> SectorBuilderErr {\n    SectorBuilderErr::OverflowError {\n        num_bytes_in_piece,\n        max_bytes_per_sector,\n    }\n}\n\npub fn err_inc_write(num_bytes_written: u64, num_bytes_in_piece: u64) -> SectorBuilderErr {\n    SectorBuilderErr::IncompleteWriteError {\n        num_bytes_written,\n        num_bytes_in_piece,\n    }\n}\n"
  },
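  {
    "path": "filecoin-proofs/src/api/sector_builder/errors_sketch.rs",
    "content": "//! Hypothetical companion test, not in the original tree: exercises the\n//! constructor helpers defined in errors.rs and shows which variant each\n//! one produces.\n\n#[cfg(test)]\nmod tests {\n    use crate::api::sector_builder::errors::*;\n\n    #[test]\n    fn constructors_map_to_variants() {\n        match err_overflow(10, 5) {\n            SectorBuilderErr::OverflowError {\n                num_bytes_in_piece,\n                max_bytes_per_sector,\n            } => assert_eq!((num_bytes_in_piece, max_bytes_per_sector), (10, 5)),\n            _ => panic!(\"expected OverflowError\"),\n        }\n\n        match err_piecenotfound(\"piece-key\".to_string()) {\n            SectorBuilderErr::PieceNotFound(key) => assert_eq!(key, \"piece-key\"),\n            _ => panic!(\"expected PieceNotFound\"),\n        }\n    }\n}\n"
  },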
  {
    "path": "filecoin-proofs/src/api/sector_builder/helpers/add_piece.rs",
    "content": "use crate::api::sector_builder::errors::*;\nuse crate::api::sector_builder::metadata::sum_piece_bytes;\nuse crate::api::sector_builder::metadata::StagedSectorMetadata;\nuse crate::api::sector_builder::state::StagedState;\nuse crate::api::sector_builder::*;\nuse crate::error;\nuse sector_base::api::sector_store::SectorManager;\nuse std::sync::Arc;\n\npub fn add_piece(\n    sector_store: &Arc<WrappedSectorStore>,\n    mut staged_state: &mut StagedState,\n    piece_key: String,\n    piece_bytes: &[u8],\n) -> error::Result<SectorId> {\n    let sector_mgr = sector_store.inner.manager();\n    let sector_max = sector_store.inner.config().max_unsealed_bytes_per_sector();\n\n    let piece_bytes_len = piece_bytes.len() as u64;\n\n    let opt_dest_sector_id = {\n        let candidates: Vec<StagedSectorMetadata> = staged_state\n            .sectors\n            .iter()\n            .filter(|(_, v)| v.seal_status == SealStatus::Pending)\n            .map(|(_, v)| (*v).clone())\n            .collect();\n\n        compute_destination_sector_id(&candidates[..], sector_max, piece_bytes_len)?\n    };\n\n    let dest_sector_id = opt_dest_sector_id\n        .ok_or(())\n        .or_else(|_| provision_new_staged_sector(sector_mgr, &mut staged_state))?;\n\n    if let Some(s) = staged_state.sectors.get_mut(&dest_sector_id) {\n        sector_store\n            .inner\n            .manager()\n            .write_and_preprocess(&s.sector_access, &piece_bytes)\n            .map_err(|err| err.into())\n            .and_then(|num_bytes_written| {\n                if num_bytes_written != piece_bytes_len {\n                    Err(err_inc_write(num_bytes_written, piece_bytes_len).into())\n                } else {\n                    Ok(s.sector_id)\n                }\n            })\n            .map(|sector_id| {\n                s.pieces.push(metadata::PieceMetadata {\n                    piece_key,\n                    num_bytes: piece_bytes_len,\n                });\n\n                sector_id\n            })\n    } else {\n        Err(err_unrecov(\"unable to retrieve sector from state-map\").into())\n    }\n}\n\n// Given a list of staged sectors which are accepting data, return the\n// first staged sector into which the bytes will fit.\nfn compute_destination_sector_id(\n    candidate_sectors: &[StagedSectorMetadata],\n    max_bytes_per_sector: u64,\n    num_bytes_in_piece: u64,\n) -> error::Result<Option<SectorId>> {\n    if num_bytes_in_piece > max_bytes_per_sector {\n        Err(err_overflow(num_bytes_in_piece, max_bytes_per_sector).into())\n    } else {\n        Ok(candidate_sectors\n            .iter()\n            .find(move |staged_sector| {\n                (max_bytes_per_sector - sum_piece_bytes(staged_sector)) >= num_bytes_in_piece\n            })\n            .map(|x| x.sector_id))\n    }\n}\n\n// Provisions a new staged sector and returns its sector_id. 
Not a pure\n// function; creates a sector access (likely a file), increments the sector id\n// nonce, and mutates the StagedState.\nfn provision_new_staged_sector(\n    sector_manager: &SectorManager,\n    staged_state: &mut StagedState,\n) -> error::Result<SectorId> {\n    let sector_id = {\n        let n = &mut staged_state.sector_id_nonce;\n        *n += 1;\n        *n\n    };\n\n    let access = sector_manager.new_staging_sector_access()?;\n\n    let meta = StagedSectorMetadata {\n        pieces: Default::default(),\n        sector_access: access.clone(),\n        sector_id,\n        seal_status: SealStatus::Pending,\n    };\n\n    staged_state.sectors.insert(meta.sector_id, meta.clone());\n\n    Ok(sector_id)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::api::sector_builder::metadata::PieceMetadata;\n\n    #[test]\n    fn test_alpha() {\n        let mut sealed_sector_a: StagedSectorMetadata = Default::default();\n\n        sealed_sector_a.pieces.push(PieceMetadata {\n            piece_key: String::from(\"x\"),\n            num_bytes: 5,\n        });\n\n        sealed_sector_a.pieces.push(PieceMetadata {\n            piece_key: String::from(\"x\"),\n            num_bytes: 10,\n        });\n\n        let mut sealed_sector_b: StagedSectorMetadata = Default::default();\n\n        sealed_sector_b.pieces.push(PieceMetadata {\n            piece_key: String::from(\"x\"),\n            num_bytes: 5,\n        });\n\n        let staged_sectors = vec![sealed_sector_a.clone(), sealed_sector_b.clone()];\n\n        // piece takes up all remaining space in first sector\n        match compute_destination_sector_id(&staged_sectors, 100, 85) {\n            Ok(Some(destination_sector_id)) => {\n                assert_eq!(destination_sector_id, sealed_sector_a.sector_id)\n            }\n            _ => panic!(),\n        }\n\n        // piece doesn't fit into the first, but does the second\n        match compute_destination_sector_id(&staged_sectors, 100, 90) {\n            Ok(Some(destination_sector_id)) => {\n                assert_eq!(destination_sector_id, sealed_sector_b.sector_id)\n            }\n            _ => panic!(),\n        }\n\n        // piece doesn't fit into any in the list\n        match compute_destination_sector_id(&staged_sectors, 100, 100) {\n            Ok(None) => (),\n            _ => panic!(),\n        }\n\n        // piece is over max\n        match compute_destination_sector_id(&staged_sectors, 100, 101) {\n            Err(_) => (),\n            _ => panic!(),\n        }\n    }\n}\n"
  },
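  {
    "path": "filecoin-proofs/examples/first_fit_sketch.rs",
    "content": "//! Standalone sketch of the first-fit bin-packing policy implemented by\n//! compute_destination_sector_id in add_piece.rs, using plain tuples of\n//! (sector_id, bytes_already_staged) in place of StagedSectorMetadata. The\n//! real helper additionally returns an error when the piece exceeds the\n//! per-sector maximum; that branch is elided here.\n\n/// Returns the id of the first sector with enough space left, assuming\n/// every candidate has used <= max bytes.\nfn first_fit(candidates: &[(u64, u64)], max: u64, piece: u64) -> Option<u64> {\n    candidates\n        .iter()\n        .find(|(_, used)| max - used >= piece)\n        .map(|(id, _)| *id)\n}\n\nfn main() {\n    let sectors = [(1, 15), (2, 5)];\n    assert_eq!(first_fit(&sectors, 100, 85), Some(1)); // exactly fills sector 1\n    assert_eq!(first_fit(&sectors, 100, 90), Some(2)); // overflows 1, fits 2\n    assert_eq!(first_fit(&sectors, 100, 96), None); // fits nowhere\n}\n"
  },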
  {
    "path": "filecoin-proofs/src/api/sector_builder/helpers/get_seal_status.rs",
    "content": "use crate::api::sector_builder::errors::err_unrecov;\nuse crate::api::sector_builder::metadata::SealStatus;\nuse crate::api::sector_builder::state::SealedState;\nuse crate::api::sector_builder::state::StagedState;\nuse crate::api::sector_builder::SectorId;\nuse crate::error;\n\npub fn get_seal_status(\n    staged_state: &StagedState,\n    sealed_state: &SealedState,\n    sector_id: SectorId,\n) -> error::Result<SealStatus> {\n    sealed_state\n        .sectors\n        .get(&sector_id)\n        .map(|sealed_sector| SealStatus::Sealed(Box::new(sealed_sector.clone())))\n        .or_else(|| {\n            staged_state\n                .sectors\n                .get(&sector_id)\n                .and_then(|staged_sector| Some(staged_sector.seal_status.clone()))\n        })\n        .ok_or_else(|| err_unrecov(format!(\"no sector with id {} found\", sector_id)).into())\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::api::sector_builder::metadata::{SealedSectorMetadata, StagedSectorMetadata};\n    use crate::api::sector_builder::state::SealedState;\n    use crate::api::sector_builder::state::SectorBuilderState;\n    use crate::api::sector_builder::state::StagedState;\n    use std::collections::HashMap;\n\n    fn setup() -> SectorBuilderState {\n        let mut staged_sectors: HashMap<SectorId, StagedSectorMetadata> = Default::default();\n        let mut sealed_sectors: HashMap<SectorId, SealedSectorMetadata> = Default::default();\n\n        staged_sectors.insert(\n            2,\n            StagedSectorMetadata {\n                sector_id: 2,\n                seal_status: SealStatus::Sealing,\n                ..Default::default()\n            },\n        );\n\n        staged_sectors.insert(\n            3,\n            StagedSectorMetadata {\n                sector_id: 3,\n                seal_status: SealStatus::Pending,\n                ..Default::default()\n            },\n        );\n\n        sealed_sectors.insert(\n            4,\n            SealedSectorMetadata {\n                sector_id: 4,\n                ..Default::default()\n            },\n        );\n\n        SectorBuilderState {\n            prover_id: Default::default(),\n            staged: StagedState {\n                sector_id_nonce: 0,\n                sectors: staged_sectors,\n            },\n            sealed: SealedState {\n                sectors: sealed_sectors,\n            },\n        }\n    }\n\n    #[test]\n    fn test_alpha() {\n        let state = setup();\n\n        let sealed_state = state.sealed;\n        let staged_state = state.staged;\n\n        let result = get_seal_status(&staged_state, &sealed_state, 1);\n        assert!(result.is_err());\n\n        let result = get_seal_status(&staged_state, &sealed_state, 2).unwrap();\n        match result {\n            SealStatus::Sealing => (),\n            _ => panic!(\"should have been SealStatus::Sealing\"),\n        }\n\n        let result = get_seal_status(&staged_state, &sealed_state, 3).unwrap();\n        match result {\n            SealStatus::Pending => (),\n            _ => panic!(\"should have been SealStatus::Pending\"),\n        }\n\n        let result = get_seal_status(&staged_state, &sealed_state, 4).unwrap();\n        match result {\n            SealStatus::Sealed(_) => (),\n            _ => panic!(\"should have been SealStatus::Sealed\"),\n        }\n    }\n}\n"
  },
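  {
    "path": "filecoin-proofs/examples/seal_status_lookup_sketch.rs",
    "content": "//! Minimal sketch (not in the original tree) of the lookup order in\n//! get_seal_status.rs: the sealed map is consulted first, then the staged\n//! map, and a miss in both is an error. Plain string statuses stand in for\n//! the crate's SealStatus type.\n\nuse std::collections::HashMap;\n\nfn status(\n    sealed: &HashMap<u64, &'static str>,\n    staged: &HashMap<u64, &'static str>,\n    id: u64,\n) -> Result<&'static str, String> {\n    sealed\n        .get(&id)\n        .or_else(|| staged.get(&id))\n        .copied()\n        .ok_or_else(|| format!(\"no sector with id {} found\", id))\n}\n\nfn main() {\n    let sealed: HashMap<_, _> = vec![(4, \"sealed\")].into_iter().collect();\n    let staged: HashMap<_, _> = vec![(2, \"sealing\")].into_iter().collect();\n    assert_eq!(status(&sealed, &staged, 4), Ok(\"sealed\"));\n    assert_eq!(status(&sealed, &staged, 2), Ok(\"sealing\"));\n    assert!(status(&sealed, &staged, 1).is_err());\n}\n"
  },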
  {
    "path": "filecoin-proofs/src/api/sector_builder/helpers/get_sectors_ready_for_sealing.rs",
    "content": "use crate::api::sector_builder::metadata::sum_piece_bytes;\nuse crate::api::sector_builder::metadata::SealStatus;\nuse crate::api::sector_builder::metadata::StagedSectorMetadata;\nuse crate::api::sector_builder::state::StagedState;\nuse crate::api::sector_builder::SectorId;\nuse itertools::chain;\nuse std::cmp::Reverse;\n\npub fn get_sectors_ready_for_sealing(\n    staged_state: &StagedState,\n    max_user_bytes_per_staged_sector: u64,\n    max_num_staged_sectors: u8,\n    seal_all_staged_sectors: bool,\n) -> Vec<SectorId> {\n    let (full, mut not_full): (Vec<&StagedSectorMetadata>, Vec<&StagedSectorMetadata>) =\n        staged_state\n            .sectors\n            .values()\n            .filter(|x| x.seal_status == SealStatus::Pending)\n            .partition(|x| max_user_bytes_per_staged_sector <= sum_piece_bytes(x));\n\n    not_full.sort_unstable_by_key(|x| Reverse(x.sector_id));\n\n    let num_to_skip = if seal_all_staged_sectors {\n        0\n    } else {\n        max_num_staged_sectors as usize\n    };\n\n    chain(full.into_iter(), not_full.into_iter().skip(num_to_skip))\n        .map(|x| x.sector_id)\n        .collect::<Vec<SectorId>>()\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::api::sector_builder::metadata::PieceMetadata;\n    use crate::api::sector_builder::metadata::StagedSectorMetadata;\n    use crate::api::sector_builder::state::StagedState;\n    use crate::api::sector_builder::SectorId;\n    use std::collections::HashMap;\n\n    fn make_meta(\n        m: &mut HashMap<SectorId, StagedSectorMetadata>,\n        sector_id: SectorId,\n        num_bytes: u64,\n        accepting_data: bool,\n    ) {\n        let seal_status = if accepting_data {\n            SealStatus::Pending\n        } else {\n            SealStatus::Sealing\n        };\n\n        m.insert(\n            sector_id,\n            StagedSectorMetadata {\n                sector_id,\n                pieces: vec![PieceMetadata {\n                    piece_key: format!(\"{}\", sector_id),\n                    num_bytes,\n                }],\n                seal_status,\n                ..Default::default()\n            },\n        );\n    }\n\n    #[test]\n    fn test_seals_all() {\n        let mut m: HashMap<SectorId, StagedSectorMetadata> = HashMap::new();\n\n        make_meta(&mut m, 200, 0, true);\n        make_meta(&mut m, 201, 0, true);\n\n        let state = StagedState {\n            sector_id_nonce: 100,\n            sectors: m,\n        };\n\n        let to_seal: Vec<SectorId> = get_sectors_ready_for_sealing(&state, 127, 10, true)\n            .into_iter()\n            .collect();\n\n        assert_eq!(vec![201 as SectorId, 200 as SectorId], to_seal);\n    }\n\n    #[test]\n    fn test_seals_full() {\n        let mut m: HashMap<SectorId, StagedSectorMetadata> = HashMap::new();\n\n        make_meta(&mut m, 200, 127, true);\n        make_meta(&mut m, 201, 0, true);\n\n        let state = StagedState {\n            sector_id_nonce: 100,\n            sectors: m,\n        };\n\n        let to_seal: Vec<SectorId> = get_sectors_ready_for_sealing(&state, 127, 10, false)\n            .into_iter()\n            .collect();\n\n        assert_eq!(vec![200 as SectorId], to_seal);\n    }\n\n    #[test]\n    fn test_seals_excess() {\n        let mut m: HashMap<SectorId, StagedSectorMetadata> = HashMap::new();\n\n        make_meta(&mut m, 200, 0, true);\n        make_meta(&mut m, 201, 0, true);\n        make_meta(&mut m, 202, 0, true);\n        make_meta(&mut m, 203, 0, true);\n\n   
     let state = StagedState {\n            sector_id_nonce: 100,\n            sectors: m,\n        };\n\n        let to_seal: Vec<SectorId> = get_sectors_ready_for_sealing(&state, 127, 2, false)\n            .into_iter()\n            .collect();\n\n        assert_eq!(vec![201 as SectorId, 200 as SectorId], to_seal);\n    }\n\n    #[test]\n    fn test_noop() {\n        let mut m: HashMap<SectorId, StagedSectorMetadata> = HashMap::new();\n\n        make_meta(&mut m, 200, 0, true);\n        make_meta(&mut m, 201, 0, true);\n        make_meta(&mut m, 202, 0, true);\n        make_meta(&mut m, 203, 0, true);\n\n        let state = StagedState {\n            sector_id_nonce: 100,\n            sectors: m,\n        };\n\n        let to_seal: Vec<SectorId> = get_sectors_ready_for_sealing(&state, 127, 4, false)\n            .into_iter()\n            .collect();\n\n        assert_eq!(vec![0; 0], to_seal);\n    }\n\n    #[test]\n    fn test_noop_all_being_sealed() {\n        let mut m: HashMap<SectorId, StagedSectorMetadata> = HashMap::new();\n\n        make_meta(&mut m, 200, 127, false);\n        make_meta(&mut m, 201, 127, false);\n        make_meta(&mut m, 202, 127, false);\n        make_meta(&mut m, 203, 127, false);\n\n        let state = StagedState {\n            sector_id_nonce: 100,\n            sectors: m,\n        };\n\n        let to_seal: Vec<SectorId> = get_sectors_ready_for_sealing(&state, 127, 4, false)\n            .into_iter()\n            .collect();\n\n        assert_eq!(vec![0; 0], to_seal);\n    }\n}\n"
  },
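  {
    "path": "filecoin-proofs/examples/seal_scheduling_sketch.rs",
    "content": "//! Standalone sketch of the selection rule in\n//! get_sectors_ready_for_sealing.rs, over plain (sector_id, bytes_used)\n//! tuples: full sectors are always sealed, while non-full pending sectors\n//! are sealed only once a retention cap is exceeded, keeping the highest\n//! sector ids open for more data. Passing keep = 0 mirrors the\n//! seal_all_staged_sectors flag.\n\nuse std::cmp::Reverse;\n\nfn ready(sectors: &[(u64, u64)], max: u64, keep: usize) -> Vec<u64> {\n    let (full, mut not_full): (Vec<_>, Vec<_>) =\n        sectors.iter().partition(|(_, used)| *used >= max);\n    not_full.sort_unstable_by_key(|(id, _)| Reverse(*id));\n    full.iter()\n        .chain(not_full.iter().skip(keep))\n        .map(|(id, _)| *id)\n        .collect()\n}\n\nfn main() {\n    // Four non-full pending sectors with a cap of two: the two highest ids\n    // stay open and the rest are sealed (compare test_seals_excess).\n    let s = [(200, 0), (201, 0), (202, 0), (203, 0)];\n    assert_eq!(ready(&s, 127, 2), vec![201, 200]);\n}\n"
  },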
  {
    "path": "filecoin-proofs/src/api/sector_builder/helpers/mod.rs",
    "content": "pub mod add_piece;\npub mod get_seal_status;\npub mod get_sectors_ready_for_sealing;\npub mod retrieve_piece;\npub mod seal;\npub mod snapshots;\n"
  },
  {
    "path": "filecoin-proofs/src/api/sector_builder/helpers/retrieve_piece.rs",
    "content": "use crate::api::internal;\nuse crate::api::sector_builder::errors::err_unrecov;\nuse crate::api::sector_builder::metadata::sector_id_as_bytes;\nuse crate::api::sector_builder::metadata::SealedSectorMetadata;\nuse crate::api::sector_builder::WrappedSectorStore;\nuse crate::error;\nuse std::path::PathBuf;\nuse std::sync::Arc;\n\n// Unseals and returns the piece-bytes for the first sector found containing\n// a piece with matching key.\npub fn retrieve_piece<'a>(\n    sector_store: &Arc<WrappedSectorStore>,\n    sealed_sector: &SealedSectorMetadata,\n    prover_id: &[u8; 31],\n    piece_key: &'a str,\n) -> error::Result<Vec<u8>> {\n    let staging_sector_access = sector_store\n        .inner\n        .manager()\n        .new_staging_sector_access()\n        .map_err(failure::Error::from)?;\n\n    let result = retrieve_piece_aux(\n        sector_store,\n        sealed_sector,\n        prover_id,\n        piece_key,\n        &staging_sector_access,\n    );\n\n    if result.is_ok() {\n        sector_store\n            .inner\n            .manager()\n            .delete_staging_sector_access(&staging_sector_access)?;\n    }\n\n    let (_, bytes) = result?;\n\n    Ok(bytes)\n}\n\nfn retrieve_piece_aux<'a>(\n    sector_store: &Arc<WrappedSectorStore>,\n    sealed_sector: &SealedSectorMetadata,\n    prover_id: &[u8; 31],\n    piece_key: &'a str,\n    staging_sector_access: &'a str,\n) -> error::Result<(u64, Vec<u8>)> {\n    let (start_offset, num_bytes) = piece_pos(&sealed_sector, piece_key).ok_or_else(|| {\n        let msg = format!(\n            \"piece {} not found in sector {}\",\n            piece_key, &sealed_sector.sector_id\n        );\n        err_unrecov(msg)\n    })?;\n\n    let num_bytes_unsealed = internal::get_unsealed_range(\n        (*sector_store.inner).config(),\n        &PathBuf::from(sealed_sector.sector_access.clone()),\n        &PathBuf::from(staging_sector_access),\n        prover_id,\n        &sector_id_as_bytes(sealed_sector.sector_id)?,\n        start_offset,\n        num_bytes,\n    )?;\n\n    if num_bytes_unsealed != num_bytes {\n        let s = format!(\n            \"expected to unseal {} bytes, but unsealed {} bytes\",\n            num_bytes, num_bytes_unsealed\n        );\n\n        return Err(err_unrecov(s).into());\n    }\n\n    let piece_bytes = sector_store.inner.manager().read_raw(\n        &staging_sector_access.to_string(),\n        0,\n        num_bytes_unsealed,\n    )?;\n\n    Ok((num_bytes_unsealed, piece_bytes))\n}\n\n// Returns a tuple of piece bytes-offset and number-of-bytes in piece if the\n// provided sealed sector contains a matching piece.\nfn piece_pos(sealed_sector: &SealedSectorMetadata, piece_key: &str) -> Option<(u64, u64)> {\n    let (found_piece, start_offset, num_bytes) = sealed_sector.pieces.iter().fold(\n        (false, 0, 0),\n        |(eject, start_offset, num_bytes), item| {\n            if eject {\n                (eject, start_offset, num_bytes)\n            } else if item.piece_key == piece_key {\n                (true, start_offset, item.num_bytes)\n            } else {\n                (false, start_offset + item.num_bytes, item.num_bytes)\n            }\n        },\n    );\n\n    if found_piece {\n        Some((start_offset, num_bytes))\n    } else {\n        None\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::api::sector_builder::metadata::PieceMetadata;\n\n    #[test]\n    fn test_alpha() {\n        let mut sealed_sector: SealedSectorMetadata = Default::default();\n\n        
sealed_sector.pieces.push(PieceMetadata {\n            piece_key: String::from(\"x\"),\n            num_bytes: 5,\n        });\n\n        sealed_sector.pieces.push(PieceMetadata {\n            piece_key: String::from(\"y\"),\n            num_bytes: 30,\n        });\n\n        sealed_sector.pieces.push(PieceMetadata {\n            piece_key: String::from(\"z\"),\n            num_bytes: 100,\n        });\n\n        match piece_pos(&sealed_sector, \"x\") {\n            Some(pair) => assert_eq!(pair, (0, 5)),\n            None => panic!(),\n        }\n\n        match piece_pos(&sealed_sector, \"y\") {\n            Some(pair) => assert_eq!(pair, (5, 30)),\n            None => panic!(),\n        }\n\n        match piece_pos(&sealed_sector, \"z\") {\n            Some(pair) => assert_eq!(pair, (35, 100)),\n            None => panic!(),\n        }\n    }\n}\n"
  },
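  {
    "path": "filecoin-proofs/examples/piece_offset_sketch.rs",
    "content": "//! Standalone sketch of piece_pos from retrieve_piece.rs: pieces are laid\n//! out back to back inside a sector, so a piece's byte offset is the sum of\n//! the lengths of the pieces before it. A plain loop is used here in place\n//! of the original fold.\n\nfn piece_pos(pieces: &[(&str, u64)], key: &str) -> Option<(u64, u64)> {\n    let mut offset = 0;\n    for (k, len) in pieces {\n        if *k == key {\n            return Some((offset, *len));\n        }\n        offset += len;\n    }\n    None\n}\n\nfn main() {\n    // Mirrors the layout exercised by test_alpha in retrieve_piece.rs.\n    let pieces = [(\"x\", 5), (\"y\", 30), (\"z\", 100)];\n    assert_eq!(piece_pos(&pieces, \"x\"), Some((0, 5)));\n    assert_eq!(piece_pos(&pieces, \"y\"), Some((5, 30)));\n    assert_eq!(piece_pos(&pieces, \"z\"), Some((35, 100)));\n    assert_eq!(piece_pos(&pieces, \"q\"), None);\n}\n"
  },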
  {
    "path": "filecoin-proofs/src/api/sector_builder/helpers/seal.rs",
    "content": "use crate::api::internal::seal as seal_internal;\nuse crate::api::internal::SealOutput;\nuse crate::api::sector_builder::metadata::sector_id_as_bytes;\nuse crate::api::sector_builder::metadata::SealedSectorMetadata;\nuse crate::api::sector_builder::metadata::StagedSectorMetadata;\nuse crate::api::sector_builder::WrappedSectorStore;\nuse crate::error;\nuse std::path::PathBuf;\nuse std::sync::Arc;\n\npub fn seal(\n    sector_store: &Arc<WrappedSectorStore>,\n    prover_id: &[u8; 31],\n    staged_sector: StagedSectorMetadata,\n) -> error::Result<SealedSectorMetadata> {\n    // Provision a new sealed sector access through the manager.\n    let sealed_sector_access = sector_store\n        .inner\n        .manager()\n        .new_sealed_sector_access()\n        .map_err(failure::Error::from)?;\n\n    // Run the FPS seal operation. This call will block for a long time, so make\n    // sure you're not holding any locks.\n\n    let SealOutput {\n        comm_r,\n        comm_d,\n        comm_r_star,\n        snark_proof,\n    } = seal_internal(\n        (*sector_store.inner).config(),\n        &PathBuf::from(staged_sector.sector_access.clone()),\n        &PathBuf::from(sealed_sector_access.clone()),\n        prover_id,\n        &sector_id_as_bytes(staged_sector.sector_id)?,\n    )?;\n\n    let newly_sealed_sector = SealedSectorMetadata {\n        sector_id: staged_sector.sector_id,\n        sector_access: sealed_sector_access,\n        pieces: staged_sector.pieces,\n        comm_r_star,\n        comm_r,\n        comm_d,\n        snark_proof,\n    };\n\n    Ok(newly_sealed_sector)\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/sector_builder/helpers/snapshots.rs",
    "content": "use crate::api::sector_builder::state::*;\nuse crate::api::sector_builder::WrappedKeyValueStore;\nuse crate::error::Result;\nuse std::sync::Arc;\n\npub fn load_snapshot(\n    kv_store: &Arc<WrappedKeyValueStore>,\n    prover_id: &[u8; 31],\n) -> Result<Option<StateSnapshot>> {\n    let result: Option<Vec<u8>> = kv_store.inner.get(prover_id)?;\n\n    if let Some(val) = result {\n        return serde_cbor::from_slice(&val[..])\n            .map_err(failure::Error::from)\n            .map(Option::Some);\n    }\n\n    Ok(None)\n}\n\npub fn persist_snapshot(\n    kv_store: &Arc<WrappedKeyValueStore>,\n    snapshot: &StateSnapshot,\n) -> Result<()> {\n    let serialized = serde_cbor::to_vec(snapshot)?;\n    kv_store.inner.put(&snapshot.prover_id[..], &serialized)?;\n    Ok(())\n}\n\npub fn make_snapshot(\n    prover_id: &[u8; 31],\n    staged_state: &StagedState,\n    sealed_state: &SealedState,\n) -> StateSnapshot {\n    StateSnapshot {\n        prover_id: *prover_id,\n        staged: StagedState {\n            sector_id_nonce: staged_state.sector_id_nonce,\n            sectors: staged_state.sectors.clone(),\n        },\n        sealed: SealedState {\n            sectors: sealed_state.sectors.clone(),\n        },\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::api::sector_builder::helpers::snapshots::*;\n    use crate::api::sector_builder::kv_store::fs::FileSystemKvs;\n    use crate::api::sector_builder::metadata::StagedSectorMetadata;\n    use crate::api::sector_builder::state::SealedState;\n    use crate::api::sector_builder::state::StagedState;\n    use crate::api::sector_builder::SectorId;\n    use crate::api::sector_builder::WrappedKeyValueStore;\n    use std::collections::HashMap;\n    use std::sync::Arc;\n    use std::sync::Mutex;\n\n    #[test]\n    fn test_alpha() {\n        let metadata_dir = tempfile::tempdir().unwrap();\n\n        let kv_store = Arc::new(WrappedKeyValueStore {\n            inner: Box::new(FileSystemKvs::initialize(metadata_dir).unwrap()),\n        });\n\n        let prover_id = [0; 31];\n\n        let (staged_state, sealed_state) = {\n            let mut m: HashMap<SectorId, StagedSectorMetadata> = HashMap::new();\n\n            m.insert(123, Default::default());\n\n            let staged_state = Mutex::new(StagedState {\n                sector_id_nonce: 100,\n                sectors: m,\n            });\n\n            let sealed_state: Mutex<SealedState> = Default::default();\n\n            (staged_state, sealed_state)\n        };\n\n        let to_persist = make_snapshot(\n            &prover_id,\n            &staged_state.lock().unwrap(),\n            &sealed_state.lock().unwrap(),\n        );\n\n        let _ = persist_snapshot(&kv_store, &to_persist).unwrap();\n\n        let loaded = load_snapshot(&kv_store, &prover_id).unwrap().unwrap();\n\n        assert_eq!(to_persist, loaded);\n    }\n}\n"
  },
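  {
    "path": "filecoin-proofs/examples/snapshot_roundtrip_sketch.rs",
    "content": "//! Minimal round-trip sketch (not in the original tree) of the persistence\n//! strategy in snapshots.rs: state is serialized with serde_cbor and later\n//! restored from the same bytes. A trivial struct stands in for\n//! StateSnapshot.\n\nuse serde::{Deserialize, Serialize};\n\n#[derive(Debug, PartialEq, Serialize, Deserialize)]\nstruct Snapshot {\n    sector_id_nonce: u64,\n}\n\nfn main() -> Result<(), serde_cbor::Error> {\n    let before = Snapshot {\n        sector_id_nonce: 100,\n    };\n    let bytes = serde_cbor::to_vec(&before)?;\n    let after: Snapshot = serde_cbor::from_slice(&bytes)?;\n    assert_eq!(before, after);\n    Ok(())\n}\n"
  },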
  {
    "path": "filecoin-proofs/src/api/sector_builder/kv_store/fs.rs",
    "content": "use crate::api::sector_builder::kv_store::KeyValueStore;\nuse crate::error::Result;\nuse blake2::{Blake2b, Digest};\nuse std::fs::{self, File, OpenOptions};\nuse std::io::{ErrorKind, Read, Write};\nuse std::path::{Path, PathBuf};\n\nconst FATAL_NOCREATE: &str = \"[KeyValueStore#put] could not create path\";\n\n// FileSystemKvs is a file system-backed key/value store, mostly lifted from\n// sile/ekvsb\npub struct FileSystemKvs {\n    root_dir: PathBuf,\n}\n\nimpl FileSystemKvs {\n    pub fn initialize<P: AsRef<Path>>(root_dir: P) -> Result<Self> {\n        fs::create_dir_all(&root_dir)?;\n\n        Ok(FileSystemKvs {\n            root_dir: root_dir.as_ref().to_path_buf(),\n        })\n    }\n\n    fn key_to_path(&self, key: &[u8]) -> PathBuf {\n        let mut hasher = Blake2b::new();\n        hasher.input(key);\n\n        let result = hasher.result();\n        let file = format!(\"{:.32x}\", &result);\n\n        self.root_dir.join(file)\n    }\n}\n\nimpl KeyValueStore for FileSystemKvs {\n    fn put(&self, key: &[u8], value: &[u8]) -> Result<()> {\n        let path = self.key_to_path(key);\n\n        fs::create_dir_all(path.parent().expect(FATAL_NOCREATE))?;\n\n        let mut file = OpenOptions::new()\n            .create(true)\n            .write(true)\n            .truncate(true)\n            .open(path)?;\n\n        file.write_all(value)?;\n\n        Ok(())\n    }\n\n    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {\n        let path = self.key_to_path(key);\n\n        match File::open(path) {\n            Err(e) => {\n                if e.kind() != ErrorKind::NotFound {\n                    Err(e)?;\n                }\n                Ok(None)\n            }\n            Ok(mut file) => {\n                let mut buf = Vec::new();\n                file.read_to_end(&mut buf)?;\n                Ok(Some(buf))\n            }\n        }\n    }\n}\n"
  },
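  {
    "path": "filecoin-proofs/examples/kv_path_sketch.rs",
    "content": "//! Sketch of the key-to-path scheme used by FileSystemKvs: every key is\n//! hashed and the hex digest becomes a flat file name under root_dir, so\n//! arbitrary byte keys map to safe, fixed-length paths. Std's DefaultHasher\n//! stands in for Blake2b purely to keep this sketch dependency-free; the\n//! real store uses a cryptographic hash.\n\nuse std::collections::hash_map::DefaultHasher;\nuse std::hash::{Hash, Hasher};\nuse std::path::{Path, PathBuf};\n\nfn key_to_path(root_dir: &Path, key: &[u8]) -> PathBuf {\n    let mut hasher = DefaultHasher::new();\n    key.hash(&mut hasher);\n    root_dir.join(format!(\"{:016x}\", hasher.finish()))\n}\n\nfn main() {\n    // Two distinct keys land in two distinct files under the same root.\n    let a = key_to_path(Path::new(\"/tmp/meta\"), b\"key-xx\");\n    let b = key_to_path(Path::new(\"/tmp/meta\"), b\"key-yy\");\n    assert_ne!(a, b);\n}\n"
  },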
  {
    "path": "filecoin-proofs/src/api/sector_builder/kv_store/mod.rs",
    "content": "use crate::error::Result;\n\npub mod fs;\n\npub trait KeyValueStore {\n    fn put(&self, key: &[u8], value: &[u8]) -> Result<()>;\n    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>>;\n}\n\n#[cfg(test)]\nmod tests {\n    use crate::api::sector_builder::kv_store::fs::FileSystemKvs;\n    use crate::api::sector_builder::kv_store::KeyValueStore;\n\n    #[test]\n    fn test_alpha() {\n        let metadata_dir = tempfile::tempdir().unwrap();\n\n        let db = FileSystemKvs::initialize(metadata_dir).unwrap();\n\n        let k_a = b\"key-xx\";\n        let k_b = b\"key-yy\";\n        let v_a = b\"value-aa\";\n        let v_b = b\"value-bb\";\n\n        db.put(k_a, v_a).unwrap();\n        db.put(k_b, v_b).unwrap();\n\n        let opt = db.get(k_a).unwrap();\n        assert_eq!(format!(\"{:x?}\", opt.unwrap()), format!(\"{:x?}\", v_a));\n    }\n}\n"
  },
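  {
    "path": "filecoin-proofs/src/api/sector_builder/kv_store/mem_sketch.rs",
    "content": "//! Hypothetical in-memory implementation of the KeyValueStore trait, useful\n//! as a test double; it is not part of the original tree and is not\n//! registered in kv_store/mod.rs. Interior mutability is needed because the\n//! trait takes &self.\n\nuse crate::api::sector_builder::kv_store::KeyValueStore;\nuse crate::error::Result;\nuse std::collections::HashMap;\nuse std::sync::Mutex;\n\npub struct MemKvs(Mutex<HashMap<Vec<u8>, Vec<u8>>>);\n\nimpl MemKvs {\n    pub fn new() -> Self {\n        MemKvs(Mutex::new(HashMap::new()))\n    }\n}\n\nimpl KeyValueStore for MemKvs {\n    fn put(&self, key: &[u8], value: &[u8]) -> Result<()> {\n        self.0.lock().unwrap().insert(key.to_vec(), value.to_vec());\n        Ok(())\n    }\n\n    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {\n        Ok(self.0.lock().unwrap().get(key).cloned())\n    }\n}\n"
  },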
  {
    "path": "filecoin-proofs/src/api/sector_builder/metadata.rs",
    "content": "use crate::api::sector_builder::SectorId;\nuse crate::error;\nuse crate::serde_big_array::BigArray;\nuse byteorder::LittleEndian;\nuse byteorder::WriteBytesExt;\nuse serde::{Deserialize, Serialize};\nuse std::fmt;\n\n#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]\npub struct StagedSectorMetadata {\n    pub sector_id: SectorId,\n    pub sector_access: String,\n    pub pieces: Vec<PieceMetadata>,\n    pub seal_status: SealStatus,\n}\n\n#[derive(Clone, Serialize, Deserialize)]\npub struct SealedSectorMetadata {\n    pub sector_id: SectorId,\n    pub sector_access: String,\n    pub pieces: Vec<PieceMetadata>,\n    pub comm_r_star: [u8; 32],\n    pub comm_r: [u8; 32],\n    pub comm_d: [u8; 32],\n\n    #[serde(with = \"BigArray\")]\n    pub snark_proof: [u8; 384],\n}\n\n#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]\npub struct PieceMetadata {\n    pub piece_key: String,\n    pub num_bytes: u64,\n}\n\n#[derive(Clone, Serialize, Deserialize, Debug, PartialEq)]\npub enum SealStatus {\n    Failed(String),\n    Pending,\n    Sealed(Box<SealedSectorMetadata>),\n    Sealing,\n}\n\nimpl PartialEq for SealedSectorMetadata {\n    fn eq(&self, other: &SealedSectorMetadata) -> bool {\n        self.sector_id == other.sector_id\n            && self.sector_access == other.sector_access\n            && self.pieces == other.pieces\n            && self.comm_r_star == other.comm_r_star\n            && self.comm_r == other.comm_r\n            && self.comm_d == other.comm_d\n            && self.snark_proof.iter().eq(other.snark_proof.iter())\n    }\n}\n\nimpl Default for StagedSectorMetadata {\n    fn default() -> StagedSectorMetadata {\n        StagedSectorMetadata {\n            sector_id: Default::default(),\n            sector_access: Default::default(),\n            pieces: Default::default(),\n            seal_status: SealStatus::Pending,\n        }\n    }\n}\n\nimpl fmt::Debug for SealedSectorMetadata {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"SealedSectorMetadata {{ sector_id: {}, sector_access: {}, pieces: {:?}, comm_r_star: {:?}, comm_r: {:?}, comm_d: {:?} }}\", self.sector_id, self.sector_access, self.pieces, self.comm_r_star, self.comm_r, self.comm_d)\n    }\n}\n\nimpl Default for SealedSectorMetadata {\n    fn default() -> SealedSectorMetadata {\n        SealedSectorMetadata {\n            sector_id: Default::default(),\n            sector_access: Default::default(),\n            pieces: Default::default(),\n            comm_r_star: Default::default(),\n            comm_r: Default::default(),\n            comm_d: Default::default(),\n            snark_proof: [0; 384],\n        }\n    }\n}\n\npub fn sum_piece_bytes(s: &StagedSectorMetadata) -> u64 {\n    s.pieces.iter().map(|x| x.num_bytes).sum()\n}\n\npub fn sector_id_as_bytes(sector_id: SectorId) -> error::Result<[u8; 31]> {\n    // Transmute a u64 sector id to a zero-padded byte array.\n    let mut sector_id_as_bytes = [0u8; 31];\n    sector_id_as_bytes\n        .as_mut()\n        .write_u64::<LittleEndian>(sector_id)?;\n\n    Ok(sector_id_as_bytes)\n}\n"
  },
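  {
    "path": "filecoin-proofs/examples/sector_id_bytes_sketch.rs",
    "content": "//! Worked example of the sector_id_as_bytes encoding from metadata.rs: the\n//! u64 sector id is written little-endian into the first 8 bytes of a\n//! zero-padded 31-byte array (the same width as a prover id).\n\nuse byteorder::{LittleEndian, WriteBytesExt};\n\nfn main() -> std::io::Result<()> {\n    let mut buf = [0u8; 31];\n    buf.as_mut().write_u64::<LittleEndian>(0x0102)?;\n    // Least significant byte first, remainder left zeroed.\n    assert_eq!(buf[..8], [0x02u8, 0x01, 0, 0, 0, 0, 0, 0]);\n    assert!(buf[8..].iter().all(|b| *b == 0));\n    Ok(())\n}\n"
  },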
  {
    "path": "filecoin-proofs/src/api/sector_builder/mod.rs",
    "content": "use crate::api::internal::PoStOutput;\nuse crate::api::sector_builder::errors::SectorBuilderErr;\nuse crate::api::sector_builder::kv_store::fs::FileSystemKvs;\nuse crate::api::sector_builder::kv_store::KeyValueStore;\nuse crate::api::sector_builder::metadata::*;\nuse crate::api::sector_builder::scheduler::Request;\nuse crate::api::sector_builder::scheduler::Scheduler;\nuse crate::api::sector_builder::sealer::*;\nuse crate::error::ExpectWithBacktrace;\nuse crate::error::Result;\nuse crate::FCP_LOG;\nuse sector_base::api::disk_backed_storage::new_sector_store;\nuse sector_base::api::disk_backed_storage::ConfiguredStore;\nuse sector_base::api::sector_store::SectorStore;\nuse slog::*;\nuse std::sync::{mpsc, Arc, Mutex};\n\npub mod errors;\nmod helpers;\nmod kv_store;\npub mod metadata;\nmod scheduler;\nmod sealer;\nmod state;\n\nconst NUM_SEAL_WORKERS: usize = 2;\n\nconst FATAL_NOSEND_TASK: &str = \"[run_blocking] could not send\";\nconst FATAL_NORECV_TASK: &str = \"[run_blocking] could not recv\";\n\npub type SectorId = u64;\n\npub struct SectorBuilder {\n    // Prevents FFI consumers from queueing behind long-running seal operations.\n    sealers_tx: mpsc::Sender<SealerInput>,\n\n    // For additional seal concurrency, add more workers here.\n    sealers: Vec<SealerWorker>,\n\n    // The main worker's queue.\n    scheduler_tx: mpsc::SyncSender<Request>,\n\n    // The main worker. Owns all mutable state for the SectorBuilder.\n    scheduler: Scheduler,\n}\n\nimpl SectorBuilder {\n    // Initialize and return a SectorBuilder from metadata persisted to disk if\n    // it exists. Otherwise, initialize and return a fresh SectorBuilder. The\n    // metadata key is equal to the prover_id.\n    pub fn init_from_metadata<S: Into<String>>(\n        sector_store_config: &ConfiguredStore,\n        last_committed_sector_id: SectorId,\n        metadata_dir: S,\n        prover_id: [u8; 31],\n        sealed_sector_dir: S,\n        staged_sector_dir: S,\n        max_num_staged_sectors: u8,\n    ) -> Result<SectorBuilder> {\n        let kv_store = Arc::new(WrappedKeyValueStore {\n            inner: Box::new(FileSystemKvs::initialize(metadata_dir.into())?),\n        });\n\n        // Initialize a SectorStore and wrap it in an Arc so we can access it\n        // from multiple threads. 
Our implementation assumes that the\n        // SectorStore is safe for concurrent access.\n        let sector_store = Arc::new(WrappedSectorStore {\n            inner: Box::new(new_sector_store(\n                sector_store_config,\n                sealed_sector_dir.into(),\n                staged_sector_dir.into(),\n            )),\n        });\n\n        // Configure the main worker's rendezvous channel.\n        let (main_tx, main_rx) = mpsc::sync_channel(0);\n\n        // Configure seal queue workers and channels.\n        let (seal_tx, seal_workers) = {\n            let (tx, rx) = mpsc::channel();\n            let rx = Arc::new(Mutex::new(rx));\n\n            let workers = (0..NUM_SEAL_WORKERS)\n                .map(|n| SealerWorker::start(n, rx.clone(), sector_store.clone(), prover_id))\n                .collect();\n\n            (tx, workers)\n        };\n\n        // Configure main worker.\n        let main_worker = Scheduler::start_with_metadata(\n            main_rx,\n            main_tx.clone(),\n            seal_tx.clone(),\n            kv_store.clone(),\n            sector_store.clone(),\n            last_committed_sector_id,\n            max_num_staged_sectors,\n            prover_id,\n        );\n\n        Ok(SectorBuilder {\n            scheduler_tx: main_tx,\n            scheduler: main_worker,\n            sealers_tx: seal_tx,\n            sealers: seal_workers,\n        })\n    }\n\n    // Returns the number of user-provided bytes that will fit into a staged\n    // sector.\n    pub fn get_max_user_bytes_per_staged_sector(&self) -> u64 {\n        self.run_blocking(Request::GetMaxUserBytesPerStagedSector)\n    }\n\n    // Stages user piece-bytes for sealing. Note that add_piece calls are\n    // processed sequentially to make bin packing easier.\n    pub fn add_piece(&self, piece_key: String, piece_bytes: &[u8]) -> Result<SectorId> {\n        log_unrecov(self.run_blocking(|tx| Request::AddPiece(piece_key, piece_bytes.to_vec(), tx)))\n    }\n\n    // Returns sealing status for the sector with specified id. If no sealed or\n    // staged sector exists with the provided id, produce an error.\n    pub fn get_seal_status(&self, sector_id: SectorId) -> Result<SealStatus> {\n        log_unrecov(self.run_blocking(|tx| Request::GetSealStatus(sector_id, tx)))\n    }\n\n    // Unseals the sector containing the referenced piece and returns its\n    // bytes. Produces an error if this sector builder does not have a sealed\n    // sector containing the referenced piece.\n    pub fn read_piece_from_sealed_sector(&self, piece_key: String) -> Result<Vec<u8>> {\n        log_unrecov(self.run_blocking(|tx| Request::RetrievePiece(piece_key, tx)))\n    }\n\n    // For demo purposes. Schedules sealing of all staged sectors.\n    pub fn seal_all_staged_sectors(&self) -> Result<()> {\n        log_unrecov(self.run_blocking(Request::SealAllStagedSectors))\n    }\n\n    // Returns all sealed sector metadata.\n    pub fn get_sealed_sectors(&self) -> Result<Vec<SealedSectorMetadata>> {\n        log_unrecov(self.run_blocking(Request::GetSealedSectors))\n    }\n\n    // Returns all staged sector metadata.\n    pub fn get_staged_sectors(&self) -> Result<Vec<StagedSectorMetadata>> {\n        log_unrecov(self.run_blocking(Request::GetStagedSectors))\n    }\n\n    // Generates a proof-of-spacetime. 
Blocks the calling thread.\n    pub fn generate_post(\n        &self,\n        comm_rs: &[[u8; 32]],\n        challenge_seed: &[u8; 32],\n    ) -> Result<PoStOutput> {\n        log_unrecov(\n            self.run_blocking(|tx| Request::GeneratePoSt(Vec::from(comm_rs), *challenge_seed, tx)),\n        )\n    }\n\n    // Run a task, blocking on the return channel.\n    fn run_blocking<T, F: FnOnce(mpsc::SyncSender<T>) -> Request>(&self, with_sender: F) -> T {\n        let (tx, rx) = mpsc::sync_channel(0);\n\n        self.scheduler_tx\n            .clone()\n            .send(with_sender(tx))\n            .expects(FATAL_NOSEND_TASK);\n\n        rx.recv().expects(FATAL_NORECV_TASK)\n    }\n}\n\nimpl Drop for SectorBuilder {\n    fn drop(&mut self) {\n        // Shut down main worker and sealers, too.\n        let _ = self\n            .scheduler_tx\n            .send(Request::Shutdown)\n            .map_err(|err| println!(\"err sending Shutdown to scheduler: {:?}\", err));\n\n        for _ in &mut self.sealers {\n            let _ = self\n                .sealers_tx\n                .send(SealerInput::Shutdown)\n                .map_err(|err| println!(\"err sending Shutdown to sealer: {:?}\", err));\n        }\n\n        // Wait for worker threads to return.\n        let scheduler_thread = &mut self.scheduler.thread;\n\n        if let Some(thread) = scheduler_thread.take() {\n            let _ = thread\n                .join()\n                .map_err(|err| println!(\"err joining scheduler thread: {:?}\", err));\n        }\n\n        for worker in &mut self.sealers {\n            if let Some(thread) = worker.thread.take() {\n                let _ = thread\n                    .join()\n                    .map_err(|err| println!(\"err joining sealer thread: {:?}\", err));\n            }\n        }\n    }\n}\n\npub struct WrappedSectorStore {\n    inner: Box<SectorStore>,\n}\n\nunsafe impl Sync for WrappedSectorStore {}\nunsafe impl Send for WrappedSectorStore {}\n\npub struct WrappedKeyValueStore {\n    inner: Box<KeyValueStore>,\n}\n\nunsafe impl Sync for WrappedKeyValueStore {}\nunsafe impl Send for WrappedKeyValueStore {}\n\nfn log_unrecov<T>(result: Result<T>) -> Result<T> {\n    if let Err(err) = &result {\n        if let Some(SectorBuilderErr::Unrecoverable(err, backtrace)) = err.downcast_ref() {\n            let backtrace_string = format!(\"{:?}\", backtrace);\n            error!(FCP_LOG, \"unrecoverable error\"; \"backtrace\" => backtrace_string, \"error\" => err);\n        }\n    }\n\n    result\n}\n"
  },
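  {
    "path": "filecoin-proofs/examples/run_blocking_sketch.rs",
    "content": "//! Standalone sketch of the run_blocking pattern used by SectorBuilder in\n//! mod.rs: the caller allocates a zero-capacity (rendezvous) reply channel,\n//! embeds the sender in the request, and blocks on the receiver until the\n//! scheduler thread answers. Request::Ping is a hypothetical stand-in for\n//! the real Request variants.\n\nuse std::sync::mpsc;\nuse std::thread;\n\nenum Request {\n    Ping(mpsc::SyncSender<u64>),\n    Shutdown,\n}\n\nfn main() {\n    let (sched_tx, sched_rx) = mpsc::sync_channel::<Request>(0);\n\n    // The scheduler owns all mutable state and serves requests in a loop.\n    let scheduler = thread::spawn(move || loop {\n        match sched_rx.recv().expect(\"could not recv\") {\n            Request::Ping(reply) => reply.send(42).expect(\"could not send\"),\n            Request::Shutdown => break,\n        }\n    });\n\n    // run_blocking: send a request carrying the reply sender, then block.\n    let (tx, rx) = mpsc::sync_channel(0);\n    sched_tx.send(Request::Ping(tx)).expect(\"could not send\");\n    assert_eq!(rx.recv().expect(\"could not recv\"), 42);\n\n    sched_tx.send(Request::Shutdown).expect(\"could not send\");\n    scheduler.join().expect(\"could not join\");\n}\n"
  },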
  {
    "path": "filecoin-proofs/src/api/sector_builder/scheduler.rs",
    "content": "use crate::api::internal;\nuse crate::api::internal::PoStInput;\nuse crate::api::internal::PoStInputPart;\nuse crate::api::internal::PoStOutput;\nuse crate::api::sector_builder::errors::err_piecenotfound;\nuse crate::api::sector_builder::errors::err_unrecov;\nuse crate::api::sector_builder::helpers::add_piece::add_piece;\nuse crate::api::sector_builder::helpers::get_seal_status::get_seal_status;\nuse crate::api::sector_builder::helpers::get_sectors_ready_for_sealing::get_sectors_ready_for_sealing;\nuse crate::api::sector_builder::helpers::snapshots::load_snapshot;\nuse crate::api::sector_builder::helpers::snapshots::make_snapshot;\nuse crate::api::sector_builder::helpers::snapshots::persist_snapshot;\nuse crate::api::sector_builder::metadata::SealStatus;\nuse crate::api::sector_builder::metadata::SealedSectorMetadata;\nuse crate::api::sector_builder::metadata::StagedSectorMetadata;\nuse crate::api::sector_builder::sealer::SealerInput;\nuse crate::api::sector_builder::state::SectorBuilderState;\nuse crate::api::sector_builder::state::StagedState;\nuse crate::api::sector_builder::SectorId;\nuse crate::api::sector_builder::WrappedKeyValueStore;\nuse crate::api::sector_builder::WrappedSectorStore;\nuse crate::error::ExpectWithBacktrace;\nuse crate::error::Result;\nuse std::collections::HashMap;\nuse std::sync::mpsc;\nuse std::sync::Arc;\nuse std::thread;\n\nconst FATAL_NOLOAD: &str = \"could not load snapshot\";\nconst FATAL_NORECV: &str = \"could not receive task\";\nconst FATAL_NOSEND: &str = \"could not send\";\nconst FATAL_SECMAP: &str = \"insert failed\";\nconst FATAL_SNPSHT: &str = \"could not snapshot\";\nconst FATAL_SLRSND: &str = \"could not send to sealer\";\nconst FATAL_HUNGUP: &str = \"could not send to ret channel\";\nconst FATAL_NOSECT: &str = \"could not find sector\";\n\npub struct Scheduler {\n    pub thread: Option<thread::JoinHandle<()>>,\n}\n\n#[derive(Debug)]\npub enum Request {\n    AddPiece(String, Vec<u8>, mpsc::SyncSender<Result<SectorId>>),\n    GetSealedSectors(mpsc::SyncSender<Result<Vec<SealedSectorMetadata>>>),\n    GetStagedSectors(mpsc::SyncSender<Result<Vec<StagedSectorMetadata>>>),\n    GetSealStatus(SectorId, mpsc::SyncSender<Result<SealStatus>>),\n    GeneratePoSt(\n        Vec<[u8; 32]>,\n        [u8; 32],\n        mpsc::SyncSender<Result<PoStOutput>>,\n    ),\n    RetrievePiece(String, mpsc::SyncSender<Result<Vec<u8>>>),\n    SealAllStagedSectors(mpsc::SyncSender<Result<()>>),\n    GetMaxUserBytesPerStagedSector(mpsc::SyncSender<u64>),\n    HandleSealResult(SectorId, Box<Result<SealedSectorMetadata>>),\n    Shutdown,\n}\n\nimpl Scheduler {\n    pub fn start_with_metadata(\n        scheduler_input_rx: mpsc::Receiver<Request>,\n        scheduler_input_tx: mpsc::SyncSender<Request>,\n        sealer_input_tx: mpsc::Sender<SealerInput>,\n        kv_store: Arc<WrappedKeyValueStore>,\n        sector_store: Arc<WrappedSectorStore>,\n        last_committed_sector_id: SectorId,\n        max_num_staged_sectors: u8,\n        prover_id: [u8; 31],\n    ) -> Scheduler {\n        let thread = thread::spawn(move || {\n            // Build the scheduler's initial state. If available, we\n            // reconstitute this state from persisted metadata. 
If not, we\n            // create it from scratch.\n            let state = {\n                let loaded = load_snapshot(&kv_store, &prover_id)\n                    .expects(FATAL_NOLOAD)\n                    .map(|x| x.into());\n\n
                loaded.unwrap_or_else(|| SectorBuilderState {\n                    prover_id,\n                    staged: StagedState {\n                        sector_id_nonce: last_committed_sector_id,\n                        sectors: Default::default(),\n                    },\n                    sealed: Default::default(),\n                })\n            };\n\n
            let max_user_bytes_per_staged_sector =\n                sector_store.inner.config().max_unsealed_bytes_per_sector();\n\n
            let mut m = SectorMetadataManager {\n                kv_store,\n                sector_store,\n                state,\n                sealer_input_tx,\n                scheduler_input_tx: scheduler_input_tx.clone(),\n                max_num_staged_sectors,\n                max_user_bytes_per_staged_sector,\n            };\n\n
            loop {\n                let task = scheduler_input_rx.recv().expects(FATAL_NORECV);\n\n
                // Dispatch to the appropriate task-handler.\n                match task {\n
                    Request::AddPiece(key, bytes, tx) => {\n                        tx.send(m.add_piece(key, &bytes)).expects(FATAL_NOSEND);\n                    }\n
                    Request::GetSealStatus(sector_id, tx) => {\n                        tx.send(m.get_seal_status(sector_id)).expects(FATAL_NOSEND);\n                    }\n
                    Request::RetrievePiece(piece_key, tx) => m.retrieve_piece(piece_key, tx),\n
                    Request::GetSealedSectors(tx) => {\n                        tx.send(m.get_sealed_sectors()).expects(FATAL_NOSEND);\n                    }\n
                    Request::GetStagedSectors(tx) => {\n                        tx.send(m.get_staged_sectors()).expects(FATAL_NOSEND);\n                    }\n
                    Request::GetMaxUserBytesPerStagedSector(tx) => {\n                        tx.send(m.max_user_bytes()).expects(FATAL_NOSEND);\n                    }\n
                    Request::SealAllStagedSectors(tx) => {\n                        tx.send(m.seal_all_staged_sectors()).expects(FATAL_NOSEND);\n                    }\n
                    Request::HandleSealResult(sector_id, result) => {\n                        m.handle_seal_result(sector_id, *result);\n                    }\n
                    Request::GeneratePoSt(comm_rs, chg_seed, tx) => {\n                        m.generate_post(&comm_rs, &chg_seed, tx)\n                    }\n
                    Request::Shutdown => break,\n                }\n            }\n        });\n\n
        Scheduler {\n            thread: Some(thread),\n        }\n    }\n}\n\n
// The SectorMetadataManager is the owner of all sector-related metadata.\n// It dispatches expensive operations (e.g. unseal and seal) to the sealer\n
// worker-threads. Other, inexpensive work (or work which needs to be performed\n// serially) is handled by the SectorMetadataManager itself.\npub struct SectorMetadataManager {\n
    kv_store: Arc<WrappedKeyValueStore>,\n    sector_store: Arc<WrappedSectorStore>,\n    state: SectorBuilderState,\n    sealer_input_tx: mpsc::Sender<SealerInput>,\n    scheduler_input_tx: mpsc::SyncSender<Request>,\n    max_num_staged_sectors: u8,\n    max_user_bytes_per_staged_sector: u64,\n}\n\n
impl SectorMetadataManager {\n    pub fn generate_post(\n        &self,\n        comm_rs: &[[u8; 32]],\n        challenge_seed: &[u8; 32],\n        return_channel: mpsc::SyncSender<Result<PoStOutput>>,\n    ) {\n
        // reduce our sealed sector state-map to a mapping of comm_r to sealed\n        // sector access (AKA path to sealed sector file)\n
        let comm_r_to_sector_access: HashMap<[u8; 32], String> = self\n            .state\n            .sealed\n            .sectors\n            .values()\n            .fold(HashMap::new(), |mut acc, item| {\n                let v = item.sector_access.clone();\n                let k = item.comm_r;\n                acc.entry(k).or_insert(v);\n                acc\n            });\n\n
        let mut input_parts: Vec<PoStInputPart> = Default::default();\n\n
        // any comm_r which does not correspond to a sealed sector known to this\n        // builder is passed through with an empty (None) sector access\n
        for comm_r in comm_rs {\n            input_parts.push(PoStInputPart {\n                sealed_sector_access: comm_r_to_sector_access.get(comm_r).cloned(),\n                comm_r: *comm_r,\n            });\n        }\n\n
        let output = internal::generate_post(PoStInput {\n            challenge_seed: *challenge_seed,\n            input_parts,\n        });\n\n
        // TODO: Where should this work be scheduled? New worker type?\n        return_channel.send(output).expects(FATAL_HUNGUP);\n    }\n\n
    // Unseals the sector containing the referenced piece and returns its\n    // bytes. Produces an error if this sector builder does not have a sealed\n    // sector containing the referenced piece.\n
    pub fn retrieve_piece(\n        &self,\n        piece_key: String,\n        return_channel: mpsc::SyncSender<Result<Vec<u8>>>,\n    ) {\n
        let opt_sealed_sector = self.state.sealed.sectors.values().find(|sector| {\n            sector\n                .pieces\n                .iter()\n                .any(|piece| piece.piece_key == piece_key)\n        });\n\n
        if let Some(sealed_sector) = opt_sealed_sector {\n            let sealed_sector = Box::new(sealed_sector.clone());\n            let task = SealerInput::Unseal(piece_key, sealed_sector, return_channel);\n\n
            self.sealer_input_tx\n                .clone()\n                .send(task)\n                .expects(FATAL_SLRSND);\n        } else {\n
            return_channel\n                .send(Err(err_piecenotfound(piece_key.to_string()).into()))\n                .expects(FATAL_HUNGUP);\n        }\n    }\n\n
    // Returns sealing status for the sector with the specified id. If no sealed or\n    // staged sector exists with the provided id, produce an error.\n
    pub fn get_seal_status(&self, sector_id: SectorId) -> Result<SealStatus> {\n        get_seal_status(&self.state.staged, &self.state.sealed, sector_id)\n    }\n\n
    // Write the piece to storage, obtaining the sector id with which the\n    // piece-bytes are now associated.\n
    pub fn add_piece(&mut self, piece_key: String, piece_bytes: &[u8]) -> Result<u64> {\n        let destination_sector_id = add_piece(\n            &self.sector_store,\n            &mut self.state.staged,\n            piece_key,\n            piece_bytes,\n        )?;\n\n
        self.check_and_schedule(false)?;\n        self.checkpoint()?;\n\n
        Ok(destination_sector_id)\n    }\n\n
    // For demo purposes. Schedules sealing of all staged sectors.\n
    pub fn seal_all_staged_sectors(&mut self) -> Result<()> {\n        self.check_and_schedule(true)?;\n        self.checkpoint()\n    }\n\n
    // Produces a vector containing metadata for all sealed sectors that this\n    // SectorBuilder knows about.\n
    pub fn get_sealed_sectors(&self) -> Result<Vec<SealedSectorMetadata>> {\n        Ok(self.state.sealed.sectors.values().cloned().collect())\n    }\n\n
    // Produces a vector containing metadata for all staged sectors that this\n    // SectorBuilder knows about.\n
    pub fn get_staged_sectors(&self) -> Result<Vec<StagedSectorMetadata>> {\n        Ok(self.state.staged.sectors.values().cloned().collect())\n    }\n\n
    // Returns the number of user-provided bytes that will fit into a staged\n    // sector.\n
    pub fn max_user_bytes(&self) -> u64 {\n        self.max_user_bytes_per_staged_sector\n    }\n\n
    // Update metadata to reflect the sealing results.\n
    pub fn handle_seal_result(\n        &mut self,\n        sector_id: SectorId,\n        result: Result<SealedSectorMetadata>,\n    ) {\n
        // scope exists to end the mutable borrow of self so that we can\n        // checkpoint\n        {\n
            let staged_state = &mut self.state.staged;\n            let sealed_state = &mut self.state.sealed;\n\n
            if result.is_err() {\n                if let Some(staged_sector) = staged_state.sectors.get_mut(&sector_id) {\n                    staged_sector.seal_status =\n                        SealStatus::Failed(format!(\"{}\", err_unrecov(result.unwrap_err())));\n                };\n            } else {\n
                // Remove the staged sector from the state map.\n                let _ = staged_state.sectors.remove(&sector_id);\n\n
                // Insert the newly-sealed sector into the other state map.\n                let sealed_sector = result.expects(FATAL_SECMAP);\n\n
                sealed_state.sectors.insert(sector_id, sealed_sector);\n            }\n        }\n\n
        self.checkpoint().expects(FATAL_SNPSHT);\n    }\n\n
    // Check for sectors which should no longer receive new user piece-bytes and\n    // schedule them for sealing.\n
    fn check_and_schedule(&mut self, seal_all_staged_sectors: bool) -> Result<()> {\n        let staged_state = &mut self.state.staged;\n\n
        let to_be_sealed = get_sectors_ready_for_sealing(\n            staged_state,\n            self.max_user_bytes_per_staged_sector,\n            self.max_num_staged_sectors,\n            seal_all_staged_sectors,\n        );\n\n
        // Mark the to-be-sealed sectors as no longer accepting data and then\n        // schedule sealing.\n
        for sector_id in to_be_sealed {\n            let sector = staged_state\n                .sectors\n                .get_mut(&sector_id)\n                .expects(FATAL_NOSECT);\n            sector.seal_status = SealStatus::Sealing;\n\n
            self.sealer_input_tx\n                .clone()\n                .send(SealerInput::Seal(\n                    sector.clone(),\n                    self.scheduler_input_tx.clone(),\n                ))\n                .expects(FATAL_SLRSND);\n        }\n\n
        Ok(())\n    }\n\n
    // Create and persist metadata snapshot.\n
    fn checkpoint(&self) -> Result<()> {\n        let snapshot = make_snapshot(\n            &self.state.prover_id,\n            &self.state.staged,\n            &self.state.sealed,\n        );\n        persist_snapshot(&self.kv_store, &snapshot)?;\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/sector_builder/sealer.rs",
    "content": "use crate::api::sector_builder::helpers::retrieve_piece::retrieve_piece;\nuse crate::api::sector_builder::helpers::seal::seal;\nuse crate::api::sector_builder::metadata::SealedSectorMetadata;\nuse crate::api::sector_builder::metadata::StagedSectorMetadata;\nuse crate::api::sector_builder::scheduler::Request;\nuse crate::api::sector_builder::WrappedSectorStore;\nuse crate::error::ExpectWithBacktrace;\nuse crate::error::Result;\nuse std::sync::mpsc;\nuse std::sync::{Arc, Mutex};\nuse std::thread;\n\nconst FATAL_NOLOCK: &str = \"error acquiring task lock\";\nconst FATAL_RCVTSK: &str = \"error receiving seal task\";\nconst FATAL_SNDTSK: &str = \"error sending task\";\nconst FATAL_SNDRLT: &str = \"error sending result\";\n\npub struct SealerWorker {\n    pub id: usize,\n    pub thread: Option<thread::JoinHandle<()>>,\n}\n\npub enum SealerInput {\n    Seal(StagedSectorMetadata, mpsc::SyncSender<Request>),\n    Unseal(\n        String,\n        Box<SealedSectorMetadata>,\n        mpsc::SyncSender<Result<Vec<u8>>>,\n    ),\n    Shutdown,\n}\n\nimpl SealerWorker {\n    pub fn start(\n        id: usize,\n        seal_task_rx: Arc<Mutex<mpsc::Receiver<SealerInput>>>,\n        sector_store: Arc<WrappedSectorStore>,\n        prover_id: [u8; 31],\n    ) -> SealerWorker {\n        let thread = thread::spawn(move || loop {\n            // Acquire a lock on the rx end of the channel, get a task,\n            // relinquish the lock and return the task. The receiver is mutexed\n            // for coordinating reads across multiple worker-threads.\n            let task = {\n                let rx = seal_task_rx.lock().expects(FATAL_NOLOCK);\n                rx.recv().expects(FATAL_RCVTSK)\n            };\n\n            // Dispatch to the appropriate task-handler.\n            match task {\n                SealerInput::Seal(staged_sector, return_channel) => {\n                    let sector_id = staged_sector.sector_id;\n                    let result = seal(&sector_store.clone(), &prover_id, staged_sector);\n                    let task = Request::HandleSealResult(sector_id, Box::new(result));\n\n                    return_channel.send(task).expects(FATAL_SNDTSK);\n                }\n                SealerInput::Unseal(piece_key, sealed_sector, return_channel) => {\n                    let result = retrieve_piece(\n                        &sector_store.clone(),\n                        &sealed_sector,\n                        &prover_id,\n                        &piece_key,\n                    );\n\n                    return_channel.send(result).expects(FATAL_SNDRLT);\n                }\n                SealerInput::Shutdown => break,\n            }\n        });\n\n        SealerWorker {\n            id,\n            thread: Some(thread),\n        }\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/sector_builder/state.rs",
    "content": "use crate::api::sector_builder::metadata::{SealedSectorMetadata, StagedSectorMetadata};\nuse crate::api::sector_builder::SectorId;\nuse std::collections::HashMap;\n\n#[derive(Default, Serialize, Deserialize, Debug, PartialEq)]\npub struct StagedState {\n    pub sector_id_nonce: SectorId,\n    pub sectors: HashMap<SectorId, StagedSectorMetadata>,\n}\n\n#[derive(Default, Serialize, Deserialize, Debug, PartialEq)]\npub struct SealedState {\n    pub sectors: HashMap<SectorId, SealedSectorMetadata>,\n}\n\n#[derive(Serialize, Deserialize, Debug)]\npub struct SectorBuilderState {\n    pub prover_id: [u8; 31],\n    pub staged: StagedState,\n    pub sealed: SealedState,\n}\n\n#[derive(Serialize, Deserialize, Debug, PartialEq)]\npub struct StateSnapshot {\n    pub prover_id: [u8; 31],\n    pub staged: StagedState,\n    pub sealed: SealedState,\n}\n\nimpl Into<SectorBuilderState> for StateSnapshot {\n    fn into(self) -> SectorBuilderState {\n        SectorBuilderState {\n            prover_id: self.prover_id,\n            staged: self.staged,\n            sealed: self.sealed,\n        }\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/util.rs",
    "content": "use std::mem::size_of;\n\nuse anyhow::{Context, Result};\nuse bellperson::bls::Fr;\nuse filecoin_hashers::{Domain, Hasher};\nuse fr32::{bytes_into_fr, fr_into_bytes};\nuse merkletree::merkle::{get_merkle_tree_leafs, get_merkle_tree_len};\nuse storage_proofs_core::merkle::{get_base_tree_count, MerkleTreeTrait};\nuse typenum::Unsigned;\n\nuse crate::types::{Commitment, SectorSize};\n\npub fn as_safe_commitment<H: Domain, T: AsRef<str>>(\n    comm: &[u8; 32],\n    commitment_name: T,\n) -> Result<H> {\n    bytes_into_fr(comm)\n        .map(Into::into)\n        .with_context(|| format!(\"Invalid commitment ({})\", commitment_name.as_ref(),))\n}\n\npub fn commitment_from_fr(fr: Fr) -> Commitment {\n    let mut commitment = [0; 32];\n    for (i, b) in fr_into_bytes(&fr).iter().enumerate() {\n        commitment[i] = *b;\n    }\n    commitment\n}\n\npub fn get_base_tree_size<Tree: MerkleTreeTrait>(sector_size: SectorSize) -> Result<usize> {\n    let base_tree_leaves = u64::from(sector_size) as usize\n        / size_of::<<Tree::Hasher as Hasher>::Domain>()\n        / get_base_tree_count::<Tree>();\n\n    get_merkle_tree_len(base_tree_leaves, Tree::Arity::to_usize())\n}\n\npub fn get_base_tree_leafs<Tree: MerkleTreeTrait>(base_tree_size: usize) -> Result<usize> {\n    get_merkle_tree_leafs(base_tree_size, Tree::Arity::to_usize())\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/window_post.rs",
    "content": "use std::collections::BTreeMap;\n\nuse anyhow::{ensure, Context, Result};\nuse filecoin_hashers::Hasher;\nuse log::info;\nuse storage_proofs_core::{\n    compound_proof::{self, CompoundProof},\n    merkle::MerkleTreeTrait,\n    multi_proof::MultiProof,\n    sector::SectorId,\n};\nuse storage_proofs_post::fallback::{\n    self, FallbackPoSt, FallbackPoStCompound, PrivateSector, PublicSector,\n};\n\nuse crate::{\n    api::{as_safe_commitment, get_partitions_for_window_post, partition_vanilla_proofs},\n    caches::{get_post_params, get_post_verifying_key},\n    parameters::window_post_setup_params,\n    types::{\n        ChallengeSeed, FallbackPoStSectorProof, PoStConfig, PrivateReplicaInfo, ProverId,\n        PublicReplicaInfo, SnarkProof,\n    },\n    PoStType,\n};\n\n/// Generates a Window proof-of-spacetime with provided vanilla proofs.\npub fn generate_window_post_with_vanilla<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    prover_id: ProverId,\n    vanilla_proofs: Vec<FallbackPoStSectorProof<Tree>>,\n) -> Result<SnarkProof> {\n    info!(\"generate_window_post_with_vanilla:start\");\n    ensure!(\n        post_config.typ == PoStType::Window,\n        \"invalid post config type\"\n    );\n\n    let randomness_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(randomness, \"randomness\")?;\n    let prover_id_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(&prover_id, \"prover_id\")?;\n\n    let vanilla_params = window_post_setup_params(&post_config);\n    let partitions = get_partitions_for_window_post(vanilla_proofs.len(), &post_config);\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params,\n        partitions,\n        priority: post_config.priority,\n    };\n\n    let partitions = partitions.unwrap_or(1);\n\n    let pub_params: compound_proof::PublicParams<'_, FallbackPoSt<'_, Tree>> =\n        FallbackPoStCompound::setup(&setup_params)?;\n    let groth_params = get_post_params::<Tree>(&post_config)?;\n\n    let mut pub_sectors = Vec::with_capacity(vanilla_proofs.len());\n    for vanilla_proof in &vanilla_proofs {\n        pub_sectors.push(PublicSector {\n            id: vanilla_proof.sector_id,\n            comm_r: vanilla_proof.comm_r,\n        });\n    }\n\n    let pub_inputs = fallback::PublicInputs {\n        randomness: randomness_safe,\n        prover_id: prover_id_safe,\n        sectors: pub_sectors,\n        k: None,\n    };\n\n    let partitioned_proofs = partition_vanilla_proofs(\n        &post_config,\n        &pub_params.vanilla_params,\n        &pub_inputs,\n        partitions,\n        &vanilla_proofs,\n    )?;\n\n    let proof = FallbackPoStCompound::prove_with_vanilla(\n        &pub_params,\n        &pub_inputs,\n        partitioned_proofs,\n        &groth_params,\n    )?;\n\n    info!(\"generate_window_post_with_vanilla:finish\");\n\n    proof.to_vec()\n}\n\n/// Generates a Window proof-of-spacetime.\npub fn generate_window_post<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    replicas: &BTreeMap<SectorId, PrivateReplicaInfo<Tree>>,\n    prover_id: ProverId,\n) -> Result<SnarkProof> {\n    info!(\"generate_window_post:start\");\n    ensure!(\n        post_config.typ == PoStType::Window,\n        \"invalid post config type\"\n    );\n\n    let randomness_safe = as_safe_commitment(randomness, \"randomness\")?;\n    let prover_id_safe = as_safe_commitment(&prover_id, 
\"prover_id\")?;\n\n    let vanilla_params = window_post_setup_params(&post_config);\n    let partitions = get_partitions_for_window_post(replicas.len(), &post_config);\n\n    let sector_count = vanilla_params.sector_count;\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params,\n        partitions,\n        priority: post_config.priority,\n    };\n\n    let pub_params: compound_proof::PublicParams<'_, FallbackPoSt<'_, Tree>> =\n        FallbackPoStCompound::setup(&setup_params)?;\n    let groth_params = get_post_params::<Tree>(&post_config)?;\n\n    let trees: Vec<_> = replicas\n        .iter()\n        .map(|(sector_id, replica)| {\n            replica\n                .merkle_tree(post_config.sector_size)\n                .with_context(|| {\n                    format!(\"generate_window_post: merkle_tree failed: {:?}\", sector_id)\n                })\n        })\n        .collect::<Result<_>>()?;\n\n    let mut pub_sectors = Vec::with_capacity(sector_count);\n    let mut priv_sectors = Vec::with_capacity(sector_count);\n\n    for ((sector_id, replica), tree) in replicas.iter().zip(trees.iter()) {\n        let comm_r = replica.safe_comm_r().with_context(|| {\n            format!(\"generate_window_post: safe_comm_r failed: {:?}\", sector_id)\n        })?;\n        let comm_c = replica.safe_comm_c();\n        let comm_r_last = replica.safe_comm_r_last();\n\n        pub_sectors.push(PublicSector {\n            id: *sector_id,\n            comm_r,\n        });\n        priv_sectors.push(PrivateSector {\n            tree,\n            comm_c,\n            comm_r_last,\n        });\n    }\n\n    let pub_inputs = fallback::PublicInputs {\n        randomness: randomness_safe,\n        prover_id: prover_id_safe,\n        sectors: pub_sectors,\n        k: None,\n    };\n\n    let priv_inputs = fallback::PrivateInputs::<Tree> {\n        sectors: &priv_sectors,\n    };\n\n    let proof = FallbackPoStCompound::prove(&pub_params, &pub_inputs, &priv_inputs, &groth_params)?;\n\n    info!(\"generate_window_post:finish\");\n\n    proof.to_vec()\n}\n\n/// Verifies a window proof-of-spacetime.\npub fn verify_window_post<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    replicas: &BTreeMap<SectorId, PublicReplicaInfo>,\n    prover_id: ProverId,\n    proof: &[u8],\n) -> Result<bool> {\n    info!(\"verify_window_post:start\");\n\n    ensure!(\n        post_config.typ == PoStType::Window,\n        \"invalid post config type\"\n    );\n\n    let randomness_safe = as_safe_commitment(randomness, \"randomness\")?;\n    let prover_id_safe = as_safe_commitment(&prover_id, \"prover_id\")?;\n\n    let vanilla_params = window_post_setup_params(&post_config);\n    let partitions = get_partitions_for_window_post(replicas.len(), &post_config);\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params,\n        partitions,\n        priority: false,\n    };\n    let pub_params: compound_proof::PublicParams<'_, FallbackPoSt<'_, Tree>> =\n        FallbackPoStCompound::setup(&setup_params)?;\n\n    let pub_sectors: Vec<_> = replicas\n        .iter()\n        .map(|(sector_id, replica)| {\n            let comm_r = replica.safe_comm_r().with_context(|| {\n                format!(\"verify_window_post: safe_comm_r failed: {:?}\", sector_id)\n            })?;\n            Ok(PublicSector {\n                id: *sector_id,\n                comm_r,\n            })\n        })\n        .collect::<Result<_>>()?;\n\n    let pub_inputs = 
fallback::PublicInputs {\n        randomness: randomness_safe,\n        prover_id: prover_id_safe,\n        sectors: pub_sectors,\n        k: None,\n    };\n\n    let is_valid = {\n        let verifying_key = get_post_verifying_key::<Tree>(&post_config)?;\n        let multi_proof = MultiProof::new_from_reader(partitions, proof, &verifying_key)?;\n\n        FallbackPoStCompound::verify(\n            &pub_params,\n            &pub_inputs,\n            &multi_proof,\n            &fallback::ChallengeRequirements {\n                minimum_challenge_count: post_config.challenge_count * post_config.sector_count,\n            },\n        )?\n    };\n    if !is_valid {\n        return Ok(false);\n    }\n\n    info!(\"verify_window_post:finish\");\n\n    Ok(true)\n}\n"
  },
  {
    "path": "filecoin-proofs/src/api/winning_post.rs",
    "content": "use anyhow::{ensure, Context, Result};\nuse filecoin_hashers::Hasher;\nuse log::info;\nuse storage_proofs_core::{\n    compound_proof::{self, CompoundProof},\n    merkle::MerkleTreeTrait,\n    multi_proof::MultiProof,\n    sector::SectorId,\n};\nuse storage_proofs_post::fallback::{\n    self, generate_sector_challenges, FallbackPoSt, FallbackPoStCompound, PrivateSector,\n    PublicSector,\n};\n\nuse crate::{\n    api::{as_safe_commitment, partition_vanilla_proofs},\n    caches::{get_post_params, get_post_verifying_key},\n    parameters::winning_post_setup_params,\n    types::{\n        ChallengeSeed, Commitment, FallbackPoStSectorProof, PoStConfig, PrivateReplicaInfo,\n        ProverId, PublicReplicaInfo, SnarkProof,\n    },\n    PoStType,\n};\n\n/// Generates a Winning proof-of-spacetime with provided vanilla proofs.\npub fn generate_winning_post_with_vanilla<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    prover_id: ProverId,\n    vanilla_proofs: Vec<FallbackPoStSectorProof<Tree>>,\n) -> Result<SnarkProof> {\n    info!(\"generate_winning_post_with_vanilla:start\");\n    ensure!(\n        post_config.typ == PoStType::Winning,\n        \"invalid post config type\"\n    );\n\n    ensure!(\n        vanilla_proofs.len() == post_config.sector_count,\n        \"invalid amount of vanilla proofs\"\n    );\n\n    let randomness_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(randomness, \"randomness\")?;\n    let prover_id_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(&prover_id, \"prover_id\")?;\n\n    let vanilla_params = winning_post_setup_params(&post_config)?;\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params,\n        partitions: None,\n        priority: post_config.priority,\n    };\n    let pub_params: compound_proof::PublicParams<'_, FallbackPoSt<'_, Tree>> =\n        FallbackPoStCompound::setup(&setup_params)?;\n    let groth_params = get_post_params::<Tree>(&post_config)?;\n\n    let mut pub_sectors = Vec::with_capacity(vanilla_proofs.len());\n    for vanilla_proof in &vanilla_proofs {\n        pub_sectors.push(PublicSector {\n            id: vanilla_proof.sector_id,\n            comm_r: vanilla_proof.comm_r,\n        });\n    }\n\n    let pub_inputs = fallback::PublicInputs {\n        randomness: randomness_safe,\n        prover_id: prover_id_safe,\n        sectors: pub_sectors,\n        k: None,\n    };\n\n    let partitions = pub_params.partitions.unwrap_or(1);\n    let partitioned_proofs = partition_vanilla_proofs(\n        &post_config,\n        &pub_params.vanilla_params,\n        &pub_inputs,\n        partitions,\n        &vanilla_proofs,\n    )?;\n\n    let proof = FallbackPoStCompound::prove_with_vanilla(\n        &pub_params,\n        &pub_inputs,\n        partitioned_proofs,\n        &groth_params,\n    )?;\n    let proof = proof.to_vec()?;\n\n    info!(\"generate_winning_post_with_vanilla:finish\");\n\n    Ok(proof)\n}\n\n/// Generates a Winning proof-of-spacetime.\npub fn generate_winning_post<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    replicas: &[(SectorId, PrivateReplicaInfo<Tree>)],\n    prover_id: ProverId,\n) -> Result<SnarkProof> {\n    info!(\"generate_winning_post:start\");\n    ensure!(\n        post_config.typ == PoStType::Winning,\n        \"invalid post config type\"\n    );\n\n    ensure!(\n        replicas.len() == post_config.sector_count,\n        \"invalid 
amount of replicas\"\n    );\n\n    let randomness_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(randomness, \"randomness\")?;\n    let prover_id_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(&prover_id, \"prover_id\")?;\n\n    let vanilla_params = winning_post_setup_params(&post_config)?;\n    let param_sector_count = vanilla_params.sector_count;\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params,\n        partitions: None,\n        priority: post_config.priority,\n    };\n    let pub_params: compound_proof::PublicParams<'_, FallbackPoSt<'_, Tree>> =\n        FallbackPoStCompound::setup(&setup_params)?;\n    let groth_params = get_post_params::<Tree>(&post_config)?;\n\n    let trees = replicas\n        .iter()\n        .map(|(sector_id, replica)| {\n            replica\n                .merkle_tree(post_config.sector_size)\n                .with_context(|| {\n                    format!(\"generate_winning_post: merkle_tree failed: {:?}\", sector_id)\n                })\n        })\n        .collect::<Result<Vec<_>>>()?;\n\n    let mut pub_sectors = Vec::with_capacity(param_sector_count);\n    let mut priv_sectors = Vec::with_capacity(param_sector_count);\n\n    for _ in 0..param_sector_count {\n        for ((sector_id, replica), tree) in replicas.iter().zip(trees.iter()) {\n            let comm_r = replica.safe_comm_r().with_context(|| {\n                format!(\"generate_winning_post: safe_comm_r failed: {:?}\", sector_id)\n            })?;\n            let comm_c = replica.safe_comm_c();\n            let comm_r_last = replica.safe_comm_r_last();\n\n            pub_sectors.push(PublicSector::<<Tree::Hasher as Hasher>::Domain> {\n                id: *sector_id,\n                comm_r,\n            });\n            priv_sectors.push(PrivateSector {\n                tree,\n                comm_c,\n                comm_r_last,\n            });\n        }\n    }\n\n    let pub_inputs = fallback::PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n        randomness: randomness_safe,\n        prover_id: prover_id_safe,\n        sectors: pub_sectors,\n        k: None,\n    };\n\n    let priv_inputs = fallback::PrivateInputs::<Tree> {\n        sectors: &priv_sectors,\n    };\n\n    let proof =\n        FallbackPoStCompound::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs, &groth_params)?;\n    let proof = proof.to_vec()?;\n\n    info!(\"generate_winning_post:finish\");\n\n    Ok(proof)\n}\n\n/// Given some randomness and the length of available sectors, generates the challenged sector.\n///\n/// The returned values are indices in the range of `0..sector_set_size`, requiring the caller\n/// to match the index to the correct sector.\npub fn generate_winning_post_sector_challenge<Tree: MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    sector_set_size: u64,\n    prover_id: Commitment,\n) -> Result<Vec<u64>> {\n    info!(\"generate_winning_post_sector_challenge:start\");\n    ensure!(sector_set_size != 0, \"empty sector set is invalid\");\n    ensure!(\n        post_config.typ == PoStType::Winning,\n        \"invalid post config type\"\n    );\n\n    let prover_id_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(&prover_id, \"prover_id\")?;\n\n    let randomness_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(randomness, \"randomness\")?;\n    let result = generate_sector_challenges(\n        randomness_safe,\n        
post_config.sector_count,\n        sector_set_size,\n        prover_id_safe,\n    );\n\n    info!(\"generate_winning_post_sector_challenge:finish\");\n\n    result\n}\n\n/// Verifies a winning proof-of-spacetime.\n///\n/// The provided `replicas` must be the same ones as passed to `generate_winning_post`, and be based on\n/// the indices generated by `generate_winning_post_sector_challenge`. It is the responsibility of the\n/// caller to ensure this.\npub fn verify_winning_post<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n    randomness: &ChallengeSeed,\n    replicas: &[(SectorId, PublicReplicaInfo)],\n    prover_id: ProverId,\n    proof: &[u8],\n) -> Result<bool> {\n    info!(\"verify_winning_post:start\");\n\n    ensure!(\n        post_config.typ == PoStType::Winning,\n        \"invalid post config type\"\n    );\n    ensure!(\n        post_config.sector_count == replicas.len(),\n        \"invalid amount of replicas provided\"\n    );\n\n    let randomness_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(randomness, \"randomness\")?;\n    let prover_id_safe: <Tree::Hasher as Hasher>::Domain =\n        as_safe_commitment(&prover_id, \"prover_id\")?;\n\n    let vanilla_params = winning_post_setup_params(&post_config)?;\n    let param_sector_count = vanilla_params.sector_count;\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params,\n        partitions: None,\n        priority: false,\n    };\n    let pub_params: compound_proof::PublicParams<'_, FallbackPoSt<'_, Tree>> =\n        FallbackPoStCompound::setup(&setup_params)?;\n\n    let mut pub_sectors = Vec::with_capacity(param_sector_count);\n    for _ in 0..param_sector_count {\n        for (sector_id, replica) in replicas.iter() {\n            let comm_r = replica.safe_comm_r().with_context(|| {\n                format!(\"verify_winning_post: safe_comm_r failed: {:?}\", sector_id)\n            })?;\n            pub_sectors.push(PublicSector {\n                id: *sector_id,\n                comm_r,\n            });\n        }\n    }\n\n    let pub_inputs = fallback::PublicInputs {\n        randomness: randomness_safe,\n        prover_id: prover_id_safe,\n        sectors: pub_sectors,\n        k: None,\n    };\n\n    let is_valid = {\n        let verifying_key = get_post_verifying_key::<Tree>(&post_config)?;\n\n        let single_proof = MultiProof::new_from_reader(None, proof, &verifying_key)?;\n        if single_proof.len() != 1 {\n            return Ok(false);\n        }\n\n        FallbackPoStCompound::verify(\n            &pub_params,\n            &pub_inputs,\n            &single_proof,\n            &fallback::ChallengeRequirements {\n                minimum_challenge_count: post_config.challenge_count * post_config.sector_count,\n            },\n        )?\n    };\n\n    if !is_valid {\n        return Ok(false);\n    }\n\n    info!(\"verify_winning_post:finish\");\n\n    Ok(true)\n}\n"
  },
  {
    "path": "filecoin-proofs/src/bin/fakeipfsadd.rs",
    "content": "use blake2b_simd::State as Blake2b;\nuse clap::{App, Arg};\nuse std::fs::File;\n\npub fn main() {\n    let matches = App::new(\"fakeipfsadd\")\n        .version(\"0.1\")\n        .about(\n            \"\nThis program is used to simulate the `ipfs add` command while testing. It\naccepts a path to a file and writes 32 characters of its hex-encoded BLAKE2b \nchecksum to stdout. Note: The real `ipfs add` command computes and emits a CID.\n\",\n        )\n        .arg(Arg::with_name(\"add\").index(1).required(true))\n        .arg(Arg::with_name(\"file-path\").index(2).required(true))\n        .arg(\n            Arg::with_name(\"quieter\")\n                .short(\"Q\")\n                .required(true)\n                .help(\"Simulates the -Q argument to `ipfs add`\"),\n        )\n        .get_matches();\n\n    let src_file_path = matches\n        .value_of(\"file-path\")\n        .expect(\"failed to get file path\");\n\n    let mut src_file = File::open(&src_file_path)\n        .unwrap_or_else(|_| panic!(\"failed to open file at {}\", &src_file_path));\n\n    let mut hasher = Blake2b::new();\n\n    std::io::copy(&mut src_file, &mut hasher).expect(\"failed to write BLAKE2b bytes to hasher\");\n\n    let hex_string: String = hasher.finalize().to_hex()[..32].into();\n\n    println!(\"{}\", hex_string)\n}\n"
  },
  {
    "path": "filecoin-proofs/src/bin/paramcache.rs",
    "content": "extern crate filecoin_proofs;\nextern crate rand;\nextern crate sector_base;\nextern crate storage_proofs;\n\nuse rand::{SeedableRng, XorShiftRng};\n\nuse filecoin_proofs::api::internal;\nuse sector_base::api::disk_backed_storage::{FAST_SECTOR_SIZE, REAL_SECTOR_SIZE, SLOW_SECTOR_SIZE};\nuse storage_proofs::circuit::zigzag::ZigZagCompound;\nuse storage_proofs::compound_proof::CompoundProof;\nuse storage_proofs::parameter_cache::CacheableParameters;\n\nfn cache_params(sector_size: u64) {\n    let public_params = internal::public_params(sector_size as usize);\n    let circuit = ZigZagCompound::blank_circuit(&public_params, &internal::ENGINE_PARAMS);\n    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n    let _ = ZigZagCompound::get_groth_params(circuit, &public_params, rng);\n}\n\n// Run this from the command-line to pre-generate the groth parameters used by the API.\npub fn main() {\n    cache_params(REAL_SECTOR_SIZE);\n    cache_params(FAST_SECTOR_SIZE);\n    cache_params(SLOW_SECTOR_SIZE);\n}\n"
  },
  {
    "path": "filecoin-proofs/src/bin/paramfetch.rs",
    "content": "use std::collections::HashSet;\nuse std::fs::{create_dir_all, rename, File};\nuse std::io::copy;\nuse std::io::prelude::*;\nuse std::io::{BufReader, Stdout};\nuse std::path::{Path, PathBuf};\nuse std::process::exit;\nuse std::process::Command;\nuse std::{fs, io};\n\nuse anyhow::{bail, ensure, Context, Result};\nuse clap::{values_t, App, Arg, ArgMatches};\nuse flate2::read::GzDecoder;\nuse itertools::Itertools;\nuse pbr::{ProgressBar, Units};\nuse reqwest::{header, Client, Proxy, Url};\nuse tar::Archive;\n\nuse filecoin_proofs::param::*;\nuse storage_proofs::parameter_cache::{\n    parameter_cache_dir, GROTH_PARAMETER_EXT, PARAMETER_CACHE_DIR, PARAMETER_CACHE_ENV_VAR,\n};\n\nconst ERROR_PARAMETER_FILE: &str = \"failed to find file in cache\";\nconst ERROR_PARAMETER_ID: &str = \"failed to find key in manifest\";\n\nconst IPGET_PATH: &str = \"/var/tmp/ipget\";\nconst DEFAULT_PARAMETERS: &str = include_str!(\"../../parameters.json\");\nconst IPGET_VERSION: &str = \"v0.4.0\";\n\nstruct FetchProgress<R> {\n    inner: R,\n    progress_bar: ProgressBar<Stdout>,\n}\n\nimpl<R: Read> Read for FetchProgress<R> {\n    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {\n        self.inner.read(buf).map(|n| {\n            self.progress_bar.add(n as u64);\n            n\n        })\n    }\n}\n\npub fn main() {\n    fil_logger::init();\n\n    let matches = App::new(\"paramfetch\")\n        .version(\"1.1\")\n        .about(\n            &format!(\n                \"\nSet {} to specify Groth parameter and verifying key-cache directory.\nDefaults to '{}'\n\",\n                PARAMETER_CACHE_ENV_VAR,\n                PARAMETER_CACHE_DIR\n            )[..],\n        )\n        .arg(\n            Arg::with_name(\"json\")\n                .value_name(\"JSON\")\n                .takes_value(true)\n                .short(\"j\")\n                .long(\"json\")\n                .help(\"Use specific JSON file\"),\n        )\n        .arg(\n            Arg::with_name(\"retry\")\n                .short(\"r\")\n                .long(\"retry\")\n                .help(\"Prompt to retry on failure\"),\n        )\n        .arg(\n            Arg::with_name(\"all\")\n                .short(\"a\")\n                .long(\"all\")\n                .conflicts_with(\"params-for-sector-sizes\")\n                .help(\"Download all available parameters and verifying keys\"),\n        )\n        .arg(\n            Arg::with_name(\"params-for-sector-sizes\")\n                .short(\"z\")\n                .long(\"params-for-sector-sizes\")\n                .conflicts_with(\"all\")\n                .require_delimiter(true)\n                .value_delimiter(\",\")\n                .multiple(true)\n                .help(\"A comma-separated list of sector sizes, in bytes, for which Groth parameters will be downloaded\"),\n        )\n        .arg(\n            Arg::with_name(\"verbose\")\n                .short(\"v\")\n                .long(\"verbose\")\n                .help(\"Print diagnostic information to stdout\"),\n        )\n        .arg(\n            Arg::with_name(\"ipget-bin\")\n                .conflicts_with(\"ipget-version\")\n                .takes_value(true)\n                .short(\"i\")\n                .long(\"ipget-bin\")\n                .help(\"Use specific ipget binary instead of looking for (or installing) one in /var/tmp/ipget/ipget\"),\n        )\n        .arg(\n            Arg::with_name(\"ipget-args\")\n                .takes_value(true)\n                .long(\"ipget-args\")\n          
      .help(\"Specify additional arguments for ipget\")\n        )\n        .arg(\n            Arg::with_name(\"ipget-version\")\n                .conflicts_with(\"ipget-bin\")\n                .long(\"ipget-version\")\n                .takes_value(true)\n                .help(\"Set the version of ipget to use\")\n        )\n        .get_matches();\n\n    match fetch(&matches) {\n        Ok(_) => println!(\"done\"),\n        Err(err) => {\n            println!(\"fatal error: {}\", err);\n            exit(1);\n        }\n    }\n}\n\nfn fetch(matches: &ArgMatches) -> Result<()> {\n    let manifest = if matches.is_present(\"json\") {\n        let json_path = PathBuf::from(matches.value_of(\"json\").unwrap());\n        println!(\"using JSON file: {:?}\", json_path);\n\n        if !json_path.exists() {\n            bail!(\n                \"JSON file '{}' does not exist\",\n                &json_path.to_str().unwrap_or(\"\")\n            );\n        }\n\n        let file = File::open(&json_path)?;\n        let reader = BufReader::new(file);\n\n        serde_json::from_reader(reader).with_context(|| {\n            format!(\n                \"JSON file '{}' did not parse correctly\",\n                &json_path.to_str().unwrap_or(\"\"),\n            )\n        })?\n    } else {\n        println!(\"using built-in manifest\");\n        serde_json::from_str(&DEFAULT_PARAMETERS)?\n    };\n\n    let retry = matches.is_present(\"retry\");\n\n    let mut filenames = get_filenames_from_parameter_map(&manifest)?;\n\n    println!(\"{} files in manifest...\", filenames.len());\n    println!();\n\n    // if user has specified sector sizes for which they wish to download Groth\n    // parameters, trim non-matching Groth parameter filenames from the list\n    if matches.is_present(\"params-for-sector-sizes\") {\n        let whitelisted_sector_sizes: HashSet<u64> =\n            values_t!(matches.values_of(\"params-for-sector-sizes\"), u64)?\n                .into_iter()\n                .collect();\n\n        // always download all verifying keys - but conditionally skip Groth\n        // parameters for sector sizes the user doesn't care about\n        filenames = filenames\n            .into_iter()\n            .filter(|id| {\n                !has_extension(id, GROTH_PARAMETER_EXT) || {\n                    manifest\n                        .get(id)\n                        .map(|p| p.sector_size)\n                        .map(|n| whitelisted_sector_sizes.contains(&n))\n                        .unwrap_or(false)\n                }\n            })\n            .collect_vec();\n    }\n\n    println!(\"{} files to check for (re)download...\", filenames.len());\n    println!();\n\n    // ensure filename corresponds to asset on disk and that its checksum\n    // matches that which is specified in the manifest\n    filenames = get_filenames_requiring_download(&manifest, filenames)?;\n\n    // don't prompt the user to download files if they've used certain flags\n    if !matches.is_present(\"params-for-sector-sizes\")\n        && !matches.is_present(\"all\")\n        && !filenames.is_empty()\n    {\n        filenames = choose_from(&filenames, |filename| {\n            manifest.get(filename).map(|x| x.sector_size)\n        })?;\n        println!();\n    }\n\n    let is_verbose = matches.is_present(\"verbose\");\n    let ipget_bin_path = matches.value_of(\"ipget-bin\");\n    let ipget_version = matches.value_of(\"ipget-version\").unwrap_or(IPGET_VERSION);\n    let ipget_args = matches.value_of(\"ipget-args\");\n\n    // 
Make sure we have ipget available\n    if ipget_bin_path.is_none() {\n        ensure_ipget(is_verbose, ipget_version)?;\n    }\n\n
    let ipget_path = if let Some(p) = ipget_bin_path {\n        PathBuf::from(p)\n    } else {\n        PathBuf::from(&get_ipget_bin(ipget_version))\n    };\n\n
    loop {\n        println!(\"{} files to fetch...\", filenames.len());\n        println!();\n\n
        for filename in &filenames {\n            println!(\"fetching: {}\", filename);\n            print!(\"downloading file... \");\n            io::stdout().flush().unwrap();\n\n
            match fetch_parameter_file(is_verbose, &manifest, &filename, &ipget_path, ipget_args) {\n                Ok(_) => println!(\"ok\\n\"),\n                Err(err) => println!(\"error: {}\\n\", err),\n            }\n        }\n\n
        // if we haven't downloaded a valid copy of each asset specified in the\n        // manifest, ask the user if they wish to try again\n
        filenames = get_filenames_requiring_download(&manifest, filenames)?;\n\n
        if filenames.is_empty() {\n            break;\n        } else {\n            println!(\"{} files failed to be fetched:\", filenames.len());\n\n
            for parameter_id in &filenames {\n                println!(\"{}\", parameter_id);\n            }\n\n
            println!();\n\n
            if !retry || !choose(\"try again?\") {\n                bail!(\"some files failed to be fetched. try again, or run paramcache to generate locally\");\n            }\n        }\n    }\n\n
    Ok(())\n}\n\n
fn get_ipget_bin(version: &str) -> String {\n    format!(\"{}-{}/ipget/ipget\", IPGET_PATH, version)\n}\n\n
/// Check if ipget is available, download it otherwise.\nfn ensure_ipget(is_verbose: bool, version: &str) -> Result<()> {\n    let ipget_bin = get_ipget_bin(version);\n    if Path::new(&ipget_bin).exists() {\n        Ok(())\n    } else {\n        download_ipget(is_verbose, version)\n    }\n    .map(|_| {\n        if is_verbose {\n            println!(\"ipget installed: {}\", ipget_bin);\n        }\n    })\n}\n\n
/// Download a version of ipget.\nfn download_ipget(is_verbose: bool, version: &str) -> Result<()> {\n    let (os, extension) = if cfg!(target_os = \"macos\") {\n        (\"darwin\", \"tar.gz\")\n    } else if cfg!(target_os = \"windows\") {\n        (\"windows\", \"zip\")\n    } else {\n        (\"linux\", \"tar.gz\")\n    };\n\n
    let url = Url::parse(&format!(\n        \"https://dist.ipfs.io/ipget/{}/ipget_{}_{}-amd64.{}\",\n        version, version, os, extension\n    ))?;\n\n
    if is_verbose {\n        println!(\"downloading ipget@{}-{}...\", version, os);\n    }\n\n
    // download file\n    let p = format!(\"{}-{}.{}\", IPGET_PATH, version, extension);\n    download_file(url, &p, is_verbose)?;\n\n
    // extract file\n    if extension == \"tar.gz\" {\n        let tar_gz = fs::File::open(p)?;\n        let tar = GzDecoder::new(tar_gz);\n        let mut archive = Archive::new(tar);\n        archive.unpack(format!(\"/var/tmp/ipget-{}\", version))?;\n    } else {\n        // TODO: handle zip archives on windows\n        unimplemented!(\"failed to install ipget: unzip is not yet supported\");\n    }\n\n
    Ok(())\n}\n\n
/// Download the given file.\nfn download_file(url: Url, target: impl AsRef<Path>, is_verbose: bool) -> Result<()> {\n    let mut file = File::create(target)?;\n\n
    let client = Client::builder()\n        .proxy(Proxy::custom(move |url| env_proxy::for_url(&url).to_url()))\n        .build()?;\n    let total_size = {\n        let res = 
client.head(url.as_str()).send()?;\n        if res.status().is_success() {\n            res.headers()\n                .get(header::CONTENT_LENGTH)\n                .and_then(|ct_len| ct_len.to_str().ok())\n                .and_then(|ct_len| ct_len.parse().ok())\n                .unwrap_or(0)\n        } else {\n            bail!(\"failed to download file: {}\", url);\n        }\n    };\n\n    let req = client.get(url.as_str());\n    if is_verbose {\n        let mut pb = ProgressBar::new(total_size);\n        pb.set_units(Units::Bytes);\n\n        let mut source = FetchProgress {\n            inner: req.send()?,\n            progress_bar: pb,\n        };\n\n        let _ = copy(&mut source, &mut file)?;\n    } else {\n        let mut source = req.send()?;\n        let _ = copy(&mut source, &mut file)?;\n    }\n\n    Ok(())\n}\n\nfn fetch_parameter_file(\n    is_verbose: bool,\n    parameter_map: &ParameterMap,\n    filename: &str,\n    ipget_bin_path: impl AsRef<Path>,\n    ipget_args: Option<impl AsRef<str>>,\n) -> Result<()> {\n    let parameter_data = parameter_map_lookup(parameter_map, filename)?;\n    let path = get_full_path_for_file_within_cache(filename);\n\n    create_dir_all(parameter_cache_dir())?;\n    download_file_with_ipget(\n        &parameter_data.cid,\n        path,\n        is_verbose,\n        ipget_bin_path,\n        ipget_args,\n    )\n}\n\nfn download_file_with_ipget(\n    cid: impl AsRef<str>,\n    target: impl AsRef<Path>,\n    is_verbose: bool,\n    ipget_bin_path: impl AsRef<Path>,\n    ipget_args: Option<impl AsRef<str>>,\n) -> Result<()> {\n    let mut cmd = Command::new(ipget_bin_path.as_ref().as_os_str());\n    cmd.arg(\"-o\")\n        .arg(target.as_ref().to_str().unwrap())\n        .arg(cid.as_ref());\n\n    if let Some(args) = ipget_args {\n        cmd.args(args.as_ref().split(' '));\n    }\n\n    let output = cmd.output()?;\n\n    if is_verbose {\n        io::stdout().write_all(&output.stdout)?;\n        io::stderr().write_all(&output.stderr)?;\n    }\n\n    ensure!(\n        output.status.success(),\n        \"failed to download {}\",\n        target.as_ref().display()\n    );\n\n    Ok(())\n}\n\nfn get_filenames_requiring_download(\n    parameter_map: &ParameterMap,\n    parameter_ids: Vec<String>,\n) -> Result<Vec<String>> {\n    Ok(parameter_ids\n        .into_iter()\n        .filter(|parameter_id| {\n            println!(\"checking: {}\", parameter_id);\n            print!(\"does file exist... \");\n\n            if get_full_path_for_file_within_cache(parameter_id).exists() {\n                println!(\"yes\");\n                print!(\"is file valid... 
\");\n                io::stdout().flush().unwrap();\n\n                match validate_parameter_file(&parameter_map, &parameter_id) {\n                    Ok(true) => {\n                        println!(\"yes\\n\");\n                        false\n                    }\n                    Ok(false) => {\n                        println!(\"no\\n\");\n                        invalidate_parameter_file(&parameter_id).unwrap();\n                        true\n                    }\n                    Err(err) => {\n                        println!(\"error: {}\\n\", err);\n                        true\n                    }\n                }\n            } else {\n                println!(\"no\\n\");\n                true\n            }\n        })\n        .collect())\n}\n\nfn get_filenames_from_parameter_map(parameter_map: &ParameterMap) -> Result<Vec<String>> {\n    Ok(parameter_map.iter().map(|(k, _)| k.clone()).collect())\n}\n\nfn validate_parameter_file(parameter_map: &ParameterMap, filename: &str) -> Result<bool> {\n    let parameter_data = parameter_map_lookup(parameter_map, filename)?;\n    let digest = get_digest_for_file_within_cache(filename)?;\n\n    if parameter_data.digest != digest {\n        Ok(false)\n    } else {\n        Ok(true)\n    }\n}\n\nfn invalidate_parameter_file(filename: &str) -> Result<()> {\n    let parameter_file_path = get_full_path_for_file_within_cache(filename);\n    let target_parameter_file_path =\n        parameter_file_path.with_file_name(format!(\"{}-invalid-digest\", filename));\n\n    ensure!(parameter_file_path.exists(), ERROR_PARAMETER_FILE);\n    rename(parameter_file_path, target_parameter_file_path)?;\n\n    Ok(())\n}\n\nfn parameter_map_lookup<'a>(\n    parameter_map: &'a ParameterMap,\n    filename: &str,\n) -> Result<&'a ParameterData> {\n    ensure!(parameter_map.contains_key(filename), ERROR_PARAMETER_ID);\n\n    Ok(parameter_map.get(filename).unwrap())\n}\n"
  },
  {
    "path": "filecoin-proofs/src/bin/paramgen.rs",
    "content": "extern crate filecoin_proofs;\nextern crate phase2;\nextern crate rand;\nextern crate sector_base;\nextern crate storage_proofs;\n\nuse rand::OsRng;\nuse std::env;\nuse std::fs::File;\n\nuse filecoin_proofs::api::internal;\nuse sector_base::api::disk_backed_storage::REAL_SECTOR_SIZE;\nuse storage_proofs::circuit::zigzag::ZigZagCompound;\nuse storage_proofs::compound_proof::CompoundProof;\n\n// Run this from the command-line, passing the path to the file to which the parameters will be written.\npub fn main() {\n    let args: Vec<String> = env::args().collect();\n    let out_file = &args[1];\n\n    let public_params = internal::public_params(REAL_SECTOR_SIZE as usize);\n\n    let circuit = ZigZagCompound::blank_circuit(&public_params, &internal::ENGINE_PARAMS);\n    let mut params = phase2::MPCParameters::new(circuit).unwrap();\n\n    let rng = &mut OsRng::new().unwrap();\n    let hash = params.contribute(rng);\n\n    {\n        let circuit = ZigZagCompound::blank_circuit(&public_params, &internal::ENGINE_PARAMS);\n        let contributions = params.verify(circuit).expect(\"parameters should be valid!\");\n\n        // We need to check the `contributions` to see if our `hash`\n        // is in it (see above, when we first contributed)\n        assert!(phase2::contains_contribution(&contributions, &hash));\n    }\n\n    let mut buffer = File::create(out_file).unwrap();\n    params.write(&mut buffer).unwrap();\n}\n"
  },
  {
    "path": "filecoin-proofs/src/bin/parampublish.rs",
    "content": "use std::collections::BTreeMap;\nuse std::fs::{read_dir, File};\nuse std::io;\nuse std::io::prelude::*;\nuse std::io::BufWriter;\nuse std::path::{Path, PathBuf};\nuse std::process::{exit, Command};\n\nuse anyhow::{ensure, Context, Result};\nuse clap::{App, Arg, ArgMatches};\nuse dialoguer::{theme::ColorfulTheme, MultiSelect, Select};\nuse humansize::{file_size_opts, FileSize};\nuse itertools::Itertools;\n\nuse filecoin_proofs::param::{\n    add_extension, choose_from, filename_to_parameter_id, get_digest_for_file_within_cache,\n    get_full_path_for_file_within_cache, has_extension, parameter_id_to_metadata_map,\n    ParameterData, ParameterMap,\n};\nuse filecoin_proofs::{\n    SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_GIB, SECTOR_SIZE_512_MIB, SECTOR_SIZE_64_GIB,\n    SECTOR_SIZE_8_MIB,\n};\nuse storage_proofs::parameter_cache::{\n    parameter_cache_dir, CacheEntryMetadata, GROTH_PARAMETER_EXT, PARAMETER_CACHE_DIR,\n    PARAMETER_METADATA_EXT, VERIFYING_KEY_EXT,\n};\n\nconst ERROR_IPFS_COMMAND: &str = \"failed to run ipfs\";\nconst ERROR_IPFS_PUBLISH: &str = \"failed to publish via ipfs\";\nconst PUBLISH_SECTOR_SIZES: [u64; 5] = [\n    SECTOR_SIZE_2_KIB,\n    SECTOR_SIZE_8_MIB,\n    SECTOR_SIZE_512_MIB,\n    SECTOR_SIZE_32_GIB,\n    SECTOR_SIZE_64_GIB,\n];\n\npub fn main() {\n    fil_logger::init();\n\n    let matches = App::new(\"parampublish\")\n        .version(\"1.0\")\n        .about(\n            &format!(\n                \"\nSet $FIL_PROOFS_PARAMETER_CACHE to specify parameter directory.\nDefaults to '{}'\n\",\n                PARAMETER_CACHE_DIR\n            )[..],\n        )\n        .arg(\n            Arg::with_name(\"json\")\n                .value_name(\"JSON\")\n                .takes_value(true)\n                .short(\"j\")\n                .long(\"json\")\n                .help(\"Use specific json file\"),\n        )\n        .arg(\n            Arg::with_name(\"all\")\n                .short(\"a\")\n                .long(\"all\")\n                .help(\"Publish all local Groth parameters and verifying keys\"),\n        )\n        .arg(\n            Arg::with_name(\"ipfs-bin\")\n                .takes_value(true)\n                .short(\"i\")\n                .long(\"ipfs-bin\")\n                .help(\"Use specific ipfs binary instead of searching for one in $PATH\"),\n        )\n        .get_matches();\n\n    match publish(&matches) {\n        Ok(_) => println!(\"done\"),\n        Err(err) => {\n            println!(\"fatal error: {}\", err);\n            exit(1);\n        }\n    }\n}\n\nfn publish(matches: &ArgMatches) -> Result<()> {\n    let ipfs_bin_path = matches.value_of(\"ipfs-bin\").unwrap_or(\"ipfs\");\n\n    // Get all valid parameter IDs which have all three files, `.meta`, `.params and `.vk`\n    // associated with them. 
If one of the files is missing, it won't show up in the selection.\n    let (mut parameter_ids, counter) = get_filenames_in_cache_dir()?\n        .iter()\n        .filter(|f| {\n            has_extension(f, GROTH_PARAMETER_EXT)\n                || has_extension(f, VERIFYING_KEY_EXT)\n                || has_extension(f, PARAMETER_METADATA_EXT)\n        })\n        .sorted()\n        // Make sure there are always three files per parameter ID\n        .fold(\n            (Vec::new(), 0),\n            |(mut result, mut counter): (std::vec::Vec<String>, u8), filename| {\n                let parameter_id = filename_to_parameter_id(&filename).unwrap();\n                // Check if previous file had the same parameter ID\n                if !result.is_empty() && &parameter_id == result.last().unwrap() {\n                    counter += 1;\n                } else {\n                    // There weren't three files for the same parameter ID, hence remove it from\n                    // the list\n                    if counter < 3 {\n                        result.pop();\n                    }\n\n                    // It's a new parameter ID, hence reset the counter and add it to the list\n                    counter = 1;\n                    result.push(parameter_id);\n                }\n\n                (result, counter)\n            },\n        );\n\n    // There might be lef-overs from the last fold iterations\n    if counter < 3 {\n        parameter_ids.pop();\n    }\n\n    if parameter_ids.is_empty() {\n        println!(\n            \"No valid parameters in directory {:?} found.\",\n            parameter_cache_dir()\n        );\n        std::process::exit(1)\n    }\n\n    // build a mapping from parameter id to metadata\n    let meta_map = parameter_id_to_metadata_map(&parameter_ids)?;\n\n    let filenames = if !matches.is_present(\"all\") {\n        let tmp_filenames = meta_map\n            .keys()\n            .flat_map(|parameter_id| {\n                vec![\n                    add_extension(parameter_id, GROTH_PARAMETER_EXT),\n                    add_extension(parameter_id, VERIFYING_KEY_EXT),\n                ]\n            })\n            .collect_vec();\n        choose_from(&tmp_filenames, |filename| {\n            filename_to_parameter_id(PathBuf::from(filename))\n                .as_ref()\n                .and_then(|p_id| meta_map.get(p_id).map(|x| x.sector_size))\n        })?\n    } else {\n        // `--all` let's you select a specific version\n        let versions: Vec<String> = meta_map\n            .keys()\n            // Split off the version of the parameters\n            .map(|parameter_id| parameter_id.split('-').next().unwrap().to_string())\n            // Sort by descending order, newest parameter first\n            .sorted_by(|a, b| Ord::cmp(&b, &a))\n            .dedup()\n            .collect();\n        let selected_version = Select::with_theme(&ColorfulTheme::default())\n            .with_prompt(\"Select a version (press 'q' to quit)\")\n            .default(0)\n            .items(&versions[..])\n            .interact_opt()\n            .unwrap();\n        let version = match selected_version {\n            Some(index) => &versions[index],\n            None => {\n                println!(\"Aborted.\");\n                std::process::exit(1)\n            }\n        };\n\n        // The parameter IDs that should bet published\n        let mut parameter_ids = meta_map\n            .keys()\n            // Filter out all that don't match the selected version\n            
.filter(|parameter_id| parameter_id.starts_with(version))\n            .collect_vec();\n\n        // Display the sector sizes\n        let sector_sizes_iter = parameter_ids\n            .iter()\n            // Get sector size and parameter ID\n            .map(|&parameter_id| {\n                meta_map\n                    .get(parameter_id)\n                    .map(|x| (x.sector_size, parameter_id))\n                    .unwrap()\n            })\n            // Sort it ascending by sector size\n            .sorted_by(|a, b| Ord::cmp(&a.0, &b.0));\n\n        // The parameters IDs need to be sorted the same way as the menu we display, else\n        // the selected items won't match the list we select from\n        parameter_ids = sector_sizes_iter\n            .clone()\n            .map(|(_, parameter_id)| parameter_id)\n            .collect_vec();\n\n        let sector_sizes = sector_sizes_iter\n            .clone()\n            // Format them\n            .map(|(sector_size, parameter_id)| {\n                format!(\n                    \"({:?}) {:?}\",\n                    sector_size.file_size(file_size_opts::BINARY).unwrap(),\n                    parameter_id\n                )\n            })\n            .collect_vec();\n        // Set the default, pre-selected sizes\n        let default_sector_sizes = sector_sizes_iter\n            .map(|(sector_size, _)| PUBLISH_SECTOR_SIZES.contains(&sector_size))\n            .collect_vec();\n        let selected_sector_sizes = MultiSelect::with_theme(&ColorfulTheme::default())\n            .with_prompt(\"Select the sizes to publish\")\n            .items(&sector_sizes[..])\n            .defaults(&default_sector_sizes)\n            .interact()\n            .unwrap();\n\n        if selected_sector_sizes.is_empty() {\n            println!(\"Nothing selected. Abort.\");\n        } else {\n            // Filter out the selected ones\n            parameter_ids = parameter_ids\n                .into_iter()\n                .enumerate()\n                .filter_map(|(index, parameter_id)| {\n                    if selected_sector_sizes.contains(&index) {\n                        Some(parameter_id)\n                    } else {\n                        None\n                    }\n                })\n                .collect_vec();\n        }\n\n        // Generate filenames based on their parameter IDs\n        parameter_ids\n            .iter()\n            .flat_map(|parameter_id| {\n                vec![\n                    add_extension(parameter_id, GROTH_PARAMETER_EXT),\n                    add_extension(parameter_id, VERIFYING_KEY_EXT),\n                ]\n            })\n            .collect_vec()\n    };\n    println!();\n\n    let json = PathBuf::from(matches.value_of(\"json\").unwrap_or(\"./parameters.json\"));\n    let mut parameter_map: ParameterMap = BTreeMap::new();\n\n    if !filenames.is_empty() {\n        println!(\"publishing {} files...\", filenames.len());\n        println!();\n\n        for filename in filenames {\n            let id = filename_to_parameter_id(&filename)\n                .with_context(|| format!(\"failed to parse id from file name {}\", filename))?;\n\n            let meta: &CacheEntryMetadata = meta_map\n                .get(&id)\n                .with_context(|| format!(\"no metadata found for parameter id {}\", id))?;\n\n            println!(\"publishing: {}\", filename);\n            print!(\"publishing to ipfs... 
\");\n            io::stdout().flush().unwrap();\n\n            match publish_parameter_file(&ipfs_bin_path, &filename) {\n                Ok(cid) => {\n                    println!(\"ok\");\n                    print!(\"generating digest... \");\n                    io::stdout().flush().unwrap();\n\n                    let digest = get_digest_for_file_within_cache(&filename)?;\n                    let data = ParameterData {\n                        cid,\n                        digest,\n                        sector_size: meta.sector_size,\n                    };\n\n                    parameter_map.insert(filename, data);\n\n                    println!(\"ok\");\n                }\n                Err(err) => println!(\"error: {}\", err),\n            }\n\n            println!();\n        }\n\n        write_parameter_map_to_disk(&parameter_map, &json)?;\n    } else {\n        println!(\"no files to publish\");\n    }\n\n    Ok(())\n}\n\nfn get_filenames_in_cache_dir() -> Result<Vec<String>> {\n    let path = parameter_cache_dir();\n\n    if path.exists() {\n        Ok(read_dir(path)?\n            .map(|f| f.unwrap().path())\n            .filter(|p| p.is_file())\n            .map(|p| {\n                p.as_path()\n                    .file_name()\n                    .unwrap()\n                    .to_str()\n                    .unwrap()\n                    .to_string()\n            })\n            .collect())\n    } else {\n        println!(\n            \"parameter directory '{}' does not exist\",\n            path.as_path().to_str().unwrap()\n        );\n\n        Ok(Vec::new())\n    }\n}\n\nfn publish_parameter_file(ipfs_bin_path: &str, filename: &str) -> Result<String> {\n    let path = get_full_path_for_file_within_cache(filename);\n\n    let output = Command::new(ipfs_bin_path)\n        .arg(\"add\")\n        .arg(\"-Q\")\n        .arg(&path)\n        .output()\n        .expect(ERROR_IPFS_COMMAND);\n\n    ensure!(output.status.success(), ERROR_IPFS_PUBLISH);\n\n    Ok(String::from_utf8(output.stdout)?.trim().to_string())\n}\n\nfn write_parameter_map_to_disk<P: AsRef<Path>>(\n    parameter_map: &ParameterMap,\n    dest_path: P,\n) -> Result<()> {\n    let p: &Path = dest_path.as_ref();\n    let file = File::create(p)?;\n    let writer = BufWriter::new(file);\n    serde_json::to_writer_pretty(writer, &parameter_map)?;\n\n    Ok(())\n}\n"
  },
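  {
    "path": "filecoin-proofs/examples/editor_sketch_parameter_triples.rs",
    "content": "// Editor's illustrative sketch; this file is not part of the repository and its path is\n// hypothetical. The publish logic above keeps a parameter ID only when all three cache\n// files exist for it (.params, .vk, .meta); the fold over sorted filenames is one way to\n// enforce that. This standalone sketch shows the same invariant with a simple count map;\n// `filename_to_parameter_id` here is a stand-in for the real helper, not its actual code.\nuse std::collections::BTreeMap;\n\n/// Each parameter ID must be backed by exactly three cache files.\nconst REQUIRED_FILES_PER_ID: usize = 3;\n\n/// Strip the final extension, e.g. \"v28-foo-2KiB.vk\" -> \"v28-foo-2KiB\".\nfn filename_to_parameter_id(filename: &str) -> Option<&str> {\n    filename.rsplit_once('.').map(|(id, _ext)| id)\n}\n\n/// Return only the parameter IDs for which all three files are present.\nfn complete_parameter_ids(filenames: &[&str]) -> Vec<String> {\n    let mut counts: BTreeMap<String, usize> = BTreeMap::new();\n    for f in filenames {\n        if let Some(id) = filename_to_parameter_id(f) {\n            *counts.entry(id.to_string()).or_insert(0) += 1;\n        }\n    }\n    counts\n        .into_iter()\n        .filter(|(_, n)| *n == REQUIRED_FILES_PER_ID)\n        .map(|(id, _)| id)\n        .collect()\n}\n\nfn main() {\n    let files = [\n        \"v28-a.params\",\n        \"v28-a.vk\",\n        \"v28-a.meta\", // complete triple -> kept\n        \"v28-b.params\",\n        \"v28-b.vk\", // missing .meta -> dropped\n    ];\n    assert_eq!(complete_parameter_ids(&files), vec![\"v28-a\".to_string()]);\n}\n"
  },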
  {
    "path": "filecoin-proofs/src/bin/phase2.rs",
    "content": "/// A CLI program for running Phase2 of Filecoin's trusted-setup.\n///\n/// # Build\n///\n/// From the directory `rust-fil-proofs` run:\n///\n/// ```\n/// $ RUSTFLAGS=\"-C target-cpu=native\" cargo build --release -p filecoin-proofs --bin=phase2\n/// ```\n///\n/// # Usage\n///\n/// ```\n/// # Create initial params for a circuit using:\n/// $ RUST_BACKTRACE=1 ./target/release/phase2 new \\\n///     <--porep, --winning-post, --window-post> \\\n///     [--poseidon (default)] \\\n///     <--2kib, --8mib, --512mib, --32gib, --64gib>\n///\n/// # Contribute randomness to the phase2 params for a circuit:\n/// $ RUST_BACKTRACE=1 ./target/release/phase2 contribute <path to params file>\n///\n/// # Verify the transition from one phase2 params file to another:\n/// $ RUST_BACKTRACE=1 ./target/release/phase2 verify \\\n///     --paths=<comma separated list of file paths to params> \\\n///     --contributions=<comma separated list of contribution digests>\n///\n/// # Run verification as a daemon - verify the parameters and contributions as they are written to\n/// # the `rust-fil-proofs` directory:\n/// $ RUST_BACKTRACE=1 ./target/release/phase2 verifyd \\\n///     <--porep, --winning-post, --window-post> \\\n///     <--2kib, --8mib, --512mib, --32gib, --64gib>\n/// ```\nuse std::fmt::{self, Display, Formatter};\nuse std::fs::{self, File};\nuse std::io::{BufReader, BufWriter};\nuse std::path::Path;\nuse std::process::Command;\nuse std::str::{self, FromStr};\nuse std::thread::sleep;\nuse std::time::{Duration, Instant};\n\nuse clap::{App, AppSettings, Arg, ArgGroup, SubCommand};\nuse filecoin_proofs::constants::*;\nuse filecoin_proofs::parameters::{\n    setup_params, window_post_public_params, winning_post_public_params,\n};\nuse filecoin_proofs::types::*;\nuse filecoin_proofs::with_shape;\nuse log::info;\nuse phase2::{verify_contribution, MPCParameters};\nuse rand::SeedableRng;\nuse simplelog::{self, CombinedLogger, LevelFilter, TermLogger, TerminalMode, WriteLogger};\nuse storage_proofs::compound_proof::{self, CompoundProof};\nuse storage_proofs::hasher::Sha256Hasher;\nuse storage_proofs::merkle::MerkleTreeTrait;\nuse storage_proofs::porep::stacked::{StackedCircuit, StackedCompound, StackedDrg};\nuse storage_proofs::post::fallback::{FallbackPoSt, FallbackPoStCircuit, FallbackPoStCompound};\n\n#[derive(Clone, Copy)]\nenum Proof {\n    Porep,\n    WinningPost,\n    WindowPost,\n}\n\nimpl Display for Proof {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        let s = match self {\n            Proof::Porep => \"PoRep\",\n            Proof::WinningPost => \"WinningPoSt\",\n            Proof::WindowPost => \"WindowPoSt\",\n        };\n        write!(f, \"{}\", s)\n    }\n}\n\n#[derive(Clone, Copy)]\nenum Hasher {\n    Poseidon,\n    // ShaPedersen,\n}\n\nimpl Display for Hasher {\n    fn fmt(&self, f: &mut Formatter) -> fmt::Result {\n        let s = match self {\n            Hasher::Poseidon => \"Poseidon\",\n            // Hasher::ShaPedersen => \"SHA-Pedersen\",\n        };\n        write!(f, \"{}\", s)\n    }\n}\n\nfn display_sector_size(sector_size: u64) -> String {\n    match sector_size {\n        SECTOR_SIZE_2_KIB => \"2KiB\".to_string(),\n        SECTOR_SIZE_8_MIB => \"8MiB\".to_string(),\n        SECTOR_SIZE_512_MIB => \"512MiB\".to_string(),\n        SECTOR_SIZE_32_GIB => \"32GiB\".to_string(),\n        SECTOR_SIZE_64_GIB => \"64GiB\".to_string(),\n        _ => unreachable!(),\n    }\n}\n\nfn get_head_commit() -> String {\n    let output = Command::new(\"git\")\n        .args(&[\"rev-parse\", \"--short=7\", \"HEAD\"])\n        .output()\n        .expect(\"failed to execute child process: `git rev-parse --short=7 HEAD`\");\n\n    str::from_utf8(&output.stdout).unwrap().trim().to_string()\n}\n\nfn params_filename(\n    proof: Proof,\n    hasher: Hasher,\n    sector_size: u64,\n    head: &str,\n    param_number: usize,\n) -> String {\n    let mut filename = format!(\n        \"{}_{}_{}_{}_{}\",\n        proof,\n        hasher,\n        display_sector_size(sector_size),\n        head,\n        param_number\n    );\n    filename.make_ascii_lowercase();\n    filename.replace(\"-\", \"_\")\n}\n\nfn initial_params_filename(proof: Proof, hasher: Hasher, sector_size: u64) -> String {\n    params_filename(proof, hasher, sector_size, &get_head_commit(), 0)\n}\n\n/// Parses a phase2 parameters filename `path` (e.g. \"porep_poseidon_32gib_abcdef1_0\") to a tuple\n/// containing the proof, hasher, sector-size, shortened head commit, and contribution number (e.g.\n/// `(Proof::Porep, Hasher::Poseidon, SECTOR_SIZE_32_GIB, \"abcdef1\", 0)`).\nfn parse_params_filename(path: &str) -> (Proof, Hasher, u64, String, usize) {\n    let filename = path.rsplitn(2, '/').next().unwrap();\n    let split: Vec<&str> = filename.split('_').collect();\n\n    // These ids must round-trip with `params_filename`, which lowercases the `Display`\n    // output of `Proof` (e.g. \"WinningPoSt\" becomes \"winningpost\").\n    let proof = match split[0] {\n        \"porep\" => Proof::Porep,\n        \"winningpost\" => Proof::WinningPost,\n        \"windowpost\" => Proof::WindowPost,\n        other => panic!(\"invalid proof id in filename: {}\", other),\n    };\n\n    // TODO: this is broken if we enable SHA-Pedersen.\n    let hasher = match split[1] {\n        \"poseidon\" => Hasher::Poseidon,\n        // \"sha_pedersen\" => Hasher::ShaPedersen,\n        other => panic!(\"invalid hasher id in filename: {}\", other),\n    };\n\n    let sector_size = match split[2] {\n        \"2kib\" => SECTOR_SIZE_2_KIB,\n        \"8mib\" => SECTOR_SIZE_8_MIB,\n        \"512mib\" => SECTOR_SIZE_512_MIB,\n        \"32gib\" => SECTOR_SIZE_32_GIB,\n        \"64gib\" => SECTOR_SIZE_64_GIB,\n        other => panic!(\"invalid sector-size id in filename: {}\", other),\n    };\n\n    let head = split[3].to_string();\n\n    let param_number = usize::from_str(split[4])\n        .unwrap_or_else(|_| panic!(\"invalid param number in filename: {}\", split[4]));\n\n    (proof, hasher, sector_size, head, param_number)\n}\n\nfn blank_porep_poseidon_circuit<Tree: MerkleTreeTrait>(\n    sector_size: u64,\n) -> StackedCircuit<'static, Tree, Sha256Hasher> {\n    let n_partitions = *POREP_PARTITIONS.read().unwrap().get(&sector_size).unwrap();\n\n    let porep_config = PoRepConfig {\n        sector_size: SectorSize(sector_size),\n        partitions: PoRepProofPartitions(n_partitions),\n    };\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params: setup_params(\n            PaddedBytesAmount::from(porep_config),\n            usize::from(PoRepProofPartitions::from(porep_config)),\n        )\n        .unwrap(),\n        partitions: Some(usize::from(PoRepProofPartitions::from(porep_config))),\n        priority: false,\n    };\n\n    let public_params = <StackedCompound<Tree, Sha256Hasher> as CompoundProof<\n        StackedDrg<Tree, Sha256Hasher>,\n        _,\n    >>::setup(&setup_params)\n    .unwrap();\n\n    <StackedCompound<Tree, Sha256Hasher> as CompoundProof<\n        StackedDrg<Tree, Sha256Hasher>,\n        _,\n    >>::blank_circuit(&public_params.vanilla_params)\n}\n\n/*\nfn blank_porep_sha_pedersen_circuit(\n    sector_size: u64,\n) -> StackedCircuit<'static, PedersenHasher, Sha256Hasher> {\n    let n_partitions = 
*POREP_PARTITIONS.read().unwrap().get(&sector_size).unwrap();\n\n    let porep_config = PoRepConfig {\n        sector_size: SectorSize(sector_size),\n        partitions: PoRepProofPartitions(n_partitions),\n    };\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params: setup_params(\n            PaddedBytesAmount::from(porep_config),\n            usize::from(PoRepProofPartitions::from(porep_config)),\n        )\n        .unwrap(),\n        partitions: Some(usize::from(PoRepProofPartitions::from(porep_config))),\n        priority: false,\n    };\n\n    let public_params =\n        <StackedCompound<PedersenHasher, Sha256Hasher> as CompoundProof<_, StackedDrg<PedersenHasher, Sha256Hasher>, _>>::setup(\n            &setup_params,\n        )\n        .unwrap();\n\n    <StackedCompound<PedersenHasher, Sha256Hasher> as CompoundProof<\n        _,\n        StackedDrg<PedersenHasher, Sha256Hasher>,\n        _,\n    >>::blank_circuit(&public_params.vanilla_params)\n}\n*/\n\nfn blank_winning_post_poseidon_circuit<Tree: 'static + MerkleTreeTrait>(\n    sector_size: u64,\n) -> FallbackPoStCircuit<Tree> {\n    let post_config = PoStConfig {\n        sector_size: SectorSize(sector_size),\n        challenge_count: WINNING_POST_CHALLENGE_COUNT,\n        sector_count: WINNING_POST_SECTOR_COUNT,\n        typ: PoStType::Winning,\n        priority: false,\n    };\n\n    let public_params = winning_post_public_params::<Tree>(&post_config).unwrap();\n\n    <FallbackPoStCompound<Tree> as CompoundProof<\n        FallbackPoSt<Tree>,\n        FallbackPoStCircuit<Tree>,\n    >>::blank_circuit(&public_params)\n}\n\nfn blank_window_post_poseidon_circuit<Tree: 'static + MerkleTreeTrait>(\n    sector_size: u64,\n) -> FallbackPoStCircuit<Tree> {\n    let post_config = PoStConfig {\n        sector_size: SectorSize(sector_size),\n        challenge_count: WINDOW_POST_CHALLENGE_COUNT,\n        sector_count: *WINDOW_POST_SECTOR_COUNT\n            .read()\n            .unwrap()\n            .get(&sector_size)\n            .unwrap(),\n        typ: PoStType::Window,\n        priority: false,\n    };\n\n    let public_params = window_post_public_params::<Tree>(&post_config).unwrap();\n\n    <FallbackPoStCompound<Tree> as CompoundProof<\n        FallbackPoSt<Tree>,\n        FallbackPoStCircuit<Tree>,\n    >>::blank_circuit(&public_params)\n}\n/*\nfn blank_fallback_post_sha_pedersen_circuit(sector_size: u64) -> ... 
{}\n*/\n\n/// Creates the first phase2 parameters for a circuit and writes them to a file.\nfn create_initial_params<Tree: 'static + MerkleTreeTrait>(\n    proof: Proof,\n    hasher: Hasher,\n    sector_size: u64,\n) {\n    let start_total = Instant::now();\n\n    info!(\n        \"creating initial params for circuit: {} {} {} {}\",\n        proof,\n        hasher,\n        display_sector_size(sector_size),\n        get_head_commit()\n    );\n\n    let params_path = initial_params_filename(proof, hasher, sector_size);\n    let params_file = File::create(&params_path).unwrap();\n    let mut params_writer = BufWriter::with_capacity(1024 * 1024, params_file);\n\n    let dt_create_circuit: u64;\n    let dt_create_params: u64;\n\n    let params = match (proof, hasher) {\n        (Proof::Porep, Hasher::Poseidon) => {\n            let start = Instant::now();\n            let circuit = blank_porep_poseidon_circuit::<Tree>(sector_size);\n            dt_create_circuit = start.elapsed().as_secs();\n            let start = Instant::now();\n            let params = phase2::MPCParameters::new(circuit).unwrap();\n            dt_create_params = start.elapsed().as_secs();\n            params\n        }\n        /*\n        (Proof::Porep, Hasher::ShaPedersen) => {\n            let start = Instant::now();\n            let circuit = blank_porep_sha_pedersen_circuit(sector_size);\n            dt_create_circuit = start.elapsed().as_secs();\n            let start = Instant::now();\n            let params = phase2::MPCParameters::new(circuit).unwrap();\n            dt_create_params = start.elapsed().as_secs();\n            params\n        }\n        */\n        (Proof::WinningPost, Hasher::Poseidon) => {\n            let start = Instant::now();\n            let circuit = blank_winning_post_poseidon_circuit::<Tree>(sector_size);\n            dt_create_circuit = start.elapsed().as_secs();\n            let start = Instant::now();\n            let params = phase2::MPCParameters::new(circuit).unwrap();\n            dt_create_params = start.elapsed().as_secs();\n            params\n        }\n        (Proof::WindowPost, Hasher::Poseidon) => {\n            let start = Instant::now();\n            let circuit = blank_window_post_poseidon_circuit::<Tree>(sector_size);\n            dt_create_circuit = start.elapsed().as_secs();\n            let start = Instant::now();\n            let params = phase2::MPCParameters::new(circuit).unwrap();\n            dt_create_params = start.elapsed().as_secs();\n            params\n        } /*(Proof::FallbackPost, Hasher::ShaPedersen) => { ... 
}\n           */\n    };\n\n    info!(\n        \"successfully created initial params for circuit, dt_create_circuit={}s, dt_create_params={}s\",\n        dt_create_circuit,\n        dt_create_params\n    );\n\n    info!(\"writing initial params to file: {}\", params_path);\n    params.write(&mut params_writer).unwrap();\n\n    info!(\n        \"successfully created initial params for circuit: {} {} {} {}, dt_total={}s\",\n        proof,\n        hasher,\n        display_sector_size(sector_size),\n        get_head_commit(),\n        start_total.elapsed().as_secs()\n    );\n}\n\n/// Prompt the user to mash on their keyboard to gather entropy.\nfn prompt_for_randomness() -> [u8; 32] {\n    use dialoguer::{theme::ColorfulTheme, Password};\n\n    let raw = Password::with_theme(&ColorfulTheme::default())\n        .with_prompt(\n            \"Please randomly press your keyboard for entropy (press Return/Enter when finished)\",\n        )\n        .interact()\n        .unwrap();\n\n    let hashed = blake2b_simd::blake2b(raw.as_bytes());\n\n    let mut seed = [0u8; 32];\n    seed.copy_from_slice(&hashed.as_ref()[..32]);\n    seed\n}\n\n/// Contributes entropy to the current phase2 parameters for a circuit, then writes the updated\n/// parameters to a new file.\nfn contribute_to_params(path_before: &str) {\n    let start_total = Instant::now();\n\n    let (proof, hasher, sector_size, head, param_number_before) =\n        parse_params_filename(path_before);\n\n    info!(\n        \"contributing randomness to params for circuit: {} {} {} {}\",\n        proof,\n        hasher,\n        display_sector_size(sector_size),\n        head\n    );\n\n    assert_eq!(\n        head,\n        get_head_commit(),\n        \"cannot contribute to parameters using a different circuit version\",\n    );\n\n    let seed = prompt_for_randomness();\n    let mut rng = rand_chacha::ChaChaRng::from_seed(seed);\n\n    info!(\"reading 'before' params from disk: {}\", path_before);\n    let file_before = File::open(path_before).unwrap();\n    let mut params_reader = BufReader::with_capacity(1024 * 1024, file_before);\n    let start = Instant::now();\n    let mut params = phase2::MPCParameters::read(&mut params_reader, true).unwrap();\n    info!(\n        \"successfully read 'before' params from disk, dt_read={}s\",\n        start.elapsed().as_secs()\n    );\n\n    info!(\"making contribution\");\n    let start = Instant::now();\n    let contribution = params.contribute(&mut rng);\n    info!(\n        \"successfully made contribution, contribution hash: {}, dt_contribute={}s\",\n        hex::encode(&contribution[..]),\n        start.elapsed().as_secs()\n    );\n\n    let path_after = params_filename(proof, hasher, sector_size, &head, param_number_before + 1);\n    info!(\"writing 'after' params to file: {}\", path_after);\n    let file_after = File::create(path_after).unwrap();\n    let mut params_writer = BufWriter::with_capacity(1024 * 1024, file_after);\n    params.write(&mut params_writer).unwrap();\n    info!(\n        \"successfully made contribution, dt_total={}s\",\n        start_total.elapsed().as_secs()\n    );\n}\n\n/// Verifies a sequence of parameter transitions against a sequence of corresponding contribution\n/// hashes. 
For example, verifies that the first digest in `contribution_hashes` transitions the\n/// first parameters file in `param_paths` to the second file, then verifies that the second\n/// contribution hash transitions the second parameters file to the third file.\nfn verify_param_transitions(param_paths: &[&str], contribution_hashes: &[[u8; 64]]) {\n    let start_total = Instant::now();\n\n    assert_eq!(\n        param_paths.len() - 1,\n        contribution_hashes.len(),\n        \"the number of contributions must be one less than the number of parameter files\"\n    );\n\n    let mut next_params_before: Option<phase2::MPCParameters> = None;\n\n    for (param_pair, provided_contribution_hash) in\n        param_paths.windows(2).zip(contribution_hashes.iter())\n    {\n        let path_before = param_pair[0];\n        let path_after = param_pair[1];\n\n        info!(\n            \"verifying transition:\\n\\tparams: {} -> {}\\n\\tcontribution: {}\",\n            path_before,\n            path_after,\n            hex::encode(&provided_contribution_hash[..])\n        );\n\n        // If we are verifying the first contribution read both `path_before` and `path_after`\n        // files. For every subsequent verification, move the previous loop's \"after\" params to this\n        // loop's \"before\" params then read this loop's \"after\" params file. This will minimize the\n        // number of expensive parameter file reads.\n        let params_before = match next_params_before.take() {\n            Some(params_before) => params_before,\n            None => {\n                info!(\"reading 'before' params from disk: {}\", path_before);\n                let file = File::open(path_before).unwrap();\n                let mut reader = BufReader::with_capacity(1024 * 1024, file);\n                let start = Instant::now();\n                let params_before = phase2::MPCParameters::read(&mut reader, true).unwrap();\n                info!(\n                    \"successfully read 'before' params from disk, dt_read={}s\",\n                    start.elapsed().as_secs()\n                );\n                params_before\n            }\n        };\n\n        let params_after = {\n            info!(\"reading 'after' params from disk: {}\", path_after);\n            let file = File::open(path_after).unwrap();\n            let mut reader = BufReader::with_capacity(1024 * 1024, file);\n            let start = Instant::now();\n            let params_after = phase2::MPCParameters::read(&mut reader, true).unwrap();\n            info!(\n                \"successfully read 'after' params from disk, dt_read={}s\",\n                start.elapsed().as_secs()\n            );\n            params_after\n        };\n\n        info!(\"verifying param transition\");\n        let start_verification = Instant::now();\n\n        let calculated_contribution_hash =\n            phase2::verify_contribution(&params_before, &params_after).expect(\"invalid transition\");\n\n        assert_eq!(\n            &provided_contribution_hash[..],\n            &calculated_contribution_hash[..],\n            \"provided contribution hash ({}) does not match calculated contribution hash ({})\",\n            hex::encode(&provided_contribution_hash[..]),\n            hex::encode(&calculated_contribution_hash[..]),\n        );\n\n        info!(\n            \"successfully verified param transition, dt_verify={}s\",\n            start_verification.elapsed().as_secs()\n        );\n\n        next_params_before = Some(params_after);\n    }\n\n    
info!(\n        \"successfully verified all param transitions, dt_total={}s\",\n        start_total.elapsed().as_secs()\n    );\n}\n\nfn verify_param_transitions_daemon(proof: Proof, hasher: Hasher, sector_size: u64) {\n    const SLEEP_SECS: u64 = 10;\n\n    let head = get_head_commit();\n\n    info!(\n        \"starting the verification daemon for the circuit: {} {} {} {}\",\n        proof,\n        hasher,\n        display_sector_size(sector_size),\n        head\n    );\n\n    let mut next_before_params: Option<MPCParameters> = None;\n    let mut next_before_filename: Option<String> = None;\n    let mut param_number: usize = 0;\n\n    loop {\n        let (before_params, before_filename) = if next_before_params.is_some() {\n            let before_params = next_before_params.take().unwrap();\n            let before_filename = next_before_filename.take().unwrap();\n            (before_params, before_filename)\n        } else {\n            let before_filename = params_filename(proof, hasher, sector_size, &head, param_number);\n            let before_path = Path::new(&before_filename);\n            if !before_path.exists() {\n                info!(\"waiting for params file: {}\", before_filename);\n                while !before_path.exists() {\n                    sleep(Duration::from_secs(SLEEP_SECS));\n                }\n            }\n            info!(\"found file: {}\", before_filename);\n            info!(\"reading params file: {}\", before_filename);\n            let file = File::open(&before_path).unwrap();\n            let mut reader = BufReader::with_capacity(1024 * 1024, file);\n            let read_start = Instant::now();\n            let before_params = MPCParameters::read(&mut reader, true).unwrap();\n            info!(\n                \"successfully read params, dt_read={}s\",\n                read_start.elapsed().as_secs()\n            );\n            param_number += 1;\n            (before_params, before_filename)\n        };\n\n        let after_filename = params_filename(proof, hasher, sector_size, &head, param_number);\n        let after_path = Path::new(&after_filename);\n\n        if !after_path.exists() {\n            info!(\"waiting for params file: {}\", after_filename);\n            while !after_path.exists() {\n                sleep(Duration::from_secs(SLEEP_SECS));\n            }\n        }\n        info!(\"found file: {}\", after_filename);\n\n        let after_params = {\n            info!(\"reading params file: {}\", after_filename);\n            let file = File::open(&after_path).unwrap();\n            let mut reader = BufReader::with_capacity(1024 * 1024, file);\n            let read_start = Instant::now();\n            let params = MPCParameters::read(&mut reader, true).unwrap();\n            info!(\n                \"successfully read params, dt_read={}s\",\n                read_start.elapsed().as_secs()\n            );\n            param_number += 1;\n            params\n        };\n\n        let contribution_hash_filename = format!(\"{}_contribution\", after_filename);\n        let contribution_hash_path = Path::new(&contribution_hash_filename);\n\n        if !contribution_hash_path.exists() {\n            info!(\n                \"waiting for contribution hash file: {}\",\n                contribution_hash_filename\n            );\n            while !contribution_hash_path.exists() {\n                sleep(Duration::from_secs(SLEEP_SECS));\n            }\n        }\n        info!(\"found file: {}\", contribution_hash_filename);\n\n        let hex_str = fs::read_to_string(&contribution_hash_path)\n            .expect(\"failed to read contribution hash file\")\n            .trim()\n            .to_string();\n\n        let provided_contribution_hash = {\n            let mut arr = [0u8; 64];\n            let vec = hex::decode(&hex_str).unwrap_or_else(|_| {\n                panic!(\"contribution hash is not a valid hex string: {}\", hex_str)\n            });\n            info!(\"parsed contribution hash\");\n            arr.copy_from_slice(&vec[..]);\n            arr\n        };\n\n        info!(\n            \"verifying param transition:\\n\\t{} -> {}\\n\\t{}\",\n            before_filename, after_filename, hex_str\n        );\n\n        let start_verification = Instant::now();\n\n        let calculated_contribution_hash =\n            verify_contribution(&before_params, &after_params).expect(\"invalid transition\");\n\n        assert_eq!(\n            &provided_contribution_hash[..],\n            &calculated_contribution_hash[..],\n            \"provided contribution hash ({}) does not match calculated contribution hash ({})\",\n            hex_str,\n            hex::encode(&calculated_contribution_hash[..]),\n        );\n\n        info!(\n            \"successfully verified param transition, dt_verify={}s\\n\",\n            start_verification.elapsed().as_secs()\n        );\n\n        next_before_params = Some(after_params);\n        next_before_filename = Some(after_filename);\n    }\n}\n\n/// Creates the logger for the \"new\" CLI subcommand. Writes info logs to stdout, error logs to\n/// stderr, and all logs to the file: `<proof>_<hasher>_<sector-size>_<head>_0.log`.\nfn setup_new_logger(proof: Proof, hasher: Hasher, sector_size: u64) {\n    let log_filename = format!(\n        \"{}.log\",\n        initial_params_filename(proof, hasher, sector_size)\n    );\n\n    let log_file = File::create(&log_filename)\n        .unwrap_or_else(|_| panic!(\"failed to create log file: {}\", log_filename));\n\n    CombinedLogger::init(vec![\n        TermLogger::new(\n            LevelFilter::Info,\n            simplelog::Config::default(),\n            TerminalMode::Mixed,\n        )\n        .unwrap(),\n        WriteLogger::new(LevelFilter::Info, simplelog::Config::default(), log_file),\n    ])\n    .expect(\"failed to setup logger\");\n}\n\n/// Creates the logger for the \"contribute\" CLI subcommand. Writes info logs to stdout, error logs\n/// to stderr, and all logs to the file:\n/// `<proof>_<hasher>_<sector-size>_<head>_<param number containing contribution>.log`.\nfn setup_contribute_logger(path_before: &str) {\n    let (proof, hasher, sector_size, head, param_number_before) =\n        parse_params_filename(path_before);\n\n    let mut log_filename =\n        params_filename(proof, hasher, sector_size, &head, param_number_before + 1);\n\n    log_filename.push_str(\".log\");\n\n    let log_file = File::create(&log_filename)\n        .unwrap_or_else(|_| panic!(\"failed to create log file: {}\", log_filename));\n\n    CombinedLogger::init(vec![\n        TermLogger::new(\n            LevelFilter::Info,\n            simplelog::Config::default(),\n            TerminalMode::Mixed,\n        )\n        .unwrap(),\n        WriteLogger::new(LevelFilter::Info, simplelog::Config::default(), log_file),\n    ])\n    .expect(\"failed to setup logger\");\n}\n\n/// Creates the logger for the \"verify\" CLI subcommand. 
Writes info logs to stdout, error logs\n/// to stderr, and all logs to the file:\n/// <proof>_<hasher>_<sector-size>_<head>_verify_<first param number>_<last param number>.log\nfn setup_verify_logger(param_paths: &[&str]) {\n    let (proof, hasher, sector_size, head, first_param_number) =\n        parse_params_filename(param_paths.first().unwrap());\n\n    let last_param_number = parse_params_filename(param_paths.last().unwrap()).4;\n\n    let mut log_filename = format!(\n        \"{}_{}_{}_{}_verify_{}_{}.log\",\n        proof,\n        hasher,\n        display_sector_size(sector_size),\n        head,\n        first_param_number,\n        last_param_number\n    );\n    log_filename.make_ascii_lowercase();\n    let log_filename = log_filename.replace(\"-\", \"_\");\n\n    let log_file = File::create(&log_filename)\n        .unwrap_or_else(|_| panic!(\"failed to create log file: {}\", log_filename));\n\n    CombinedLogger::init(vec![\n        TermLogger::new(\n            LevelFilter::Info,\n            simplelog::Config::default(),\n            TerminalMode::Mixed,\n        )\n        .unwrap(),\n        WriteLogger::new(LevelFilter::Info, simplelog::Config::default(), log_file),\n    ])\n    .expect(\"failed to setup logger\");\n}\n\n/// Setup the logger for the `verifyd` CLI subcommand. Writes info logs to stdout, error logs to\n/// stderr, and all logs to the file: <proof>_<hasher>_<sector-size>_<head>_verifyd.log\nfn setup_verifyd_logger(proof: Proof, hasher: Hasher, sector_size: u64) {\n    let mut log_filename = format!(\n        \"{}_{}_{}_{}_verifyd.log\",\n        proof,\n        hasher,\n        display_sector_size(sector_size),\n        &get_head_commit(),\n    );\n    log_filename.make_ascii_lowercase();\n    let log_filename = log_filename.replace(\"-\", \"_\");\n\n    let log_file = File::create(&log_filename)\n        .unwrap_or_else(|_| panic!(\"failed to create log file: {}\", log_filename));\n\n    CombinedLogger::init(vec![\n        TermLogger::new(\n            LevelFilter::Info,\n            simplelog::Config::default(),\n            TerminalMode::Mixed,\n        )\n        .unwrap(),\n        WriteLogger::new(LevelFilter::Info, simplelog::Config::default(), log_file),\n    ])\n    .expect(\"failed to setup logger\");\n}\n\n#[allow(clippy::cognitive_complexity)]\nfn main() {\n    let new_command = SubCommand::with_name(\"new\")\n        .about(\"Create parameters\")\n        .arg(\n            Arg::with_name(\"porep\")\n                .long(\"porep\")\n                .help(\"Generate PoRep parameters\"),\n        )\n        .arg(\n            Arg::with_name(\"winning-post\")\n                .long(\"winning-post\")\n                .help(\"Generate WinningPoSt parameters\"),\n        )\n        .arg(\n            Arg::with_name(\"window-post\")\n                .long(\"window-post\")\n                .help(\"Generate WindowPoSt parameters\"),\n        )\n        .group(\n            ArgGroup::with_name(\"proof\")\n                .args(&[\"porep\", \"winning-post\", \"window-post\"])\n                .required(true)\n                .multiple(false),\n        )\n        .arg(\n            Arg::with_name(\"poseidon\")\n                .long(\"poseidon\")\n                .help(\"Use the Poseidon hash function for column commitments and Merkle trees\"),\n        )\n        /*\n        .arg(\n            Arg::with_name(\"sha-pedersen\")\n                .long(\"sha-pedersen\")\n                .help(\"Use SHA256 for column commitments and Pedersen hash for Merkle 
trees\"),\n        )\n        */\n        .group(\n            ArgGroup::with_name(\"hasher\")\n                .args(&[\"poseidon\"])\n                .required(false), /*\n                                  .args(&[\"poseidon\", \"sha-pedersen\"])\n                                  .required(true)\n                                  .multiple(false),\n                                  */\n        )\n        .arg(\n            Arg::with_name(\"2kib\")\n                .long(\"2kib\")\n                .help(\"Use circuits with 2KiB sector sizes\"),\n        )\n        .arg(\n            Arg::with_name(\"8mib\")\n                .long(\"8mib\")\n                .help(\"Use circuits with 8MiB sector sizes\"),\n        )\n        .arg(\n            Arg::with_name(\"512mib\")\n                .long(\"512mib\")\n                .help(\"Use circuits with 512MiB sector sizes\"),\n        )\n        .arg(\n            Arg::with_name(\"32gib\")\n                .long(\"32gib\")\n                .help(\"Use circuits with 32GiB sector sizes\"),\n        )\n        .arg(\n            Arg::with_name(\"64gib\")\n                .long(\"64gib\")\n                .help(\"Use circuits with 64GiB sector sizes\"),\n        )\n        .group(\n            ArgGroup::with_name(\"sector-size\")\n                .args(&[\"2kib\", \"8mib\", \"512mib\", \"32gib\", \"64gib\"])\n                .required(true)\n                .multiple(false),\n        );\n\n    let contribute_command = SubCommand::with_name(\"contribute\")\n        .about(\"Contribute to parameters\")\n        .arg(\n            Arg::with_name(\"path-before\")\n                .index(1)\n                .required(true)\n                .help(\"The path to the parameters file to read and contribute to\"),\n        );\n\n    let verify_command = SubCommand::with_name(\"verify\")\n        .about(\"Verify that a set of contribution hashes correctly transition a set of params\")\n        .arg(\n            Arg::with_name(\"paths\")\n                .long(\"paths\")\n                .required(true)\n                .takes_value(true)\n                .value_delimiter(\",\")\n                .min_values(2)\n                .help(\"Comma separated list (no whitespace between items) of paths to parameter files\"),\n        )\n        .arg(\n            Arg::with_name(\"contributions\")\n                .long(\"contributions\")\n                .required(true)\n                .takes_value(true)\n                .case_insensitive(true)\n                .value_delimiter(\",\")\n                .min_values(1)\n                .help(\n                    \"An ordered (first to most recent) comma separated list of hex-encoded \\\n                    contribution hashes. There must be no whitespace in any of the digest strings \\\n                    or between any items in the list. Each digest must be 128 characters long \\\n                    (i.e. each digest hex string encodes 64 bytes); digest strings may use upper \\\n                    or lower case hex characters.\"\n                ),\n        );\n\n    let verifyd_command = SubCommand::with_name(\"verifyd\")\n        .about(\"Run the param verification daemon\")\n        .arg(\n            Arg::with_name(\"porep\")\n                .long(\"porep\")\n                .help(\"Verify PoRep parameters\"),\n        )\n        .arg(\n            Arg::with_name(\"winning-post\")\n                .long(\"winning-post\")\n                .help(\"Verify WinningPoSt parameters\"),\n        )\n        .arg(\n            Arg::with_name(\"window-post\")\n                .long(\"window-post\")\n                .help(\"Verify WindowPoSt parameters\"),\n        )\n        .group(\n            ArgGroup::with_name(\"proof\")\n                .args(&[\"porep\", \"winning-post\", \"window-post\"])\n                .required(true)\n                .multiple(false),\n        )\n        .arg(\n            Arg::with_name(\"poseidon\")\n                .long(\"poseidon\")\n                .help(\"Use the Poseidon hash function for column commitments and Merkle trees\"),\n        )\n        /*\n        .arg(\n            Arg::with_name(\"sha-pedersen\")\n                .long(\"sha-pedersen\")\n                .help(\"Use SHA256 for column commitments and Pedersen hash for Merkle trees\"),\n        )\n        */\n        .group(\n            ArgGroup::with_name(\"hasher\")\n                .args(&[\"poseidon\"])\n                .required(false), /*\n                                  .args(&[\"poseidon\", \"sha-pedersen\"])\n                                  .required(true)\n                                  .multiple(false),\n                                  */\n        )\n        .arg(\n            Arg::with_name(\"2kib\")\n                .long(\"2kib\")\n                .help(\"Use circuits with 2KiB sector sizes\"),\n        )\n        .arg(\n            Arg::with_name(\"8mib\")\n                .long(\"8mib\")\n                .help(\"Use circuits with 8MiB sector sizes\"),\n        )\n        .arg(\n            Arg::with_name(\"512mib\")\n                .long(\"512mib\")\n                .help(\"Use circuits with 512MiB sector sizes\"),\n        )\n        .arg(\n            Arg::with_name(\"32gib\")\n                .long(\"32gib\")\n                .help(\"Use circuits with 32GiB sector sizes\"),\n        )\n        .arg(\n            Arg::with_name(\"64gib\")\n                .long(\"64gib\")\n                .help(\"Use circuits with 64GiB sector sizes\"),\n        )\n        .group(\n            ArgGroup::with_name(\"sector-size\")\n                .args(&[\"2kib\", \"8mib\", \"512mib\", \"32gib\", \"64gib\"])\n                .required(true)\n                .multiple(false),\n        );\n\n    let matches = App::new(\"phase2\")\n        .version(\"1.0\")\n        .setting(AppSettings::ArgRequiredElseHelp)\n        .setting(AppSettings::SubcommandRequired)\n        .subcommand(new_command)\n        .subcommand(contribute_command)\n        .subcommand(verify_command)\n        .subcommand(verifyd_command)\n        .get_matches();\n\n    if let (subcommand, Some(matches)) = matches.subcommand() {\n        match subcommand {\n            \"new\" => {\n                let proof = if matches.is_present(\"porep\") {\n                    Proof::Porep\n                } else if matches.is_present(\"winning-post\") {\n                    Proof::WinningPost\n                } else {\n        
            Proof::WindowPost\n                };\n\n                // Default to using Poseidon for the hasher.\n                let hasher = Hasher::Poseidon;\n                /*\n                let hasher = if matches.is_present(\"sha-pedersen\") {\n                    Hasher::ShaPedersen\n                } else {\n                    Hasher::Poseidon\n                };\n                */\n\n                let sector_size = if matches.is_present(\"2kib\") {\n                    SECTOR_SIZE_2_KIB\n                } else if matches.is_present(\"8mib\") {\n                    SECTOR_SIZE_8_MIB\n                } else if matches.is_present(\"512mib\") {\n                    SECTOR_SIZE_512_MIB\n                } else if matches.is_present(\"32gib\") {\n                    SECTOR_SIZE_32_GIB\n                } else {\n                    SECTOR_SIZE_64_GIB\n                };\n\n                setup_new_logger(proof, hasher, sector_size);\n                with_shape!(\n                    sector_size,\n                    create_initial_params,\n                    proof,\n                    hasher,\n                    sector_size\n                );\n            }\n            \"contribute\" => {\n                let path_before = matches.value_of(\"path-before\").unwrap();\n                setup_contribute_logger(path_before);\n                contribute_to_params(path_before);\n            }\n            \"verify\" => {\n                let param_paths: Vec<&str> = matches.values_of(\"paths\").unwrap().collect();\n\n                let contribution_hashes: Vec<[u8; 64]> = matches\n                    .values_of(\"contributions\")\n                    .unwrap()\n                    .map(|hex_str| {\n                        let mut digest_bytes_arr = [0u8; 64];\n                        let digest_bytes_vec = hex::decode(hex_str).unwrap_or_else(|_| {\n                            panic!(\"contribution hash is not a valid hex string: {}\", hex_str)\n                        });\n                        digest_bytes_arr.copy_from_slice(&digest_bytes_vec[..]);\n                        digest_bytes_arr\n                    })\n                    .collect();\n\n                setup_verify_logger(&param_paths);\n                verify_param_transitions(&param_paths, &contribution_hashes);\n            }\n            \"verifyd\" => {\n                let proof = if matches.is_present(\"porep\") {\n                    Proof::Porep\n                } else if matches.is_present(\"winning-post\") {\n                    Proof::WinningPost\n                } else {\n                    Proof::WindowPost\n                };\n\n                // Default to using Poseidon for the hasher.\n                let hasher = Hasher::Poseidon;\n                /*\n                let hasher = if matches.is_present(\"sha-pedersen\") {\n                    Hasher::ShaPedersen\n                } else {\n                    Hasher::Poseidon\n                };\n                */\n\n                let sector_size = if matches.is_present(\"2kib\") {\n                    SECTOR_SIZE_2_KIB\n                } else if matches.is_present(\"8mib\") {\n                    SECTOR_SIZE_8_MIB\n                } else if matches.is_present(\"512mib\") {\n                    SECTOR_SIZE_512_MIB\n                } else if matches.is_present(\"32gib\") {\n                    SECTOR_SIZE_32_GIB\n                } else {\n                    SECTOR_SIZE_64_GIB\n                };\n\n                setup_verifyd_logger(proof, 
hasher, sector_size);\n                verify_param_transitions_daemon(proof, hasher, sector_size);\n            }\n            _ => unreachable!(),\n        }\n    }\n}\n"
  },
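  {
    "path": "filecoin-proofs/examples/editor_sketch_contribution_chain.rs",
    "content": "// Editor's illustrative sketch; this file is not part of the repository and its path is\n// hypothetical. `verify_param_transitions` above checks a chain of parameter files\n// pairwise: n files require n - 1 contribution digests, and digest i must link file i to\n// file i + 1 (windows of size two zipped with the digest list). This sketch keeps that\n// shape but replaces the expensive `phase2::verify_contribution` with a toy digest so it\n// runs standalone.\n\n/// Toy stand-in for the real 64-byte contribution hash.\nfn toy_contribution_digest(before: &[u8], after: &[u8]) -> u64 {\n    let sum = |bytes: &[u8]| bytes.iter().map(|&b| b as u64).sum::<u64>();\n    sum(before).wrapping_mul(31).wrapping_add(sum(after))\n}\n\n/// Verify that each provided digest links one parameter state to the next.\nfn verify_chain(params: &[Vec<u8>], digests: &[u64]) -> bool {\n    assert_eq!(\n        params.len() - 1,\n        digests.len(),\n        \"the number of contributions must be one less than the number of parameter files\"\n    );\n    params\n        .windows(2)\n        .zip(digests)\n        .all(|(pair, &provided)| toy_contribution_digest(&pair[0], &pair[1]) == provided)\n}\n\nfn main() {\n    // Three parameter states and the two digests linking them.\n    let params = vec![vec![1u8, 2], vec![3, 4], vec![5, 6]];\n    let digests: Vec<u64> = params\n        .windows(2)\n        .map(|pair| toy_contribution_digest(&pair[0], &pair[1]))\n        .collect();\n    assert!(verify_chain(&params, &digests));\n}\n"
  },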
  {
    "path": "filecoin-proofs/src/caches.rs",
    "content": "use std::collections::HashMap;\nuse std::sync::{Arc, Mutex};\n\nuse anyhow::Result;\nuse bellperson::{\n    bls::Bls12,\n    groth16::{self, prepare_verifying_key},\n};\nuse lazy_static::lazy_static;\nuse log::info;\nuse rand::rngs::OsRng;\nuse storage_proofs_core::{compound_proof::CompoundProof, merkle::MerkleTreeTrait};\nuse storage_proofs_porep::stacked::{StackedCompound, StackedDrg};\nuse storage_proofs_post::fallback::{FallbackPoSt, FallbackPoStCircuit, FallbackPoStCompound};\n\nuse crate::{\n    constants::DefaultPieceHasher,\n    parameters::{public_params, window_post_public_params, winning_post_public_params},\n    types::{PaddedBytesAmount, PoRepConfig, PoRepProofPartitions, PoStConfig, PoStType},\n};\n\ntype Bls12GrothParams = groth16::MappedParameters<Bls12>;\npub type Bls12PreparedVerifyingKey = groth16::PreparedVerifyingKey<Bls12>;\n\ntype Cache<G> = HashMap<String, Arc<G>>;\ntype GrothMemCache = Cache<Bls12GrothParams>;\ntype VerifyingKeyMemCache = Cache<Bls12PreparedVerifyingKey>;\n\nlazy_static! {\n    static ref GROTH_PARAM_MEMORY_CACHE: Mutex<GrothMemCache> = Default::default();\n    static ref VERIFYING_KEY_MEMORY_CACHE: Mutex<VerifyingKeyMemCache> = Default::default();\n}\n\npub fn cache_lookup<F, G>(\n    cache_ref: &Mutex<Cache<G>>,\n    identifier: String,\n    generator: F,\n) -> Result<Arc<G>>\nwhere\n    F: FnOnce() -> Result<G>,\n    G: Send + Sync,\n{\n    info!(\"trying parameters memory cache for: {}\", &identifier);\n    {\n        let cache = (*cache_ref).lock().expect(\"poisoned cache\");\n\n        if let Some(entry) = cache.get(&identifier) {\n            info!(\"found params in memory cache for {}\", &identifier);\n            return Ok(entry.clone());\n        }\n    }\n\n    info!(\"no params in memory cache for {}\", &identifier);\n\n    let new_entry = Arc::new(generator()?);\n    let res = new_entry.clone();\n    {\n        let cache = &mut (*cache_ref).lock().expect(\"poisoned cache\");\n        cache.insert(identifier, new_entry);\n    }\n\n    Ok(res)\n}\n\n#[inline]\npub fn lookup_groth_params<F>(identifier: String, generator: F) -> Result<Arc<Bls12GrothParams>>\nwhere\n    F: FnOnce() -> Result<Bls12GrothParams>,\n{\n    cache_lookup(&*GROTH_PARAM_MEMORY_CACHE, identifier, generator)\n}\n\n#[inline]\npub fn lookup_verifying_key<F>(\n    identifier: String,\n    generator: F,\n) -> Result<Arc<Bls12PreparedVerifyingKey>>\nwhere\n    F: FnOnce() -> Result<Bls12PreparedVerifyingKey>,\n{\n    let vk_identifier = format!(\"{}-verifying-key\", &identifier);\n    cache_lookup(&*VERIFYING_KEY_MEMORY_CACHE, vk_identifier, generator)\n}\n\npub fn get_stacked_params<Tree: 'static + MerkleTreeTrait>(\n    porep_config: PoRepConfig,\n) -> Result<Arc<Bls12GrothParams>> {\n    let public_params = public_params::<Tree>(\n        PaddedBytesAmount::from(porep_config),\n        usize::from(PoRepProofPartitions::from(porep_config)),\n        porep_config.porep_id,\n        porep_config.api_version,\n    )?;\n\n    let parameters_generator = || {\n        <StackedCompound<Tree, DefaultPieceHasher> as CompoundProof<\n            StackedDrg<'_, Tree, DefaultPieceHasher>,\n            _,\n        >>::groth_params::<OsRng>(None, &public_params)\n        .map_err(Into::into)\n    };\n\n    lookup_groth_params(\n        format!(\n            \"STACKED[{}]\",\n            usize::from(PaddedBytesAmount::from(porep_config))\n        ),\n        parameters_generator,\n    )\n}\n\npub fn get_post_params<Tree: 'static + MerkleTreeTrait>(\n    post_config: 
&PoStConfig,\n) -> Result<Arc<Bls12GrothParams>> {\n    match post_config.typ {\n        PoStType::Winning => {\n            let post_public_params = winning_post_public_params::<Tree>(post_config)?;\n\n            let parameters_generator = || {\n                <FallbackPoStCompound<Tree> as CompoundProof<\n                    FallbackPoSt<'_, Tree>,\n                    FallbackPoStCircuit<Tree>,\n                >>::groth_params::<OsRng>(None, &post_public_params)\n                .map_err(Into::into)\n            };\n\n            Ok(lookup_groth_params(\n                format!(\n                    \"WINNING_POST[{}]\",\n                    usize::from(post_config.padded_sector_size())\n                ),\n                parameters_generator,\n            )?)\n        }\n        PoStType::Window => {\n            let post_public_params = window_post_public_params::<Tree>(post_config)?;\n\n            let parameters_generator = || {\n                <FallbackPoStCompound<Tree> as CompoundProof<\n                    FallbackPoSt<'_, Tree>,\n                    FallbackPoStCircuit<Tree>,\n                >>::groth_params::<OsRng>(None, &post_public_params)\n                .map_err(Into::into)\n            };\n\n            Ok(lookup_groth_params(\n                format!(\n                    \"WINDOW_POST[{}]\",\n                    usize::from(post_config.padded_sector_size())\n                ),\n                parameters_generator,\n            )?)\n        }\n    }\n}\n\npub fn get_stacked_verifying_key<Tree: 'static + MerkleTreeTrait>(\n    porep_config: PoRepConfig,\n) -> Result<Arc<Bls12PreparedVerifyingKey>> {\n    let public_params = public_params(\n        PaddedBytesAmount::from(porep_config),\n        usize::from(PoRepProofPartitions::from(porep_config)),\n        porep_config.porep_id,\n        porep_config.api_version,\n    )?;\n\n    let vk_generator = || {\n        let vk = <StackedCompound<Tree, DefaultPieceHasher> as CompoundProof<\n            StackedDrg<'_, Tree, DefaultPieceHasher>,\n            _,\n        >>::verifying_key::<OsRng>(None, &public_params)?;\n        Ok(prepare_verifying_key(&vk))\n    };\n\n    lookup_verifying_key(\n        format!(\n            \"STACKED[{}]\",\n            usize::from(PaddedBytesAmount::from(porep_config))\n        ),\n        vk_generator,\n    )\n}\n\npub fn get_post_verifying_key<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n) -> Result<Arc<Bls12PreparedVerifyingKey>> {\n    match post_config.typ {\n        PoStType::Winning => {\n            let post_public_params = winning_post_public_params::<Tree>(post_config)?;\n\n            let vk_generator = || {\n                let vk = <FallbackPoStCompound<Tree> as CompoundProof<\n                    FallbackPoSt<'_, Tree>,\n                    FallbackPoStCircuit<Tree>,\n                >>::verifying_key::<OsRng>(None, &post_public_params)?;\n                Ok(prepare_verifying_key(&vk))\n            };\n\n            Ok(lookup_verifying_key(\n                format!(\n                    \"WINNING_POST[{}]\",\n                    usize::from(post_config.padded_sector_size())\n                ),\n                vk_generator,\n            )?)\n        }\n        PoStType::Window => {\n            let post_public_params = window_post_public_params::<Tree>(post_config)?;\n\n            let vk_generator = || {\n                let vk = <FallbackPoStCompound<Tree> as CompoundProof<\n                    FallbackPoSt<'_, Tree>,\n                    
FallbackPoStCircuit<Tree>,\n                >>::verifying_key::<OsRng>(None, &post_public_params)?;\n                Ok(prepare_verifying_key(&vk))\n            };\n\n            Ok(lookup_verifying_key(\n                format!(\n                    \"WINDOW_POST[{}]\",\n                    usize::from(post_config.padded_sector_size())\n                ),\n                vk_generator,\n            )?)\n        }\n    }\n}\n"
  },
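  {
    "path": "filecoin-proofs/examples/editor_sketch_cache_lookup.rs",
    "content": "// Editor's illustrative sketch; this file is not part of the repository and its path is\n// hypothetical. `cache_lookup` in caches.rs memoizes expensive values behind a\n// Mutex<HashMap<String, Arc<G>>>. The lock is released while the generator runs, so two\n// threads may race and both compute the value; each still gets a usable Arc, and the map\n// keeps whichever insert lands last. This standalone sketch shows the same pattern.\nuse std::collections::HashMap;\nuse std::sync::{Arc, Mutex};\n\nfn cache_lookup<G, F>(cache: &Mutex<HashMap<String, Arc<G>>>, key: String, generate: F) -> Arc<G>\nwhere\n    F: FnOnce() -> G,\n{\n    // Fast path: the value is already cached.\n    if let Some(hit) = cache.lock().expect(\"poisoned cache\").get(&key) {\n        return hit.clone();\n    }\n\n    // Slow path: compute outside the lock, then insert.\n    let fresh = Arc::new(generate());\n    cache\n        .lock()\n        .expect(\"poisoned cache\")\n        .insert(key, fresh.clone());\n    fresh\n}\n\nfn main() {\n    let cache: Mutex<HashMap<String, Arc<u64>>> = Mutex::new(HashMap::new());\n    let first = cache_lookup(&cache, \"answer\".into(), || 42);\n    // The generator is not called again: the second lookup hits the cache.\n    let second = cache_lookup(&cache, \"answer\".into(), || unreachable!(\"cached\"));\n    assert!(Arc::ptr_eq(&first, &second));\n}\n"
  },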
  {
    "path": "filecoin-proofs/src/commitment_reader.rs",
    "content": "use std::cmp::min;\nuse std::io::{self, Read};\n\nuse anyhow::{ensure, Result};\nuse filecoin_hashers::{HashFunction, Hasher};\nuse rayon::prelude::{ParallelIterator, ParallelSlice};\n\nuse crate::{constants::DefaultPieceHasher, pieces::piece_hash};\n\n/// Calculates comm-d of the data piped through to it.\n/// The data must be bit padded, and its total length must be a power of two bytes.\npub struct CommitmentReader<R> {\n    source: R,\n    buffer: [u8; 64],\n    buffer_pos: usize,\n    current_tree: Vec<<DefaultPieceHasher as Hasher>::Domain>,\n}\n\nimpl<R: Read> CommitmentReader<R> {\n    pub fn new(source: R) -> Self {\n        CommitmentReader {\n            source,\n            buffer: [0u8; 64],\n            buffer_pos: 0,\n            current_tree: Vec::new(),\n        }\n    }\n\n    /// Attempt to generate the next hash, but only if the buffer is full.\n    fn try_hash(&mut self) {\n        // Only hash once the full 64-byte buffer has been filled.\n        if self.buffer_pos < 64 {\n            return;\n        }\n\n        // WARNING: keep in sync with DefaultPieceHasher and its .node impl\n        let hash = <DefaultPieceHasher as Hasher>::Function::hash(&self.buffer);\n        self.current_tree.push(hash);\n        self.buffer_pos = 0;\n\n        // TODO: reduce hashes when possible, instead of keeping them around.\n    }\n\n    pub fn finish(self) -> Result<<DefaultPieceHasher as Hasher>::Domain> {\n        ensure!(self.buffer_pos == 0, \"not enough inputs provided\");\n\n        let CommitmentReader { current_tree, .. } = self;\n\n        let mut current_row = current_tree;\n\n        while current_row.len() > 1 {\n            let next_row = current_row\n                .par_chunks(2)\n                .map(|chunk| piece_hash(chunk[0].as_ref(), chunk[1].as_ref()))\n                .collect::<Vec<_>>();\n\n            current_row = next_row;\n        }\n        debug_assert_eq!(current_row.len(), 1);\n\n        Ok(current_row\n            .into_iter()\n            .next()\n            .expect(\"should have been caught by debug build: len==1\"))\n    }\n}\n\nimpl<R: Read> Read for CommitmentReader<R> {\n    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {\n        let start = self.buffer_pos;\n        let left = 64 - self.buffer_pos;\n        let end = start + min(left, buf.len());\n\n        // fill the buffer as much as possible\n        let r = self.source.read(&mut self.buffer[start..end])?;\n\n        // write out the data we just read\n        buf[..r].copy_from_slice(&self.buffer[start..start + r]);\n\n        self.buffer_pos += r;\n\n        // try to hash\n        self.try_hash();\n\n        Ok(r)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::io::Cursor;\n\n    use fr32::Fr32Reader;\n    use storage_proofs_core::pieces::generate_piece_commitment_bytes_from_source;\n\n    use crate::types::{PaddedBytesAmount, UnpaddedBytesAmount};\n\n    #[test]\n    fn test_commitment_reader() {\n        let piece_size = 127 * 8;\n        let source = vec![255u8; piece_size];\n        let mut fr32_reader = Fr32Reader::new(Cursor::new(&source));\n\n        let commitment1 = generate_piece_commitment_bytes_from_source::<DefaultPieceHasher>(\n            &mut fr32_reader,\n            PaddedBytesAmount::from(UnpaddedBytesAmount(piece_size as u64)).into(),\n        )\n        .expect(\"failed to generate piece commitment bytes from source\");\n\n        let fr32_reader = Fr32Reader::new(Cursor::new(&source));\n        let mut commitment_reader = CommitmentReader::new(fr32_reader);\n        io::copy(&mut commitment_reader, &mut 
io::sink()).expect(\"io copy failed\");\n\n        let commitment2 = commitment_reader.finish().expect(\"failed to finish\");\n\n        assert_eq!(&commitment1[..], AsRef::<[u8]>::as_ref(&commitment2));\n    }\n}\n"
  },
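  {
    "path": "filecoin-proofs/examples/editor_sketch_pairwise_reduce.rs",
    "content": "// Editor's illustrative sketch; this file is not part of the repository and its path is\n// hypothetical. `CommitmentReader::finish` in commitment_reader.rs reduces the collected\n// leaf hashes pairwise (par_chunks(2) with `piece_hash`) until a single root remains.\n// This sketch shows the same binary reduction with a toy node hash in place of the real\n// SHA256-based one, and without rayon; like the real reader, it assumes a power-of-two\n// number of leaves.\n\n/// Toy stand-in for piece_hash(left, right).\nfn toy_node_hash(left: u64, right: u64) -> u64 {\n    left.wrapping_mul(1_000_003).wrapping_add(right)\n}\n\n/// Repeatedly hash adjacent pairs until one root remains.\nfn reduce_to_root(mut row: Vec<u64>) -> u64 {\n    assert!(\n        row.len().is_power_of_two(),\n        \"leaf count must be a power of two\"\n    );\n    while row.len() > 1 {\n        row = row\n            .chunks(2)\n            .map(|pair| toy_node_hash(pair[0], pair[1]))\n            .collect();\n    }\n    row[0]\n}\n\nfn main() {\n    // 4 leaves -> 2 inner nodes -> 1 root.\n    let root = reduce_to_root(vec![1, 2, 3, 4]);\n    assert_eq!(root, toy_node_hash(toy_node_hash(1, 2), toy_node_hash(3, 4)));\n}\n"
  },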
  {
    "path": "filecoin-proofs/src/constants.rs",
    "content": "use std::collections::HashMap;\nuse std::sync::RwLock;\n\npub use storage_proofs_core::drgraph::BASE_DEGREE as DRG_DEGREE;\npub use storage_proofs_porep::stacked::EXP_DEGREE;\n\nuse filecoin_hashers::{poseidon::PoseidonHasher, sha256::Sha256Hasher, Hasher};\nuse lazy_static::lazy_static;\nuse storage_proofs_core::{\n    merkle::{BinaryMerkleTree, LCTree, OctLCMerkleTree, OctMerkleTree},\n    util::NODE_SIZE,\n    MAX_LEGACY_POREP_REGISTERED_PROOF_ID,\n};\nuse typenum::{U0, U2, U8};\n\nuse crate::types::UnpaddedBytesAmount;\n\npub const SECTOR_SIZE_2_KIB: u64 = 1 << 11;\npub const SECTOR_SIZE_4_KIB: u64 = 1 << 12;\npub const SECTOR_SIZE_16_KIB: u64 = 1 << 14;\npub const SECTOR_SIZE_32_KIB: u64 = 1 << 15;\npub const SECTOR_SIZE_8_MIB: u64 = 1 << 23;\npub const SECTOR_SIZE_16_MIB: u64 = 1 << 24;\npub const SECTOR_SIZE_512_MIB: u64 = 1 << 29;\npub const SECTOR_SIZE_1_GIB: u64 = 1 << 30;\npub const SECTOR_SIZE_32_GIB: u64 = 1 << 35;\npub const SECTOR_SIZE_64_GIB: u64 = 1 << 36;\n\npub const WINNING_POST_CHALLENGE_COUNT: usize = 66;\npub const WINNING_POST_SECTOR_COUNT: usize = 1;\n\npub const WINDOW_POST_CHALLENGE_COUNT: usize = 10;\n\npub const MAX_LEGACY_REGISTERED_SEAL_PROOF_ID: u64 = MAX_LEGACY_POREP_REGISTERED_PROOF_ID;\n\n/// Sector sizes for which parameters have been published.\npub const PUBLISHED_SECTOR_SIZES: [u64; 10] = [\n    SECTOR_SIZE_2_KIB,\n    SECTOR_SIZE_4_KIB,\n    SECTOR_SIZE_16_KIB,\n    SECTOR_SIZE_32_KIB,\n    SECTOR_SIZE_8_MIB,\n    SECTOR_SIZE_16_MIB,\n    SECTOR_SIZE_512_MIB,\n    SECTOR_SIZE_1_GIB,\n    SECTOR_SIZE_32_GIB,\n    SECTOR_SIZE_64_GIB,\n];\n\nlazy_static! {\n    pub static ref POREP_MINIMUM_CHALLENGES: RwLock<HashMap<u64, u64>> = RwLock::new(\n        [\n            (SECTOR_SIZE_2_KIB, 2),\n            (SECTOR_SIZE_4_KIB, 2),\n            (SECTOR_SIZE_16_KIB, 2),\n            (SECTOR_SIZE_32_KIB, 2),\n            (SECTOR_SIZE_8_MIB, 2),\n            (SECTOR_SIZE_16_MIB, 2),\n            (SECTOR_SIZE_512_MIB, 2),\n            (SECTOR_SIZE_1_GIB, 2),\n            (SECTOR_SIZE_32_GIB, 176),\n            (SECTOR_SIZE_64_GIB, 176),\n        ]\n        .iter()\n        .copied()\n        .collect()\n    );\n    pub static ref POREP_PARTITIONS: RwLock<HashMap<u64, u8>> = RwLock::new(\n        [\n            (SECTOR_SIZE_2_KIB, 1),\n            (SECTOR_SIZE_4_KIB, 1),\n            (SECTOR_SIZE_16_KIB, 1),\n            (SECTOR_SIZE_32_KIB, 1),\n            (SECTOR_SIZE_8_MIB, 1),\n            (SECTOR_SIZE_16_MIB, 1),\n            (SECTOR_SIZE_512_MIB, 1),\n            (SECTOR_SIZE_1_GIB, 1),\n            (SECTOR_SIZE_32_GIB, 10),\n            (SECTOR_SIZE_64_GIB, 10),\n        ]\n        .iter()\n        .copied()\n        .collect()\n    );\n    pub static ref LAYERS: RwLock<HashMap<u64, usize>> = RwLock::new(\n        [\n            (SECTOR_SIZE_2_KIB, 2),\n            (SECTOR_SIZE_4_KIB, 2),\n            (SECTOR_SIZE_16_KIB, 2),\n            (SECTOR_SIZE_32_KIB, 2),\n            (SECTOR_SIZE_8_MIB, 2),\n            (SECTOR_SIZE_16_MIB, 2),\n            (SECTOR_SIZE_512_MIB, 2),\n            (SECTOR_SIZE_1_GIB, 2),\n            (SECTOR_SIZE_32_GIB, 11),\n            (SECTOR_SIZE_64_GIB, 11),\n        ]\n        .iter()\n        .copied()\n        .collect()\n    );\n    // These numbers must match those used for Window PoSt scheduling in the miner actor.\n    // Please coordinate changes with actor code.\n    // https://github.com/filecoin-project/specs-actors/blob/master/actors/abi/sector.go\n    pub static ref WINDOW_POST_SECTOR_COUNT: 
RwLock<HashMap<u64, usize>> = RwLock::new(\n        [\n            (SECTOR_SIZE_2_KIB, 2),\n            (SECTOR_SIZE_4_KIB, 2),\n            (SECTOR_SIZE_16_KIB, 2),\n            (SECTOR_SIZE_32_KIB, 2),\n            (SECTOR_SIZE_8_MIB, 2),\n            (SECTOR_SIZE_16_MIB, 2),\n            (SECTOR_SIZE_512_MIB, 2),\n            (SECTOR_SIZE_1_GIB, 2),\n            (SECTOR_SIZE_32_GIB, 2349), // this gives 125,279,217 constraints, fitting in a single partition\n            (SECTOR_SIZE_64_GIB, 2300), // this gives 129,887,900 constraints, fitting in a single partition\n        ]\n        .iter()\n        .copied()\n        .collect()\n    );\n}\n\n/// The size of a single snark proof.\npub const SINGLE_PARTITION_PROOF_LEN: usize = 192;\n\npub const MINIMUM_RESERVED_LEAVES_FOR_PIECE_IN_SECTOR: u64 = 4;\n\n// Bit padding causes bytes to only be aligned at every 127 bytes (for 31.75 bytes).\npub const MINIMUM_RESERVED_BYTES_FOR_PIECE_IN_FULLY_ALIGNED_SECTOR: u64 =\n    (MINIMUM_RESERVED_LEAVES_FOR_PIECE_IN_SECTOR * NODE_SIZE as u64) - 1;\n\n/// The minimum size a single piece must have before padding.\npub const MIN_PIECE_SIZE: UnpaddedBytesAmount = UnpaddedBytesAmount(127);\n\n/// The hasher used for creating comm_d.\npub type DefaultPieceHasher = Sha256Hasher;\npub type DefaultPieceDomain = <DefaultPieceHasher as Hasher>::Domain;\n\n/// The default hasher for merkle trees currently in use.\npub type DefaultTreeHasher = PoseidonHasher;\npub type DefaultTreeDomain = <DefaultTreeHasher as Hasher>::Domain;\n\npub type DefaultBinaryTree = BinaryMerkleTree<DefaultTreeHasher>;\npub type DefaultOctTree = OctMerkleTree<DefaultTreeHasher>;\npub type DefaultOctLCTree = OctLCMerkleTree<DefaultTreeHasher>;\n\n// Generic shapes\npub type SectorShapeBase = LCTree<DefaultTreeHasher, U8, U0, U0>;\npub type SectorShapeSub2 = LCTree<DefaultTreeHasher, U8, U2, U0>;\npub type SectorShapeSub8 = LCTree<DefaultTreeHasher, U8, U8, U0>;\npub type SectorShapeTop2 = LCTree<DefaultTreeHasher, U8, U8, U2>;\n\n// Specific size constants by shape\npub type SectorShape2KiB = SectorShapeBase;\npub type SectorShape8MiB = SectorShapeBase;\npub type SectorShape512MiB = SectorShapeBase;\n\npub type SectorShape4KiB = SectorShapeSub2;\npub type SectorShape16MiB = SectorShapeSub2;\npub type SectorShape1GiB = SectorShapeSub2;\n\npub type SectorShape16KiB = SectorShapeSub8;\npub type SectorShape32GiB = SectorShapeSub8;\n\npub type SectorShape32KiB = SectorShapeTop2;\npub type SectorShape64GiB = SectorShapeTop2;\n\npub fn is_sector_shape_base(sector_size: u64) -> bool {\n    matches!(\n        sector_size,\n        SECTOR_SIZE_2_KIB | SECTOR_SIZE_8_MIB | SECTOR_SIZE_512_MIB\n    )\n}\n\npub fn is_sector_shape_sub2(sector_size: u64) -> bool {\n    matches!(\n        sector_size,\n        SECTOR_SIZE_4_KIB | SECTOR_SIZE_16_MIB | SECTOR_SIZE_1_GIB\n    )\n}\n\npub fn is_sector_shape_sub8(sector_size: u64) -> bool {\n    matches!(sector_size, SECTOR_SIZE_16_KIB | SECTOR_SIZE_32_GIB)\n}\n\npub fn is_sector_shape_top2(sector_size: u64) -> bool {\n    matches!(sector_size, SECTOR_SIZE_32_KIB | SECTOR_SIZE_64_GIB)\n}\n\n/// Calls a function with the type hint of the sector shape matching the provided sector.\n/// Panics if provided with an unknown sector size.\n#[macro_export]\nmacro_rules! 
with_shape {\n    ($size:expr, $f:ident) => {\n        with_shape!($size, $f,)\n    };\n    ($size:expr, $f:ident, $($args:expr,)*) => {\n        match $size {\n            _x if $size == $crate::constants::SECTOR_SIZE_2_KIB => {\n              $f::<$crate::constants::SectorShape2KiB>($($args),*)\n            },\n            _x if $size == $crate::constants::SECTOR_SIZE_4_KIB => {\n              $f::<$crate::constants::SectorShape4KiB>($($args),*)\n            },\n            _x if $size == $crate::constants::SECTOR_SIZE_16_KIB => {\n              $f::<$crate::constants::SectorShape16KiB>($($args),*)\n            },\n            _x if $size == $crate::constants::SECTOR_SIZE_32_KIB => {\n              $f::<$crate::constants::SectorShape32KiB>($($args),*)\n            },\n            _x if $size == $crate::constants::SECTOR_SIZE_8_MIB => {\n              $f::<$crate::constants::SectorShape8MiB>($($args),*)\n            },\n            _x if $size == $crate::constants::SECTOR_SIZE_16_MIB => {\n              $f::<$crate::constants::SectorShape16MiB>($($args),*)\n            },\n            _x if $size == $crate::constants::SECTOR_SIZE_512_MIB => {\n              $f::<$crate::constants::SectorShape512MiB>($($args),*)\n            },\n            _x if $size == $crate::constants::SECTOR_SIZE_1_GIB => {\n              $f::<$crate::constants::SectorShape1GiB>($($args),*)\n            },\n            _x if $size == $crate::constants::SECTOR_SIZE_32_GIB => {\n              $f::<$crate::constants::SectorShape32GiB>($($args),*)\n            },\n            _x if $size == $crate::constants::SECTOR_SIZE_64_GIB => {\n              $f::<$crate::constants::SectorShape64GiB>($($args),*)\n            },\n            _ => panic!(\"unsupported sector size: {}\", $size),\n        }\n    };\n    ($size:expr, $f:ident, $($args:expr),*) => {\n        with_shape!($size, $f, $($args,)*)\n    };\n}\n\npub const TEST_SEED: [u8; 16] = [\n    0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5,\n];\n"
  },
  {
    "path": "filecoin-proofs/src/error.rs",
    "content": "use crate::FCP_LOG;\nuse failure::{Backtrace, Error};\nuse slog::*;\n\npub type Result<T> = ::std::result::Result<T, Error>;\n\npub trait ExpectWithBacktrace<T> {\n    fn expects(self, msg: &str) -> T;\n}\n\nimpl<T, E: std::fmt::Debug> ExpectWithBacktrace<T> for ::std::result::Result<T, E> {\n    fn expects(self, msg: &str) -> T {\n        if let Err(ref err) = self {\n            let err = format!(\"{:?}\", err);\n            let backtrace = format!(\"{:?}\", Backtrace::new());\n            error!(FCP_LOG, \"expected Result to be Ok\"; \"error\" => err, \"backtrace\" => backtrace);\n        }\n        self.expect(msg)\n    }\n}\n\nimpl<T> ExpectWithBacktrace<T> for Option<T> {\n    fn expects(self, msg: &str) -> T {\n        if self.is_none() {\n            let backtrace = format!(\"{:?}\", Backtrace::new());\n            error!(FCP_LOG, \"expected Option to be Some\"; \"backtrace\" => backtrace);\n        }\n        self.expect(msg)\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/src/fr32.rs",
    "content": "use std::cmp::min;\nuse std::io::{self, Error, ErrorKind, Read, Seek, SeekFrom, Write};\n\nuse anyhow::{ensure, Result};\nuse bitvec::{order::Lsb0 as LittleEndian, vec::BitVec};\n\n/** PaddingMap represents a mapping between data and its padded equivalent.\n\nThe padding process takes a *byte-aligned stream* of unpadded *raw* data\nas input and returns another byte stream where padding is applied every\n`data_bits` to align them to the byte boundary (`element_bits`). The\n(inverse) *unpadding* process maps that output back to the raw input\nthat generated it.\n\n# Padded layout\n\nAt the *byte-level*, the padded layout is:\n\n```text\n      (full element)              (full)                 (incomplete)\n||  data_bits  pad_bits  ||  data_bits  pad_bits  ||  some_data  (no_padding)\n                         ^^                               ^^\n                  element boundary                (some_data < data_bits)\n                   (byte-aligned)\n```\n\nEach *element* is a byte-aligned stream comprised of a *full unit* of `data_bits`\nwith `pad_bits` at the end to byte-align it (where `pad_bits` is less than a byte,\nthis is a *sub-byte padding* scheme). After the last element boundary there may be\nan incomplete unit of data (`some_data`) with a length smaller than `data_bits`\nthat hasn't been padded. The padding rules are:\n  1. Padding is always applied to a full unit of `data_bits`.\n  2. A full data unit cannot exist without its corresponding padding.\n  3. A unit of padding is complete by definition: padding can only be\n     applied fully to each element.\n  4. If there is padding present then there has to be an already formed\n     element there (an element is full if and only if its data unit is full).\n\n# Last byte\n\nWhen returning the byte-aligned output generated from the padded *bitstream*\n(since the padding is done at the bit-level) the conversion results in the\nlast byte having (potentially) more bits than desired. At the *bit-level*\nthe layout of the last byte can either be a complete element (bits of raw\ndata followed by the corresponding padding bits) or an incomplete unit of\ndata: some number of *valid* data (D) bits followed by any number of *extra*\nbits (X) necessary to complete the byte-aligned stream:\n\n```text\n |   D   D   D   D   X   X   X   X   |\n         (data)         (extra)      ^ byte boundary (end of output)\n```\n\n(This diagram is just for illustrative purposes, we actually return the output\n in little-endian order, see `BitVecLEu8`).\n\nIt's important to distinguish these extra bits (generated as a side\neffect of the conversion to a byte-aligned stream) from the padding bits\nthemselves introduced in the padding process: even though both will be\nleft with a zero value, these extra bits are a place-holder for the actual\nraw data bits needed to complete the current unit of data (and hence also\nthe element, with the corresponding padding bits added after it). 
Since\nextra bits are only a product of an incomplete unit of data, there can't\nbe extra bits after padding bits.\n\nThere's no metadata signaling the number of extra bits present in the\nlast byte in any given padded layout; this is deduced from the fact\nthat there's only a single number of valid data bits in the last byte,\nand hence a number of data bits in total, that maps to a byte-aligned\n(multiple of 8) raw data stream that could have been used as input.\n\n# Example: `FR32_PADDING_MAP`\n\nIn this case the `PaddingMap` is defined with a data unit of 254 bits that\nare byte aligned to a 256-bit (32-byte) element. If the user writes as input,\nsay, 40 bytes (320 bits) of raw input data to the padding process the resulting\nlayout would be, at the element (byte) level:\n\n```text\n      (full element: 32 bytes)         (incomplete: 9 bytes)\n||  data_bits: 254  pad_bits: 2  ||   some_data: 66 bits (+ extra bits)\n                                 ^^\n                          element boundary\n```\n\nThat is, of the original 320 bits (40 bytes) of raw input data, 254 are\npadded in the first element and the remaining 66 bits form the incomplete\ndata unit after it, which is aligned to 9 bytes. At the bit level, that\nlast incomplete byte will have 2 valid bits and 6 extra bits.\n\n# Alignment of raw data bytes in the padded output\n\nThis section is not necessary to use this structure but it does help to\nreason about it. By the previous definition, the raw data bits *embedded*\nin the padded layout are not necessarily grouped in the same byte units\nas in the original raw data input (due to the inclusion of the padding\nbits interleaved in that bit stream, which keep shifting the data bits\nafter them).\n\nThis can also be stated as: the offsets of the bits (relative to the byte\nthey belong to, i.e., *bit-offset*) in the raw data input won't necessarily\nmatch the bit-offsets of the raw data bits embedded in the padded layout.\nThe consequence is that each raw byte written to the padded layout won't\nresult in a byte-aligned bit stream output, i.e., it may cause the appearance\nof extra bits (to convert the output to a byte-aligned stream).\n\nThere are portions of the padded layout, however, where this alignment does\nhappen. Particularly, when the padded layout accumulates enough padding bits\nthat they altogether add up to a byte, the following raw data byte written\nwill result in a byte-aligned output, and the same is true for all the other\nraw data bytes that follow it up until the element end, where new padding bits\nshift away this alignment. (The other obvious case is the first element, which,\nwith no padded bits in front of it, has by definition all its embedded raw data\nbytes aligned, independently of the `data_bits`/`pad_bits` configuration used.)\n\nIn the previous example, that happens after the fourth element, where 4 units\nof `pad_bits` add up to one byte and all of the raw data bytes in the fifth\nelement will keep their original alignment from the byte input stream (and the\nsame will happen with every other element multiple of 4). When that fourth\nelement is completed we have then 127 bytes of raw data and 1 byte of padding\n(totalling 32 * 4 = 128 bytes of padded output), so the interval of raw data\nbytes `[127..159]` (indexed like this in the input raw data stream) will keep\nits original alignment when embedded in the padded layout, i.e., every raw\ndata byte written will keep the output bit stream byte-aligned (without extra\nbits). 
(Technically, the last byte actually won't be a full byte since its last\nbits will be replaced by padding).\n\n# Key terms\n\nCollection of terms introduced in this documentation (with the format\n`*<new-term>*`). This section doesn't provide a self-contained definition\nof them (to avoid unnecessary repetition), it just provides (when appropriate)\nan additional summary of what was already discussed.\n\n * Raw data: unpadded user-supplied data (we don't use the *unpadded* term\n   to avoid excessive *padding* suffixes in the code). Padding (data) bits.\n * Element: byte-aligned stream consisting of a full unit of data plus the\n   padding bits.\n * Full unit of raw `data_bits` (always followed by padding). Incomplete unit,\n   not followed by padding, doesn't form an element.\n * Byte-aligned stream: always input and output of the (un)padding process,\n   either as raw data or padded (using the term \"byte-aligned\" and not \"byte\n   stream\" to stress the boundaries of the elements). Bit streams: used internally\n   when padding data (never returned as bits).\n * Valid data bits, only in the context of the last byte of a byte-aligned stream\n   generated from the padding process. Extra bits: what's left unused of the last\n   byte (in a way the extra bits are the padding at the byte-level, but we don't\n   use that term here to avoid confusions).\n * Sub-byte padding.\n * Bit-offset: offset of a bit within the byte it belongs to, ranging in `[0..8]`.\n * Embedded raw data: view of the input raw data when it has been decomposed in\n   bit streams and padded in the resulting output.\n\n**/\n#[derive(Debug)]\npub struct PaddingMap {\n    /// The number of bits of raw data in an element.\n    data_bits: usize,\n    /// Number of bits in an element: `data_bits` + `pad_bits()`. Its value\n    /// is fixed to the next byte-aligned size after `data_bits` (sub-byte padding).\n    element_bits: usize,\n}\n// TODO: Optimization: Evaluate saving the state of a (un)padding operation\n// inside (e.g., as a cursor like in `BitVec`), maybe not in this structure but\n// in a new `Padder` structure which would remember the positions (remaining\n// data bits in the element, etc.) 
to avoid recalculating them each time across\n// different (un)pad calls.\n\n// This is the padding map corresponding to Fr32.\n// Most of the code in this module is general-purpose and could move elsewhere.\n// The application-specific wrappers which implicitly use Fr32 embed the FR32_PADDING_MAP.\npub const FR32_PADDING_MAP: PaddingMap = PaddingMap {\n    data_bits: 254,\n    element_bits: 256,\n};\n\npub type BitVecLEu8 = BitVec<LittleEndian, u8>;\n\n////////////////////////////////////////////////////////////////////////////////////////////////////\n// Convenience interface for API functions – all bundling FR32_PADDING_MAP.\n// Parameter/return types are tuned for current caller convenience.\n\npub fn target_unpadded_bytes<W: ?Sized>(target: &mut W) -> io::Result<u64>\nwhere\n    W: Seek,\n{\n    let (_, unpadded, _) = FR32_PADDING_MAP.target_offsets(target)?;\n\n    Ok(unpadded)\n}\n\n// Leave the actual truncation to the caller, since we can't do it generically.\n// Return the length to which target should be truncated.\n// We might also need to handle zero-padding what will become the final byte of target.\n// Technically, this should be okay though because that byte will always be overwritten later.\n// If we decide this is unnecessary, then we don't need to pass target at all.\npub fn almost_truncate_to_unpadded_bytes<W: ?Sized>(\n    _target: &mut W,\n    length: u64,\n) -> io::Result<usize>\nwhere\n    W: Read + Write + Seek,\n{\n    let padded =\n        BitByte::from_bits(FR32_PADDING_MAP.transform_bit_offset((length * 8) as usize, true));\n    let real_length = padded.bytes_needed();\n    let _final_bit_count = padded.bits;\n    Ok(real_length)\n}\n\npub fn to_unpadded_bytes(padded_bytes: u64) -> u64 {\n    FR32_PADDING_MAP.transform_byte_offset(padded_bytes as usize, false) as u64\n}\n\npub fn to_padded_bytes(unpadded_bytes: usize) -> usize {\n    FR32_PADDING_MAP.transform_byte_offset(unpadded_bytes, true)\n}\n\n////////////////////////////////////////////////////////////////////////////////////////////////////\n// BitByte represents a size expressed in bytes extended\n// with bit precision, that is, not rounded.\n// Invariant: it is an error for bits to be > 7.\n#[derive(Debug)]\npub struct BitByte {\n    bytes: usize,\n    bits: usize,\n}\n\nimpl BitByte {\n    // Create a BitByte from number of bits. 
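E.g.,\n    // `BitByte::from_bits(12)` is `BitByte { bytes: 1, bits: 4 }`. 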
Guaranteed to return a well-formed value (bits < 8)\n    pub fn from_bits(bits: usize) -> BitByte {\n        BitByte {\n            bytes: bits / 8,\n            bits: bits % 8,\n        }\n    }\n\n    pub fn from_bytes(bytes: usize) -> BitByte {\n        Self::from_bits(bytes * 8)\n    }\n\n    // How many bits in the BitByte (inverse of from_bits).\n    pub fn total_bits(&self) -> usize {\n        self.bytes * 8 + self.bits\n    }\n\n    // True if the BitByte has no bits component.\n    pub fn is_byte_aligned(&self) -> bool {\n        self.bits == 0\n    }\n\n    // How many distinct bytes are needed to represent data of this size?\n    pub fn bytes_needed(&self) -> usize {\n        self.bytes + if self.bits == 0 { 0 } else { 1 }\n    }\n}\n\nimpl PaddingMap {\n    pub fn new(data_bits: usize, element_bits: usize) -> Result<PaddingMap> {\n        // Check that we add less than 1 byte of padding (sub-byte padding).\n        ensure!(\n            element_bits - data_bits <= 7,\n            \"Padding (num bits: {}) must be less than 1 byte.\",\n            element_bits - data_bits\n        );\n        // Check that the element is byte aligned.\n        ensure!(\n            element_bits % 8 == 0,\n            \"Element (num bits: {}) must be byte aligned.\",\n            element_bits\n        );\n\n        Ok(PaddingMap {\n            data_bits,\n            element_bits,\n        })\n    }\n\n    pub fn pad(&self, bits_out: &mut BitVecLEu8) {\n        for _ in 0..self.pad_bits() {\n            bits_out.push(false)\n        }\n        // TODO: Optimization: Drop this explicit `push` padding, the padding\n        // should happen implicitly when byte-aligning the data unit.\n    }\n\n    pub fn pad_bits(&self) -> usize {\n        self.element_bits - self.data_bits\n    }\n\n    // Transform an offset (either a position or a size) *expressed in\n    // bits* in a raw byte-aligned data stream to its equivalent in a\n    // generated padded bit stream, that is, not byte aligned (so we\n    // don't count the extra bits here). If `padding` is `false` calculate\n    // the inverse transformation.\n    pub fn transform_bit_offset(&self, pos: usize, padding: bool) -> usize {\n        // Set the sizes we're converting to and from.\n        let (from_size, to_size) = if padding {\n            (self.data_bits, self.element_bits)\n        } else {\n            (self.element_bits, self.data_bits)\n        };\n\n        // For both the padding and unpadding cases the operation is the same.\n        // The quotient is the number of full units: elements in the padded layout,\n        // or groups of `data_bits` in the raw data input (each of which will be\n        // converted to a full element).\n        // The remainder (in both cases) is the last *incomplete* part of either of\n        // the two. Even in the padded layout, if there is an incomplete element it\n        // has to consist *only* of data (see `PaddingMap#padded-layout`). 
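For example,\n        // with `FR32_PADDING_MAP` (254 data bits per 256-bit element) a raw bit\n        // offset of 300 pads to (300 / 254) * 256 + (300 % 254) = 256 + 46 = 302.\n        // 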
That amount\n        // of spare raw data doesn't need conversion; it can just be added to the new\n        // position.\n        let (full_elements, incomplete_data) = div_rem(pos, from_size);\n        (full_elements * to_size) + incomplete_data\n    }\n\n    // Similar to `transform_bit_offset` this function transforms an offset\n    // expressed in bytes, that is, we are taking into account the extra\n    // bits here.\n    // TODO: Evaluate the relationship between this function and `transform_bit_offset`,\n    // it seems the two could be merged, or at least restructured to better expose\n    // their differences.\n    pub fn transform_byte_offset(&self, pos: usize, padding: bool) -> usize {\n        let transformed_bit_pos = self.transform_bit_offset(pos * 8, padding);\n\n        let transformed_byte_pos = transformed_bit_pos as f64 / 8.;\n        // TODO: Optimization: It might end up being cheaper to avoid this\n        // float conversion and use / and %.\n\n        // When padding, the final bits in the bit stream will grow into the\n        // last (potentially incomplete) byte of the byte stream, so round the\n        // number up (`ceil`). When unpadding, there's no way to know a priori\n        // how many valid bits are in the last byte, we have to choose the number\n        // that fits in a byte-aligned raw data stream, so round the number down\n        // to that (`floor`).\n        (if padding {\n            transformed_byte_pos.ceil()\n        } else {\n            transformed_byte_pos.floor()\n        }) as usize\n    }\n\n    // From the `position` specified, it returns:\n    // - the absolute position of the start of the next element,\n    //   in bytes (since elements -with padding- are byte aligned).\n    // - the number of bits left to read (write) from (to) the current\n    //   data unit (assuming it's full).\n    pub fn next_boundary(&self, position: &BitByte) -> (usize, usize) {\n        let position_bits = position.total_bits();\n\n        let (_, bits_after_last_boundary) = div_rem(position_bits, self.element_bits);\n\n        let remaining_data_unit_bits = self.data_bits - bits_after_last_boundary;\n\n        let next_element_position_bits = position_bits + remaining_data_unit_bits + self.pad_bits();\n\n        (next_element_position_bits / 8, remaining_data_unit_bits)\n    }\n\n    // For a `Seek`able `target` of a byte-aligned padded layout, return:\n    // - the size in bytes\n    // - the size in bytes of raw data which corresponds to the `target` size\n    // - a BitByte representing the number of padded bits contained in the\n    //   byte-aligned padded layout\n    pub fn target_offsets<W: ?Sized>(&self, target: &mut W) -> io::Result<(u64, u64, BitByte)>\n    where\n        W: Seek,\n    {\n        // The current position in `target` is the number of padded bytes already written\n        // to the byte-aligned stream.\n        let padded_bytes = target.seek(SeekFrom::End(0))?;\n\n        // Deduce the number of input raw bytes that generated that padded byte size.\n        let raw_data_bytes = self.transform_byte_offset(padded_bytes as usize, false);\n\n        // With the number of raw data bytes known we can now compute the number\n        // of padded bits in the generated bit stream (before it was converted\n        // to a byte-aligned stream), that is, `raw_data_bytes * 8` is not necessarily\n        // `padded_bits`.\n        let padded_bits = self.transform_bit_offset(raw_data_bytes * 8, true);\n\n        Ok((\n            
padded_bytes,\n            raw_data_bytes as u64,\n            BitByte::from_bits(padded_bits),\n        ))\n        // TODO: Why do we use `usize` internally and `u64` externally?\n    }\n}\n\n#[inline]\nfn div_rem(a: usize, b: usize) -> (usize, usize) {\n    let div = a / b;\n    let rem = a % b;\n    (div, rem)\n}\n\n// TODO: The following extraction functions could be moved to a different file.\n\n/** Shift an `amount` of bits from the `input` in the direction indicated by `is_left`.\n\nThis function tries to imitate the behavior of `shl` and `shr` of a\n`BitVec<LittleEndian, u8>`, where the inner vector is traversed one byte\nat a time (`u8`), and inside each byte, bits are traversed (`LittleEndian`)\nfrom LSB (\"right\") to MSB (\"left\"). For example, the bits in this two-byte\nslice will be traversed according to their numbering:\n\n```text\nADDR     |  7  6  5  4  3  2  1  0  |\n\nADDR +1  |  F  E  D  C  B  A  9  8  |\n```\n\n`BitVec` uses the opposite naming convention to this function: shifting left\nhere is equivalent to `shr` there, and shifting right to `shl`.\n\nIf shifting in the left direction, the `input` is expanded by one extra byte to\naccommodate the overflow (instead of just discarding it, which is what's done\nin the right direction).\n\nThe maximum `amount` to shift is 7 (and the minimum is 1), that is, we always\nshift less than a byte. This precondition is only checked during testing (with\n`debug_assert!`) for performance reasons, it is up to the caller to enforce it.\n\n# Examples\n\nShift the `input` (taken from the diagram above) left by an `amount` of 3 bits,\ngrowing the output slice:\n\n```text\nADDR     |  4  3  2  1  0  _  _  _  |  Filled with zeros.\n\nADDR +1  |  C  B  A  9  8  7  6  5  |\n\nADDR +2  |  _  _  _  _  _  F  E  D  |  The overflow of the last input byte\n                                               is moved to this (new) byte.\n```\n\nSame, but shift right:\n\n```text\nADDR     |  A  9  8  7  6  5  4  3  |  The overflow `[2,1,0]` is just discarded,\n                                                         the slice doesn't grow.\nADDR +1  |  _  _  _  F  E  D  C  B  |\n```\n\n(Note: `0`, `1`, `2`, etc. are bits identified by their original position,\n`_` means a bit left at zero after shifting, to avoid confusions with\nthe unique bit `0`, that just *started* at that position but doesn't\nnecessarily carry that value.)\n\n**/\npub fn shift_bits(input: &[u8], amount: usize, is_left: bool) -> Vec<u8> {\n    debug_assert!(amount >= 1);\n    debug_assert!(amount <= 7);\n\n    // Create the `output` vector from the original input values, extending\n    // its size by one if shifting left.\n    let mut output = Vec::with_capacity(input.len() + if is_left { 1 } else { 0 });\n    output.extend_from_slice(input);\n    if is_left {\n        output.push(0);\n    }\n    // TODO: Is there a cleaner way to do this? Is the extra byte worth the initial\n    // `with_capacity` call?\n\n    // Split the shift in two parts. First, do a simple bit shift (losing the\n    // overflow) for each byte, then, in a second pass, recover the lost overflow\n    // from the `input`. 
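For instance, shifting the\n    // single byte `0b1010_1101` left by 3 gives `0b0110_1000` in the first pass,\n    // and the second pass recovers the overflow `0b1010_1101 >> 5 == 0b0000_0101`\n    // into the extra (new) byte. 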
The advantage of splitting it like this is that the place-holder\n    // spaces are already being cleared with zeros to just join the overflow part with a\n    // single `OR` operation (instead of assembling both parts together at the same time\n    // which requires an extra clear operation with a mask of zeros).\n    for output_byte in output.iter_mut().take(input.len()) {\n        if is_left {\n            *output_byte <<= amount;\n        } else {\n            *output_byte >>= amount;\n        }\n    }\n\n    if is_left {\n        // The `output` looks at this point like this (following the original\n        // example):\n        //\n        // ADDR     |  4  3  2  1  0  _  _  _  |\n        //\n        // ADDR +1  |  C  B  A  9  8  _  _  _  |\n        //\n        // ADDR +2  |  _  _  _  _  _  _  _  _  |  Extra byte allocated to extend the `input`,\n        //                                            hasn't been modified in the first pass.\n        //\n        // We need to recover the overflow of each shift (e.g., `[7,6,5]` from\n        // the first byte and `[F,E,D]` from the second) and move it to the next\n        // byte, shifting it to place it at the \"start\" (in the current ordering\n        // that means aligning it to the LSB). For example, the overflow of (also)\n        // `amount` bits from the first byte is:\n        //\n        // ADDR     |  7  6  5  4  3  2  1  0  |\n        //             +-----+\n        //           overflow lost\n        //\n        // and it's \"recovered\" with a shift in the opposite direction, which both\n        // positions it in the correct place *and* leaves cleared the rest of the\n        // bits to be able to `OR` (join) it with the next byte of `output` (shifted\n        // in the first pass):\n        //\n        // (`output` so far)\n        // ADDR +1  |  C  B  A  9  8  _  _  _  |    +\n        //                                          |\n        // (shifted overflow                        |  join both (`|=`)\n        //      from `input`)                       |\n        // ADDR     |  _  _  _  _  _  7  6  5  |    V\n        //             +------------->\n        //\n        for i in 0..input.len() {\n            let overflow = input[i] >> (8 - amount);\n            output[i + 1] |= overflow;\n        }\n    } else {\n        // The overflow handling in the right shift follows the same logic as the left\n        // one with just two differences: (1) the overflow goes to the *previous* byte\n        // in memory and (2) the overflow of the first byte is discarded (hence the `for`\n        // loop iterates just `input.len()` *minus one* positions).\n        for i in 1..input.len() {\n            let overflow = input[i] << (8 - amount);\n            output[i - 1] |= overflow;\n        }\n    }\n\n    // TODO: Optimization: Join both passes in one `for` loop for cache\n    // efficiency (do everything we need to do in the same address once).\n    // (This is low priority since we normally shift small arrays -32 byte\n    // elements- per call.)\n\n    output\n}\n\n/** Extract bits and relocate them.\n\nExtract `num_bits` from the `input` starting at absolute `pos` (expressed in\nbits). Format the extracted bit stream as a byte stream `output` (in a `Vec<u8>`)\nwhere the extracted bits start at `new_offset` bits in the first byte (i.e.,\n`new_offset` can't be bigger than 7) allowing them to be relocated from their\noriginal bit-offset (encoded in `pos`). 
The rest of the bits (below `new_offset`\nand after the extracted `num_bits`) are left at zero (to prepare them to be\njoined with another extracted `output`). This function follows the ordering in\n`BitVec<LittleEndian, u8>` (see `shift_bits` for more details).\n\nThe length of the input must be big enough to perform the extraction\nof `num_bits`. This precondition is only checked during testing (with\n`debug_assert!`) for performance reasons, it is up to the caller to enforce it.\n\n# Example\n\nTaking as `input` the original two-byte layout from `shift_bits`, extracting 4\n`num_bits` from `pos` 12 and relocating them in `new_offset` 2 would result in\nan `output` of a single byte like:\n\n```text\nADDR     |  _  _  F  E  D  C  _  _  |\n```\n\n(The second byte in `ADDR +1` has been dropped after the extraction\nas it's no longer needed.)\n\n**/\n//\n// TODO: Replace the byte terminology for a generic term that can mean\n// anything that implements the `bitvec::Bits` trait (`u8`, `u32`, etc.).\n// `BitVec` calls it \"element\" but that's already used here (this function\n// may need to be moved elsewhere which would allow to reuse that term).\n// This also will imply removing the hardcoded `8`s (size of byte).\n#[inline]\npub fn extract_bits_and_shift(\n    input: &[u8],\n    pos: usize,\n    num_bits: usize,\n    new_offset: usize,\n) -> Vec<u8> {\n    debug_assert!(input.len() * 8 >= pos + num_bits);\n    debug_assert!(new_offset <= 7);\n\n    // 1. Trim the whole bytes (before and after) we don't need for the\n    //    extraction (we don't want to waste shift operations on them).\n    // 2. Shift from the original `pos` to the `new_offset`.\n    // 3. Trim the bits in the first and last byte we also don't need.\n    //\n    // TODO: Does (3) need to happen *after* the shift in (2)? 
It feels\n    // more natural but can't we just trim everything in (1)?\n\n    // Determine from `pos` the number of full bytes that can be completely skipped\n    // (`skip_bytes`), and the number of bits within the first byte of interest that\n    // we'll start extracting from (`extraction_offset`).\n    let (skip_bytes, extraction_offset) = div_rem(pos, 8);\n\n    // (1).\n    let input = &input[skip_bytes..];\n    let input = &input[..BitByte::from_bits(extraction_offset + num_bits).bytes_needed()];\n\n    // (2).\n    use std::cmp::Ordering;\n    let mut output = match new_offset.cmp(&extraction_offset) {\n        Ordering::Less => {\n            // Shift right.\n            shift_bits(input, extraction_offset - new_offset, false)\n        }\n        Ordering::Greater => {\n            // Shift left.\n            shift_bits(input, new_offset - extraction_offset, true)\n        }\n        Ordering::Equal => {\n            // No shift needed, take the `input` as is.\n            input.to_vec()\n        }\n    };\n\n    // After the shift we may not need the last byte of the `output` (either\n    // because the left shift extended it by one byte or because the right shift\n    // moved the extraction span below that threshold).\n    if output.len() > BitByte::from_bits(new_offset + num_bits).bytes_needed() {\n        output.pop();\n    }\n    // TODO: Optimization: A more specialized shift would have just dropped\n    // that byte (we would need to pass it the `num_bits` we want).\n\n    // (3).\n    if new_offset != 0 {\n        clear_right_bits(output.first_mut().unwrap(), new_offset);\n    }\n    let end_offset = (new_offset + num_bits) % 8;\n    if end_offset != 0 {\n        clear_left_bits(output.last_mut().unwrap(), end_offset);\n    }\n\n    output\n}\n\n// Set to zero all the bits to the \"left\" of the `offset` including\n// it, that is, [MSB; `offset`].\n#[inline]\npub fn clear_left_bits(byte: &mut u8, offset: usize) {\n    *(byte) &= (1 << offset) - 1\n}\n\n// Set to zero all the bits to the \"right\" of the `offset` excluding\n// it, that is, (`offset`; LSB].\n#[inline]\npub fn clear_right_bits(byte: &mut u8, offset: usize) {\n    *(byte) &= !((1 << offset) - 1)\n}\n\n/** Padding process.\n\nRead a `source` of raw byte-aligned data, pad it in a bit stream and\nwrite a byte-aligned version of it in the `target`. The `target` needs\nto implement (besides `Write`) the `Read` and `Seek` traits since the\nlast byte written may be incomplete and will need to be rewritten.\n\nThe reader will always be byte-aligned, the writer will operate with\nbit precision since we may have (when calling this function multiple\ntimes) a written `target` with extra bits (that need to be overwritten)\nand also incomplete data units.\nThe ideal alignment scenario is for the writer to be positioned at the\nbyte-aligned element boundary and just write whole chunks of `data_chunk_bits`\n(full data units) followed by their corresponding padding. To get there we then\nneed to handle the potential bit-level misalignments:\n  1. extra bits: the last byte is only partially valid so we\n     need to get some bits from the `source` to overwrite them.\n  2. 
Incomplete data unit: we need to fill the rest of it and add the padding\n     to form an element that would position the writer at the desired boundary.\n**/\n\n// `offset` and `len` are based on the unpadded data, so\n// if [0, 1, ..., 255] was the original unpadded data, offset 3 and len 4 would return\n// [3, 4, 5, 6].\npub fn write_unpadded<W: ?Sized>(\n    source: &[u8],\n    target: &mut W,\n    offset: usize,\n    len: usize,\n) -> io::Result<usize>\nwhere\n    W: Write,\n{\n    // Check that there are actually `len` raw data bytes encoded inside\n    // `source` starting at `offset`.\n    let read_pos = BitByte::from_bits(FR32_PADDING_MAP.transform_bit_offset(offset * 8, true));\n    let raw_data_size = BitByte::from_bits(\n        FR32_PADDING_MAP.transform_bit_offset(source.len() * 8 - read_pos.total_bits(), false),\n    )\n    .bytes_needed();\n    if raw_data_size < len {\n        return Err(Error::new(\n            ErrorKind::Other,\n            format!(\n                \"requested extraction of {} raw data bytes when there's at most {} in the source\",\n                len, raw_data_size\n            ),\n        ));\n    }\n\n    // In order to optimize alignment in the common case of writing from an aligned start,\n    // we should make the chunk a multiple of 128 (4 full elements in the padded layout).\n    // n was hand-tuned to do reasonably well in the benchmarks.\n    let n = 1000;\n    let chunk_size = 128 * n;\n\n    let mut written = 0;\n\n    let mut offset = offset;\n    let mut len = len;\n\n    for chunk in source.chunks(chunk_size) {\n        let write_len = min(len, chunk.len());\n\n        written += write_unpadded_aux(&FR32_PADDING_MAP, source, target, offset, write_len)?;\n        offset += write_len;\n        len -= write_len;\n    }\n\n    Ok(written)\n}\n\n/**  Unpadding process.\n\nRead a `source` of padded data and recover from it the byte-aligned\nraw data writing it in `target`, where `write_pos` specifies from which\nbyte of the raw data stream to start recovering to, up to `max_write_size`\nbytes.\n\nThere are 3 limits that tell us how much padded data to process in\neach iteration (`bits_to_extract`):\n1. Element boundary: we can process only one element at a time (to be\n   able to skip the padding bits).\n2. End of `source`: no more data to read.\n3. No more space to write the recovered raw data: we shouldn't write\n   into the `target` beyond `max_write_size`.\n\nThe reader will generally operate with bit precision, even if the padded\nlayout is byte-aligned (no extra bits) the data inside it isn't (since\nwe pad at the bit-level).\n**/\npub fn write_unpadded_aux<W: ?Sized>(\n    padding_map: &PaddingMap,\n    source: &[u8],\n    target: &mut W,\n    write_pos: usize,\n    max_write_size: usize,\n) -> io::Result<usize>\nwhere\n    W: Write,\n{\n    // Position of the reader in the padded bit stream layout, deduced from\n    // the position of the writer (`write_pos`) in the raw data layout.\n    let mut read_pos = BitByte::from_bits(padding_map.transform_bit_offset(write_pos * 8, true));\n\n    // Specify the maximum data to recover (write) in bits, since the data unit\n    // in the element (in contrast with the original raw data that generated it)\n    // is not byte aligned.\n    let max_write_size_bits = max_write_size * 8;\n\n    // Estimate how many bytes we'll need for the `raw_data` to allocate\n    // them all at once. 
We need to take into account both how much we\n    // have left to read *and* write, and even then, since we may start\n    // in the middle of an element (`write_pos`) there's some variability\n    // as to how many padding bits will be encountered.\n    // Allow then an *over*-estimation error of 1 byte: `transform_bit_offset`\n    // has the implicit assumption that the data provided is starting at the\n    // beginning of an element, i.e., the padding bits are as far as possible,\n    // which maximizes the chances of not getting an extra `pad_bits` in the\n    // `source` (which are unpadded away and not carried to the `target`). That\n    // is, in this context `transform_bit_offset` is optimistic about the number\n    // of raw data bits we'll be able to recover from a fixed number of `source`\n    // bits.\n    let mut raw_data_size = BitByte::from_bits(\n        padding_map.transform_bit_offset(source.len() * 8 - read_pos.total_bits(), false),\n    )\n    .bytes_needed();\n    raw_data_size = min(raw_data_size, max_write_size);\n\n    // Recovered raw data unpadded from the `source` which will\n    // be written to the `target`.\n    let mut raw_data: Vec<u8> = Vec::with_capacity(raw_data_size);\n\n    // Total number of raw data bits we have written (unpadded from the `source`).\n    let mut written_bits = 0;\n    // Bit offset within the last byte at which the next write needs to happen\n    // (derived from `written_bits`), we keep track of this since we write in chunks\n    // that may not be byte-aligned.\n    let mut write_bit_offset = 0;\n\n    // If there is no more data to read or no more space to write, stop.\n    while read_pos.bytes < source.len() && written_bits < max_write_size_bits {\n        // (1): Find the element boundary and, assuming that there is a full\n        //      unit of data (which actually may be incomplete), how many bits\n        //      are left to read from `read_pos`.\n        let (next_element_position, mut bits_to_extract) = padding_map.next_boundary(&read_pos);\n\n        // (2): As the element may be incomplete check how much data is\n        //      actually available so as not to access the `source` past\n        //      its limit.\n        bits_to_extract = min(bits_to_extract, source.len() * 8 - read_pos.total_bits());\n\n        // (3): Don't read more than `max_write_size`.\n        let bits_left_to_write = max_write_size_bits - written_bits;\n        bits_to_extract = min(bits_to_extract, bits_left_to_write);\n\n        // Extract the next data unit from the element (or whatever space we\n        // have left to write) and reposition it in the `write_bit_offset`.\n        // N.B., the bit offset of the data in the original raw data byte\n        // stream and the same data in the padded layout are not necessarily\n        // the same (since the added padding bits shift it).\n        let mut recovered = extract_bits_and_shift(\n            &source,\n            read_pos.total_bits(),\n            bits_to_extract,\n            write_bit_offset,\n        );\n\n        if write_bit_offset != 0 {\n            // Since the two data units we are joining are not byte-aligned we can't\n            // just append the whole bytes to `raw_data`, we need to join the last\n            // byte of the already written `raw_data` with the first one of the data\n            // unit `recovered` in this iteration. 
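For instance, with\n            // `write_bit_offset` = 5 the last written byte has bits 0..4 valid\n            // (e.g. `0b000_10101`) and `recovered[0]` arrives with its bits\n            // relocated to start at offset 5 (e.g. `0b101_00000`), so `OR`-ing\n            // them yields the complete byte `0b101_10101`. 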
Since `extract_bits_and_shift` already\n            // takes care of setting to zero the bits beyond the extraction limit we\n            // can just `OR` the two.\n            *(raw_data.last_mut().unwrap()) |= *(recovered.first().unwrap());\n            raw_data.append(&mut recovered[1..].to_vec());\n        } else {\n            raw_data.append(&mut recovered);\n        }\n\n        written_bits += bits_to_extract;\n        write_bit_offset = written_bits % 8;\n\n        // Position the reader in the next element boundary, this will be ignored\n        // if we already hit limits (2) or (3) (in that case this was the last iteration).\n        read_pos = BitByte {\n            bytes: next_element_position,\n            bits: 0,\n        };\n    }\n\n    // TODO: Don't write the whole output into a huge BitVec.\n    // Instead, write it incrementally –\n    // but ONLY when the bits waiting in bits_out are byte-aligned. i.e. a multiple of 8\n\n    // Check that our estimated size was correct, allow it to be overestimated\n    // (not *under*) by 1 byte.\n    debug_assert!(raw_data_size - raw_data.len() <= 1);\n    debug_assert!(raw_data_size >= raw_data.len());\n\n    target.write_all(&raw_data)?;\n\n    Ok(raw_data.len())\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use itertools::Itertools;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn test_position() {\n        let mut bits = 0;\n        for i in 0..10 {\n            for j in 0..8 {\n                let position = BitByte { bytes: i, bits: j };\n                assert_eq!(position.total_bits(), bits);\n                bits += 1;\n            }\n        }\n    }\n\n    // Test the `extract_bits_le` function against the `BitVec` functionality\n    // (assumed to be correct).\n    #[test]\n    fn test_random_bit_extraction() {\n        // Length of the data vector we'll be extracting from.\n        let len = 20;\n\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n        let data: Vec<u8> = (0..len).map(|_| rng.gen()).collect();\n\n        // TODO: Evaluate designing a scattered pattered of `pos` and `num_bits`\n        // instead of repeating too many iterations with any number.\n        for _ in 0..100 {\n            let pos = rng.gen_range(0, data.len() / 2);\n            let num_bits = rng.gen_range(1, data.len() * 8 - pos);\n            let new_offset = rng.gen_range(0, 8);\n\n            let mut bv = BitVecLEu8::new();\n            bv.extend(\n                BitVecLEu8::from(&data[..])\n                    .into_iter()\n                    .skip(pos)\n                    .take(num_bits),\n            );\n            let shifted_bv: BitVecLEu8 = bv >> new_offset;\n\n            assert_eq!(\n                shifted_bv.as_slice(),\n                &extract_bits_and_shift(&data, pos, num_bits, new_offset)[..],\n            );\n        }\n    }\n\n    // Test the `shift_bits` function against the `BitVec<LittleEndian, u8>`\n    // implementation of `shr_assign` and `shl_assign`.\n    #[test]\n    fn test_bit_shifts() {\n        let len = 5;\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for amount in 1..8 {\n            for left in [true, false].iter() {\n                let data: Vec<u8> = (0..len).map(|_| rng.gen()).collect();\n\n                let shifted_bits = shift_bits(&data, amount, *left);\n\n                let mut bv: BitVec<LittleEndian, u8> = data.into();\n                if *left {\n                    bv >>= 
amount;\n                } else {\n                    bv <<= amount;\n                }\n                // We use the opposite shift notation (see `shift_bits`).\n\n                assert_eq!(bv.as_slice(), shifted_bits.as_slice());\n            }\n        }\n    }\n\n    // Simple (and slow) padder implementation using `BitVec`.\n    // It is technically not quite right to use `BitVec` to test\n    // `write_padded` since at the moment that function still uses\n    // it for some corner cases, but since this implementation has\n    // largely been replaced it seems reasonable.\n    fn bit_vec_padding(raw_data: Vec<u8>) -> Box<[u8]> {\n        let mut padded_data: BitVec<LittleEndian, u8> = BitVec::new();\n        let raw_data: BitVec<LittleEndian, u8> = BitVec::from(raw_data);\n\n        for data_unit in raw_data\n            .into_iter()\n            .chunks(FR32_PADDING_MAP.data_bits)\n            .into_iter()\n        {\n            padded_data.extend(data_unit.into_iter());\n\n            // To avoid reconverting the iterator, we deduce if we need the padding\n            // by the length of `padded_data`: a full data unit would not leave the\n            // padded layout aligned (it would leave it unaligned by just `pad_bits()`).\n            if padded_data.len() % 8 != 0 {\n                for _ in 0..FR32_PADDING_MAP.pad_bits() {\n                    padded_data.push(false);\n                }\n            }\n        }\n\n        padded_data.into_boxed_slice()\n    }\n\n    // `write_padded` and `write_unpadded` for 1016 bytes of 1s, check the\n    // recovered raw data.\n    #[test]\n    fn test_read_write_padded() {\n        let len = 1016; // Use a multiple of 254.\n        let data = vec![255u8; len];\n        let mut padded = Vec::new();\n        let mut reader = crate::fr32_reader::Fr32Reader::new(io::Cursor::new(&data));\n        reader.read_to_end(&mut padded).unwrap();\n\n        assert_eq!(\n            padded.len(),\n            FR32_PADDING_MAP.transform_byte_offset(len, true)\n        );\n\n        let mut unpadded = Vec::new();\n        let unpadded_written = write_unpadded(&padded, &mut unpadded, 0, len).unwrap();\n        assert_eq!(unpadded_written, len);\n        assert_eq!(data, unpadded);\n        assert_eq!(padded.into_boxed_slice(), bit_vec_padding(data));\n    }\n\n    // `write_padded` and `write_unpadded` for 1016 bytes of random data, recover\n    // different lengths of raw data at different offsets, check integrity.\n    #[test]\n    fn test_read_write_padded_offset() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let len = 1016;\n        let data: Vec<u8> = (0..len).map(|_| rng.gen()).collect();\n\n        let mut padded = Vec::new();\n        let mut reader = crate::fr32_reader::Fr32Reader::new(io::Cursor::new(&data));\n        reader.read_to_end(&mut padded).unwrap();\n\n        {\n            let mut unpadded = Vec::new();\n            write_unpadded(&padded, &mut unpadded, 0, 1016).unwrap();\n            let expected = &data[0..1016];\n\n            assert_eq!(expected.len(), unpadded.len());\n            assert_eq!(expected, &unpadded[..]);\n        }\n\n        {\n            let mut unpadded = Vec::new();\n            write_unpadded(&padded, &mut unpadded, 0, 44).unwrap();\n            let expected = &data[0..44];\n\n            assert_eq!(expected.len(), unpadded.len());\n            assert_eq!(expected, &unpadded[..]);\n        }\n\n        let excessive_len = 35;\n        for start in (1016 - excessive_len + 
2)..1016 {\n            assert!(write_unpadded(&padded, &mut Vec::new(), start, excessive_len).is_err());\n        }\n    }\n\n    // TODO: Add a test that drops the last part of an element and tries to recover\n    // the rest of the data (may already be present in some form in the above tests).\n}\n"
  },
  {
    "path": "filecoin-proofs/src/fr32_reader.rs",
    "content": "use std::io;\n\nconst DATA_BITS: u64 = 254;\nconst TARGET_BITS: u64 = 256;\n\n#[derive(Debug)]\npub struct Fr32Reader<R> {\n    /// The source being padded.\n    source: R,\n    /// How much of the target already was `read` from, in bits.\n    target_offset: u64,\n    /// Currently read byte.\n    buffer: Buffer,\n    /// Are we done reading?\n    done: bool,\n}\n\nimpl<R: io::Read> Fr32Reader<R> {\n    pub fn new(source: R) -> Self {\n        Fr32Reader {\n            source,\n            target_offset: 0,\n            buffer: Default::default(),\n            done: false,\n        }\n    }\n\n    fn read_u8_no_pad(&mut self, target: &mut [u8]) -> io::Result<usize> {\n        target[0] = self.buffer.read_u8();\n        self.target_offset += 8;\n\n        Ok(1)\n    }\n\n    fn read_u16_no_pad(&mut self, target: &mut [u8]) -> io::Result<usize> {\n        self.buffer.read_u16_into(&mut target[..2]);\n        self.target_offset += 16;\n\n        Ok(2)\n    }\n\n    fn read_u32_no_pad(&mut self, target: &mut [u8]) -> io::Result<usize> {\n        self.buffer.read_u32_into(&mut target[..4]);\n        self.target_offset += 32;\n\n        Ok(4)\n    }\n\n    fn read_u64_no_pad(&mut self, target: &mut [u8]) -> io::Result<usize> {\n        self.buffer.read_u64_into(&mut target[..8]);\n        self.target_offset += 64;\n\n        Ok(8)\n    }\n\n    /// Read up to 8 bytes into the targets first element.\n    /// Assumes that target is not empty.\n    fn read_bytes(&mut self, target: &mut [u8]) -> io::Result<usize> {\n        let bit_pos = self.target_offset % TARGET_BITS;\n        let bits_to_padding = if bit_pos < DATA_BITS {\n            DATA_BITS as usize - bit_pos as usize\n        } else {\n            0\n        };\n\n        if bits_to_padding >= 8 {\n            self.fill_buffer()?;\n        }\n\n        let available = self.buffer.available();\n        if available > 0 {\n            let target_len = target.len();\n            // Try to avoid padding, and copy as much as possible over at once.\n\n            if bits_to_padding >= 64 && available >= 64 && target_len >= 8 {\n                return self.read_u64_no_pad(target);\n            }\n\n            if bits_to_padding >= 32 && available >= 32 && target_len >= 4 {\n                return self.read_u32_no_pad(target);\n            }\n\n            if bits_to_padding >= 16 && available >= 16 && target_len >= 2 {\n                return self.read_u16_no_pad(target);\n            }\n\n            if bits_to_padding >= 8 && available >= 8 && target_len >= 1 {\n                return self.read_u8_no_pad(target);\n            }\n        }\n\n        self.read_u8_padded(target, bits_to_padding, available)\n    }\n\n    fn read_u8_padded(\n        &mut self,\n        target: &mut [u8],\n        bits_to_padding: usize,\n        available: u64,\n    ) -> io::Result<usize> {\n        target[0] = 0;\n\n        if available >= 6 {\n            match bits_to_padding {\n                6 => {\n                    target[0] = self.buffer.read_u8_range(6);\n                    self.target_offset += 8;\n                    return Ok(1);\n                }\n                5 => {\n                    target[0] = self.buffer.read_u8_range(5);\n                    if self.buffer.read_bit() {\n                        set_bit(&mut target[0], 7);\n                    }\n                    self.target_offset += 8;\n                    return Ok(1);\n                }\n                _ => {}\n            }\n        }\n\n        for i in 0..8 {\n  
          if self.target_offset % TARGET_BITS < DATA_BITS {\n                if !self.fill_buffer()? {\n                    if i > 0 {\n                        return Ok(1);\n                    } else {\n                        return Ok(0);\n                    }\n                }\n\n                if self.buffer.read_bit() {\n                    set_bit(&mut target[0], i);\n                }\n            };\n\n            self.target_offset += 1;\n        }\n\n        Ok(1)\n    }\n\n    /// Fill the inner buffer, only if necessary. Returns `true` if more data is available.\n    fn fill_buffer(&mut self) -> io::Result<bool> {\n        if self.buffer.available() > 0 {\n            // Nothing to do, already some data available.\n            return Ok(true);\n        }\n\n        let read = self.source.read(&mut self.buffer[..])?;\n        self.buffer.reset_available(read as u64 * 8);\n\n        Ok(read > 0)\n    }\n}\n\nimpl<R: io::Read> io::Read for Fr32Reader<R> {\n    fn read(&mut self, target: &mut [u8]) -> io::Result<usize> {\n        if self.done || target.is_empty() {\n            return Ok(0);\n        }\n\n        let mut read = 0;\n        while read < target.len() {\n            let current_read = self.read_bytes(&mut target[read..])?;\n            read += current_read;\n\n            if current_read == 0 {\n                self.done = true;\n                break;\n            }\n        }\n\n        Ok(read)\n    }\n}\n\nfn set_bit(x: &mut u8, bit: usize) {\n    *x |= 1 << bit\n}\n\nuse std::ops::{Deref, DerefMut};\n\n#[derive(Debug, Default, Clone, Copy)]\nstruct Buffer {\n    data: u64,\n    /// Bits already consumed.\n    pos: u64,\n    /// Bits available.\n    avail: u64,\n}\n\nimpl Deref for Buffer {\n    type Target = [u8; 8];\n\n    fn deref(&self) -> &Self::Target {\n        unsafe { &*(&self.data as *const u64 as *const [u8; 8]) }\n    }\n}\n\nimpl DerefMut for Buffer {\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        unsafe { &mut *(&mut self.data as *mut u64 as *mut [u8; 8]) }\n    }\n}\n\nimpl Buffer {\n    /// How many bits are available to read.\n    #[inline]\n    pub fn available(&self) -> u64 {\n        self.avail - self.pos\n    }\n\n    pub fn reset_available(&mut self, bits: u64) {\n        self.pos = 0;\n        self.avail = bits;\n    }\n\n    /// Read a single bit at the current position.\n    pub fn read_bit(&mut self) -> bool {\n        let res = self.data & (1 << self.pos) != 0;\n        debug_assert!(self.available() >= 1);\n        self.pos += 1;\n        res\n    }\n\n    #[cfg(target_endian = \"little\")]\n    pub fn read_u8_range(&mut self, len: u64) -> u8 {\n        use bitintr::Bextr;\n        debug_assert!(self.available() >= len);\n        let res = self.data.bextr(self.pos, len) as u8;\n        self.pos += len;\n        res\n    }\n\n    #[cfg(target_endian = \"little\")]\n    pub fn read_u8(&mut self) -> u8 {\n        use bitintr::Bextr;\n        debug_assert!(self.available() >= 8);\n        let res = self.data.bextr(self.pos, 8) as u8;\n        self.pos += 8;\n        res\n    }\n\n    #[cfg(target_endian = \"little\")]\n    pub fn read_u16(&mut self) -> u16 {\n        debug_assert!(self.available() >= 16);\n\n        use bitintr::Bextr;\n        let res = self.data.bextr(self.pos, 16) as u16;\n        self.pos += 16;\n        res\n    }\n\n    #[cfg(target_endian = \"little\")]\n    pub fn read_u16_into(&mut self, target: &mut [u8]) {\n        assert!(target.len() >= 2);\n\n        let value = 
self.read_u16().to_le_bytes();\n        target[0] = value[0];\n        target[1] = value[1];\n    }\n\n    #[cfg(target_endian = \"little\")]\n    pub fn read_u32(&mut self) -> u32 {\n        debug_assert!(self.available() >= 32);\n\n        use bitintr::Bextr;\n        let res = self.data.bextr(self.pos, 32) as u32;\n        self.pos += 32;\n        res\n    }\n\n    #[cfg(target_endian = \"little\")]\n    pub fn read_u32_into(&mut self, target: &mut [u8]) {\n        assert!(target.len() >= 4);\n        let value = self.read_u32().to_le_bytes();\n        target[0] = value[0];\n        target[1] = value[1];\n        target[2] = value[2];\n        target[3] = value[3];\n    }\n\n    pub fn read_u64(&mut self) -> u64 {\n        debug_assert!(self.available() >= 64);\n\n        self.pos += 64;\n        self.data\n    }\n\n    #[cfg(target_endian = \"little\")]\n    pub fn read_u64_into(&mut self, target: &mut [u8]) {\n        assert!(target.len() >= 8);\n        let value = self.read_u64().to_le_bytes();\n        target[0] = value[0];\n        target[1] = value[1];\n        target[2] = value[2];\n        target[3] = value[3];\n        target[4] = value[4];\n        target[5] = value[5];\n        target[6] = value[6];\n        target[7] = value[7];\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use pretty_assertions::assert_eq;\n    use std::io::Read;\n\n    #[test]\n    fn test_buffer_read_bit() {\n        let mut buffer = Buffer::default();\n        let val = 12345u64.to_le_bytes();\n        buffer.copy_from_slice(&val[..]);\n        buffer.reset_available(64);\n\n        for i in 0..8 {\n            assert_eq!(buffer.read_bit(), 0 != val[0] & (1 << i));\n        }\n    }\n\n    #[test]\n    fn test_buffer_read_u8() {\n        let mut buffer = Buffer::default();\n        let val = 12345u64.to_le_bytes();\n        buffer.copy_from_slice(&val[..]);\n        buffer.reset_available(64);\n\n        for i in 0..8 {\n            let read = buffer.read_u8();\n            assert_eq!(read, val[i], \"failed to read byte {}\", i);\n        }\n    }\n\n    #[test]\n    fn test_buffer_read_u16() {\n        let mut buffer = Buffer::default();\n        let val = 12345u64.to_le_bytes();\n        buffer.copy_from_slice(&val[..]);\n        buffer.reset_available(64);\n\n        for val in val.chunks(2) {\n            let read = buffer.read_u16();\n            assert_eq!(read, u16::from_le_bytes([val[0], val[1]]));\n        }\n    }\n\n    #[test]\n    fn test_buffer_read_u32() {\n        let mut buffer = Buffer::default();\n        let val = 12345u64.to_le_bytes();\n        buffer.copy_from_slice(&val[..]);\n        buffer.reset_available(64);\n\n        for val in val.chunks(4) {\n            let read = buffer.read_u32();\n            assert_eq!(read, u32::from_le_bytes([val[0], val[1], val[2], val[3]]));\n        }\n    }\n\n    #[test]\n    fn test_buffer_read_u64() {\n        let mut buffer = Buffer::default();\n        let val = 12345u64;\n        buffer.copy_from_slice(&val.to_le_bytes()[..]);\n        buffer.reset_available(64);\n\n        let read = buffer.read_u64();\n        assert_eq!(read, val);\n    }\n\n    #[test]\n    fn test_simple_short() {\n        // Source is shorter than 1 padding cycle.\n        let data = vec![3u8; 30];\n        let mut reader = Fr32Reader::new(io::Cursor::new(&data));\n        let mut padded = Vec::new();\n        reader.read_to_end(&mut padded).unwrap();\n        assert_eq!(&data[..], &padded[..]);\n\n        assert_eq!(padded.into_boxed_slice(), 
bit_vec_padding(data));\n    }\n\n    #[test]\n    fn test_simple_single() {\n        let data = vec![255u8; 32];\n        let mut padded = Vec::new();\n        let mut reader = Fr32Reader::new(io::Cursor::new(&data));\n        reader.read_to_end(&mut padded).unwrap();\n\n        assert_eq!(&padded[0..31], &data[0..31]);\n        assert_eq!(padded[31], 0b0011_1111);\n        assert_eq!(padded[32], 0b0000_0011);\n        assert_eq!(padded.len(), 33);\n\n        assert_eq!(padded.into_boxed_slice(), bit_vec_padding(data));\n    }\n\n    #[test]\n    fn test_simple_127() {\n        let data = vec![255u8; 127];\n        let mut padded = Vec::new();\n        let mut reader = Fr32Reader::new(io::Cursor::new(&data));\n        reader.read_to_end(&mut padded).unwrap();\n\n        assert_eq!(&padded[0..31], &data[0..31]);\n        assert_eq!(padded[31], 0b0011_1111);\n        assert_eq!(padded[32], 0b1111_1111);\n\n        assert_eq!(padded.len(), 128);\n\n        assert_eq!(padded.into_boxed_slice(), bit_vec_padding(data));\n    }\n\n    #[test]\n    fn test_chained_byte_source() {\n        let random_bytes: Vec<u8> = (0..127).map(|_| rand::random::<u8>()).collect();\n\n        // read 127 bytes from a non-chained source\n        let output_x = {\n            let input_x = io::Cursor::new(random_bytes.clone());\n\n            let mut reader = Fr32Reader::new(input_x);\n            let mut buf_x = Vec::new();\n            reader.read_to_end(&mut buf_x).expect(\"could not read\");\n            buf_x\n        };\n\n        for n in 1..127 {\n            let random_bytes = random_bytes.clone();\n\n            // read 127 bytes from an n-byte buffer and then the rest\n            let output_y = {\n                let input_y =\n                    io::Cursor::new(random_bytes.iter().take(n).cloned().collect::<Vec<u8>>())\n                        .chain(io::Cursor::new(\n                            random_bytes.iter().skip(n).cloned().collect::<Vec<u8>>(),\n                        ));\n\n                let mut reader = Fr32Reader::new(input_y);\n                let mut buf_y = Vec::new();\n                reader.read_to_end(&mut buf_y).expect(\"could not read\");\n\n                buf_y\n            };\n\n            assert_eq!(&output_x, &output_y, \"should have written same bytes\");\n            assert_eq!(\n                output_x.clone().into_boxed_slice(),\n                bit_vec_padding(random_bytes)\n            );\n        }\n    }\n\n    #[test]\n    fn test_full() {\n        let data = vec![255u8; 127];\n\n        let mut buf = Vec::new();\n        let mut reader = Fr32Reader::new(io::Cursor::new(&data));\n        reader.read_to_end(&mut buf).unwrap();\n\n        assert_eq!(buf.clone().into_boxed_slice(), bit_vec_padding(data));\n        validate_fr32(&buf);\n    }\n\n    #[test]\n    #[ignore]\n    fn test_long() {\n        use rand::RngCore;\n\n        let mut rng = rand::thread_rng();\n        for i in 1..100 {\n            for j in 0..50 {\n                let mut data = vec![0u8; i * j];\n                rng.fill_bytes(&mut data);\n\n                let mut buf = Vec::new();\n                let mut reader = Fr32Reader::new(io::Cursor::new(&data));\n                reader.read_to_end(&mut buf).unwrap();\n\n                assert_eq!(buf.clone().into_boxed_slice(), bit_vec_padding(data));\n            }\n        }\n    }\n\n    // Simple (and slow) padder implementation using `BitVec`.\n    // It is technically not quite right to use `BitVec` to test this, since at\n    // the moment 
the padding code still uses\n    // it for some corner cases; but since this implementation has largely\n    // been replaced, it seems reasonable.\n    fn bit_vec_padding(raw_data: Vec<u8>) -> Box<[u8]> {\n        use bitvec::{order::Lsb0 as LittleEndian, vec::BitVec};\n        use itertools::Itertools;\n\n        let mut padded_data: BitVec<LittleEndian, u8> = BitVec::new();\n        let raw_data: BitVec<LittleEndian, u8> = BitVec::from(raw_data);\n\n        for data_unit in raw_data.into_iter().chunks(DATA_BITS as usize).into_iter() {\n            padded_data.extend(data_unit.into_iter());\n\n            // To avoid reconverting the iterator, we deduce if we need the padding\n            // by the length of `padded_data`: a full data unit would not leave the\n            // padded layout aligned (it would leave it unaligned by just `pad_bits()`).\n            if padded_data.len() % 8 != 0 {\n                for _ in 0..(TARGET_BITS - DATA_BITS) {\n                    padded_data.push(false);\n                }\n            }\n        }\n\n        padded_data.into_boxed_slice()\n    }\n\n    fn validate_fr32(bytes: &[u8]) {\n        let chunks = (bytes.len() as f64 / 32f64).ceil() as usize;\n        for (i, chunk) in bytes.chunks(32).enumerate() {\n            let _ = storage_proofs::fr32::bytes_into_fr(chunk).expect(&format!(\n                \"chunk {}/{} cannot be converted to valid Fr: {:?}\",\n                i + 1,\n                chunks,\n                chunk\n            ));\n        }\n    }\n\n    // Given a raw data stream of increasing values with specific\n    // outliers (0xFF, 9), check the content of the raw data encoded (with\n    // different alignments) in the padded layouts.\n    #[test]\n    fn test_exotic() {\n        let mut source = vec![\n            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,\n            25, 26, 27, 28, 29, 30, 31, 0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,\n            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0xff, 9, 9,\n        ];\n        source.extend(vec![9, 0xff]);\n\n        let mut buf = Vec::new();\n        let mut reader = Fr32Reader::new(io::Cursor::new(&source));\n        reader.read_to_end(&mut buf).unwrap();\n\n        for i in 0..31 {\n            assert_eq!(buf[i], i as u8 + 1);\n        }\n        assert_eq!(buf[31], 63); // Six least significant bits of 0xff\n        assert_eq!(buf[32], (1 << 2) | 0b11); // 7\n        for i in 33..63 {\n            assert_eq!(buf[i], (i as u8 - 31) << 2);\n        }\n        assert_eq!(buf[63], (0x0f << 2)); // 4 bits of ones, half of 0xff, shifted by two, followed by two bits of 0-padding.\n        assert_eq!(buf[64], 0x0f | 9 << 4); // The last half of 0xff, 'followed' by 9.\n        assert_eq!(buf[65], 9 << 4); // A shifted 9.\n        assert_eq!(buf[66], 9 << 4); // Another.\n        assert_eq!(buf[67], 0xf0); // The final 0xff is split into two bytes. Here is the first half.\n        assert_eq!(buf[68], 0x0f); // And here is the second.\n\n        assert_eq!(buf.into_boxed_slice(), bit_vec_padding(source));\n    }\n}\n"
  },
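  {
    "path": "filecoin-proofs/examples/fr32_reader_usage.rs",
    "content": "//! Illustrative usage sketch, not a file from the original crate: the file\n//! name and the public `fr32_reader` module it imports are assumptions. It\n//! demonstrates the reader's contract: two zero padding bits are inserted\n//! after every 254 data bits, so 127 unpadded bytes become 128 padded bytes.\n\nuse std::io::{self, Read};\n\nuse filecoin_proofs::fr32_reader::Fr32Reader;\n\nfn main() -> io::Result<()> {\n    // 127 bytes == 1016 bits == 4 full 254-bit data units.\n    let data = vec![0xffu8; 127];\n    let mut reader = Fr32Reader::new(io::Cursor::new(&data));\n\n    let mut padded = Vec::new();\n    reader.read_to_end(&mut padded)?;\n\n    assert_eq!(padded.len(), 128);\n    // The first 31 bytes pass through untouched; byte 31 keeps only six data\n    // bits before the first two-bit padding gap (cf. `test_simple_127`).\n    assert_eq!(&padded[..31], &data[..31]);\n    assert_eq!(padded[31], 0b0011_1111);\n    Ok(())\n}\n"
  },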
  {
    "path": "filecoin-proofs/src/lib.rs",
    "content": "#![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)]\n#![warn(clippy::unwrap_used)]\n#![warn(clippy::upper_case_acronyms)]\n#![warn(clippy::unnecessary_wraps)]\n\npub mod constants;\npub mod param;\npub mod parameters;\npub mod pieces;\npub mod types;\n\nmod api;\nmod caches;\nmod commitment_reader;\n\npub use api::*;\npub use commitment_reader::*;\npub use constants::*;\npub use types::*;\n"
  },
  {
    "path": "filecoin-proofs/src/param.rs",
    "content": "use std::collections::BTreeMap;\nuse std::ffi::OsStr;\nuse std::fs::File;\nuse std::io;\nuse std::path::{Path, PathBuf};\n\nuse anyhow::{Context, Result};\nuse blake2b_simd::State as Blake2b;\nuse storage_proofs_core::parameter_cache::{\n    parameter_cache_dir, CacheEntryMetadata, PARAMETER_METADATA_EXT,\n};\n\n// Produces an absolute path to a file within the cache\npub fn get_full_path_for_file_within_cache(filename: &str) -> PathBuf {\n    let mut path = parameter_cache_dir();\n    path.push(filename);\n    path\n}\n\n// Produces a BLAKE2b checksum for a file within the cache\npub fn get_digest_for_file_within_cache(filename: &str) -> Result<String> {\n    let path = get_full_path_for_file_within_cache(filename);\n    let mut file = File::open(&path).with_context(|| format!(\"could not open path={:?}\", path))?;\n    let mut hasher = Blake2b::new();\n\n    io::copy(&mut file, &mut hasher)?;\n\n    Ok(hasher.finalize().to_hex()[..32].into())\n}\n\n// Predicate which matches the provided extension against the given filename\npub fn has_extension<S: AsRef<str>, P: AsRef<Path>>(filename: P, ext: S) -> bool {\n    filename\n        .as_ref()\n        .extension()\n        .and_then(OsStr::to_str)\n        .map(|s| s == ext.as_ref())\n        .unwrap_or(false)\n}\n\n// Adds a file extension to the given filename\npub fn add_extension(filename: &str, ext: &str) -> String {\n    format!(\"{}.{}\", filename, ext)\n}\n\n/// Builds a map from a parameter_id (file in cache) to metadata.\npub fn parameter_id_to_metadata_map(\n    parameter_ids: &[String],\n) -> Result<BTreeMap<String, CacheEntryMetadata>> {\n    let mut map: BTreeMap<String, CacheEntryMetadata> = Default::default();\n\n    for parameter_id in parameter_ids {\n        let filename = add_extension(parameter_id, PARAMETER_METADATA_EXT);\n        let file_path = get_full_path_for_file_within_cache(&filename);\n        let file = File::open(&file_path)\n            .with_context(|| format!(\"could not open path={:?}\", file_path))?;\n\n        let meta = serde_json::from_reader(file)?;\n\n        map.insert(parameter_id.to_string(), meta);\n    }\n\n    Ok(map)\n}\n\n/// Maps the name of a file in the cache to its parameter id. For example,\n/// ABCDEF.vk corresponds to parameter id ABCDEF.\npub fn filename_to_parameter_id<'a, P: AsRef<Path> + 'a>(filename: P) -> Option<String> {\n    filename\n        .as_ref()\n        .file_stem()\n        .and_then(OsStr::to_str)\n        .map(ToString::to_string)\n}\n"
  },
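  {
    "path": "filecoin-proofs/examples/param_naming.rs",
    "content": "//! Illustrative sketch, not a file from the original crate (the file name is\n//! an assumption): round-trips a cache filename through the `param` helpers,\n//! mirroring the doc comment on `filename_to_parameter_id` (ABCDEF.vk\n//! corresponds to parameter id ABCDEF).\n\nuse filecoin_proofs::param::{add_extension, filename_to_parameter_id, has_extension};\n\nfn main() {\n    let filename = add_extension(\"ABCDEF\", \"vk\");\n    assert_eq!(filename, \"ABCDEF.vk\");\n    assert!(has_extension(&filename, \"vk\"));\n    assert_eq!(\n        filename_to_parameter_id(&filename),\n        Some(\"ABCDEF\".to_string())\n    );\n}\n"
  },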
  {
    "path": "filecoin-proofs/src/parameters.rs",
    "content": "use anyhow::{ensure, Result};\nuse storage_proofs_core::{api_version::ApiVersion, proof::ProofScheme};\nuse storage_proofs_porep::stacked::{self, LayerChallenges, StackedDrg};\nuse storage_proofs_post::fallback::{self, FallbackPoSt};\n\nuse crate::{\n    constants::{DefaultPieceHasher, DRG_DEGREE, EXP_DEGREE, LAYERS, POREP_MINIMUM_CHALLENGES},\n    types::{MerkleTreeTrait, PaddedBytesAmount, PoStConfig},\n};\n\ntype WinningPostSetupParams = fallback::SetupParams;\npub type WinningPostPublicParams = fallback::PublicParams;\n\ntype WindowPostSetupParams = fallback::SetupParams;\npub type WindowPostPublicParams = fallback::PublicParams;\n\npub fn public_params<Tree: 'static + MerkleTreeTrait>(\n    sector_bytes: PaddedBytesAmount,\n    partitions: usize,\n    porep_id: [u8; 32],\n    api_version: ApiVersion,\n) -> Result<stacked::PublicParams<Tree>> {\n    StackedDrg::<Tree, DefaultPieceHasher>::setup(&setup_params(\n        sector_bytes,\n        partitions,\n        porep_id,\n        api_version,\n    )?)\n}\n\npub fn winning_post_public_params<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n) -> Result<WinningPostPublicParams> {\n    FallbackPoSt::<Tree>::setup(&winning_post_setup_params(&post_config)?)\n}\n\npub fn winning_post_setup_params(post_config: &PoStConfig) -> Result<WinningPostSetupParams> {\n    ensure!(\n        post_config.challenge_count % post_config.sector_count == 0,\n        \"sector count must divide challenge count\"\n    );\n\n    let param_sector_count = post_config.challenge_count / post_config.sector_count;\n    let param_challenge_count = post_config.challenge_count / param_sector_count;\n\n    ensure!(\n        param_sector_count * param_challenge_count == post_config.challenge_count,\n        \"invald parameters calculated {} * {} != {}\",\n        param_sector_count,\n        param_challenge_count,\n        post_config.challenge_count\n    );\n\n    Ok(fallback::SetupParams {\n        sector_size: post_config.padded_sector_size().into(),\n        challenge_count: param_challenge_count,\n        sector_count: param_sector_count,\n        api_version: post_config.api_version,\n    })\n}\n\npub fn window_post_public_params<Tree: 'static + MerkleTreeTrait>(\n    post_config: &PoStConfig,\n) -> Result<WindowPostPublicParams> {\n    FallbackPoSt::<Tree>::setup(&window_post_setup_params(&post_config))\n}\n\npub fn window_post_setup_params(post_config: &PoStConfig) -> WindowPostSetupParams {\n    fallback::SetupParams {\n        sector_size: post_config.padded_sector_size().into(),\n        challenge_count: post_config.challenge_count,\n        sector_count: post_config.sector_count,\n        api_version: post_config.api_version,\n    }\n}\n\npub fn setup_params(\n    sector_bytes: PaddedBytesAmount,\n    partitions: usize,\n    porep_id: [u8; 32],\n    api_version: ApiVersion,\n) -> Result<stacked::SetupParams> {\n    let layer_challenges = select_challenges(\n        partitions,\n        *POREP_MINIMUM_CHALLENGES\n            .read()\n            .expect(\"POREP_MINIMUM_CHALLENGES poisoned\")\n            .get(&u64::from(sector_bytes))\n            .expect(\"unknown sector size\") as usize,\n        *LAYERS\n            .read()\n            .expect(\"LAYERS poisoned\")\n            .get(&u64::from(sector_bytes))\n            .expect(\"unknown sector size\"),\n    )?;\n    let sector_bytes = u64::from(sector_bytes);\n\n    ensure!(\n        sector_bytes % 32 == 0,\n        \"sector_bytes ({}) must be a multiple of 32\",\n        
sector_bytes,\n    );\n\n    let nodes = (sector_bytes / 32) as usize;\n    let degree = DRG_DEGREE;\n    let expansion_degree = EXP_DEGREE;\n\n    Ok(stacked::SetupParams {\n        nodes,\n        degree,\n        expansion_degree,\n        porep_id,\n        layer_challenges,\n        api_version,\n    })\n}\n\nfn select_challenges(\n    partitions: usize,\n    minimum_total_challenges: usize,\n    layers: usize,\n) -> Result<LayerChallenges> {\n    let mut count = 1;\n    let mut guess = LayerChallenges::new(layers, count);\n    while partitions * guess.challenges_count_all() < minimum_total_challenges {\n        count += 1;\n        guess = LayerChallenges::new(layers, count);\n    }\n    Ok(guess)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use crate::{DefaultOctLCTree, PoRepProofPartitions, PoStType};\n\n    #[test]\n    fn partition_layer_challenges_test() {\n        let f = |partitions| {\n            select_challenges(partitions, 12, 11)\n                .expect(\"never fails\")\n                .challenges_count_all()\n        };\n        // Update to ensure all supported PoRepProofPartitions options are represented here.\n        assert_eq!(6, f(usize::from(PoRepProofPartitions(2))));\n\n        assert_eq!(12, f(1));\n        assert_eq!(6, f(2));\n        assert_eq!(3, f(4));\n    }\n\n    #[test]\n    fn test_winning_post_params() {\n        let config = PoStConfig {\n            typ: PoStType::Winning,\n            priority: false,\n            challenge_count: 66,\n            sector_count: 1,\n            sector_size: 2048u64.into(),\n            api_version: ApiVersion::V1_0_0,\n        };\n\n        let params =\n            winning_post_public_params::<DefaultOctLCTree>(&config).expect(\"failed to get params\");\n        assert_eq!(params.sector_count, 66);\n        assert_eq!(params.challenge_count, 1);\n        assert_eq!(params.sector_size, 2048);\n    }\n}\n"
  },
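  {
    "path": "filecoin-proofs/examples/winning_post_params.rs",
    "content": "//! Illustrative sketch, not a file from the original crate (the file name is\n//! an assumption): shows how `winning_post_setup_params` redistributes\n//! challenges, mirroring `test_winning_post_params`. A config asking for 66\n//! challenges across 1 sector is re-expressed as 66 synthetic sectors that\n//! receive one challenge each.\n\nuse filecoin_proofs::parameters::winning_post_setup_params;\nuse filecoin_proofs::types::{PoStConfig, PoStType};\nuse storage_proofs_core::api_version::ApiVersion;\n\nfn main() -> anyhow::Result<()> {\n    let config = PoStConfig {\n        typ: PoStType::Winning,\n        priority: false,\n        challenge_count: 66,\n        sector_count: 1,\n        sector_size: 2048u64.into(),\n        api_version: ApiVersion::V1_0_0,\n    };\n\n    let params = winning_post_setup_params(&config)?;\n    assert_eq!(params.sector_count, 66);\n    assert_eq!(params.challenge_count, 1);\n    Ok(())\n}\n"
  },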
  {
    "path": "filecoin-proofs/src/pieces.rs",
    "content": "use std::collections::HashMap;\nuse std::io::Read;\nuse std::io::{self, Cursor};\nuse std::iter::Iterator;\nuse std::sync::Mutex;\n\nuse anyhow::{ensure, Context, Result};\nuse lazy_static::lazy_static;\nuse log::info;\nuse storage_proofs::hasher::{HashFunction, Hasher};\nuse storage_proofs::util::NODE_SIZE;\n\nuse crate::constants::{\n    DefaultPieceHasher,\n    MINIMUM_RESERVED_BYTES_FOR_PIECE_IN_FULLY_ALIGNED_SECTOR as MINIMUM_PIECE_SIZE,\n};\nuse crate::types::{\n    Commitment, PaddedBytesAmount, PieceInfo, SectorSize, UnpaddedByteIndex, UnpaddedBytesAmount,\n};\n\n/// Verify that the provided `piece_infos` and `comm_d` match.\npub fn verify_pieces(\n    comm_d: &Commitment,\n    piece_infos: &[PieceInfo],\n    sector_size: SectorSize,\n) -> Result<bool> {\n    let comm_d_calculated = compute_comm_d(sector_size, piece_infos)?;\n\n    Ok(&comm_d_calculated == comm_d)\n}\n\nlazy_static! {\n    static ref COMMITMENTS: Mutex<HashMap<SectorSize, Commitment>> = Mutex::new(HashMap::new());\n}\nuse crate::commitment_reader::CommitmentReader;\nuse crate::fr32_reader::Fr32Reader;\n\n#[derive(Debug, Clone)]\nstruct EmptySource {\n    size: usize,\n}\n\nimpl EmptySource {\n    pub fn new(size: usize) -> Self {\n        EmptySource { size }\n    }\n}\n\nimpl Read for EmptySource {\n    fn read(&mut self, target: &mut [u8]) -> io::Result<usize> {\n        let to_read = std::cmp::min(self.size, target.len());\n        self.size -= to_read;\n        for val in target {\n            *val = 0;\n        }\n\n        Ok(to_read)\n    }\n}\n\nfn empty_comm_d(sector_size: SectorSize) -> Commitment {\n    let map = &mut *COMMITMENTS.lock().unwrap();\n\n    *map.entry(sector_size).or_insert_with(|| {\n        let size: UnpaddedBytesAmount = sector_size.into();\n        let fr32_reader = Fr32Reader::new(EmptySource::new(size.into()));\n        let mut commitment_reader = CommitmentReader::new(fr32_reader);\n        io::copy(&mut commitment_reader, &mut io::sink()).unwrap();\n\n        let mut comm = [0u8; 32];\n        comm.copy_from_slice(\n            commitment_reader\n                .finish()\n                .expect(\"failed to create commitment\")\n                .as_ref(),\n        );\n        comm\n    })\n}\n\npub fn compute_comm_d(sector_size: SectorSize, piece_infos: &[PieceInfo]) -> Result<Commitment> {\n    info!(\"verifying {} pieces\", piece_infos.len());\n    if piece_infos.is_empty() {\n        return Ok(empty_comm_d(sector_size));\n    }\n\n    let unpadded_sector: UnpaddedBytesAmount = sector_size.into();\n\n    ensure!(\n        piece_infos.len() as u64 <= u64::from(unpadded_sector) / MINIMUM_PIECE_SIZE,\n        \"Too many pieces\"\n    );\n\n    // make sure the piece sizes are at most a sector size large\n    let piece_size: u64 = piece_infos\n        .iter()\n        .map(|info| u64::from(PaddedBytesAmount::from(info.size)))\n        .sum();\n\n    ensure!(\n        piece_size <= u64::from(sector_size),\n        \"Piece is larger than sector.\"\n    );\n\n    let mut stack = Stack::new();\n\n    let first = piece_infos.first().unwrap().clone();\n    ensure!(\n        u64::from(PaddedBytesAmount::from(first.size)).is_power_of_two(),\n        \"Piece size ({:?}) must be a power of 2.\",\n        PaddedBytesAmount::from(first.size)\n    );\n    stack.shift(first);\n\n    for piece_info in piece_infos.iter().skip(1) {\n        ensure!(\n            u64::from(PaddedBytesAmount::from(piece_info.size)).is_power_of_two(),\n            \"Piece size ({:?}) must be a power of 
2.\",\n            PaddedBytesAmount::from(piece_info.size)\n        );\n\n        while stack.peek().size < piece_info.size {\n            stack.shift_reduce(zero_padding(stack.peek().size)?)?\n        }\n\n        stack.shift_reduce(piece_info.clone())?;\n    }\n\n    while stack.len() > 1 {\n        stack.shift_reduce(zero_padding(stack.peek().size)?)?;\n    }\n\n    ensure!(stack.len() == 1, \"Stack size ({}) must be 1.\", stack.len());\n\n    let comm_d_calculated = stack.pop()?.commitment;\n\n    Ok(comm_d_calculated)\n}\n\n/// Stack used for piece reduction.\nstruct Stack(Vec<PieceInfo>);\n\nimpl Stack {\n    /// Creates a new stack.\n    pub fn new() -> Self {\n        Stack(Vec::new())\n    }\n\n    /// Pushes a single element onto the stack.\n    pub fn shift(&mut self, el: PieceInfo) {\n        self.0.push(el)\n    }\n\n    /// Look at the last element of the stack.\n    pub fn peek(&self) -> &PieceInfo {\n        &self.0[self.0.len() - 1]\n    }\n\n    /// Look at the second to last element of the stack.\n    pub fn peek2(&self) -> &PieceInfo {\n        &self.0[self.0.len() - 2]\n    }\n\n    /// Pop the last element of the stack.\n    pub fn pop(&mut self) -> Result<PieceInfo> {\n        self.0.pop().context(\"empty stack popped\")\n    }\n\n    pub fn reduce1(&mut self) -> Result<bool> {\n        if self.len() < 2 {\n            return Ok(false);\n        }\n\n        if self.peek().size == self.peek2().size {\n            let right = self.pop()?;\n            let left = self.pop()?;\n            let joined = join_piece_infos(left, right)?;\n            self.shift(joined);\n            return Ok(true);\n        }\n\n        Ok(false)\n    }\n\n    pub fn reduce(&mut self) -> Result<()> {\n        while self.reduce1()? {}\n        Ok(())\n    }\n\n    pub fn shift_reduce(&mut self, piece: PieceInfo) -> Result<()> {\n        self.shift(piece);\n        self.reduce()\n    }\n\n    pub fn len(&self) -> usize {\n        self.0.len()\n    }\n}\n\n/// Create a padding `PieceInfo` of size `size`.\nfn zero_padding(size: UnpaddedBytesAmount) -> Result<PieceInfo> {\n    let padded_size: PaddedBytesAmount = size.into();\n    let mut commitment = [0u8; 32];\n\n    // TODO: cache common piece hashes\n    let mut hashed_size = 64;\n    let h1 = piece_hash(&commitment, &commitment);\n    commitment.copy_from_slice(h1.as_ref());\n\n    while hashed_size < u64::from(padded_size) {\n        let h = piece_hash(&commitment, &commitment);\n        commitment.copy_from_slice(h.as_ref());\n        hashed_size *= 2;\n    }\n\n    ensure!(\n        hashed_size == u64::from(padded_size),\n        \"Hashed size must equal padded size\"\n    );\n\n    PieceInfo::new(commitment, size)\n}\n\n/// Join two equally sized `PieceInfo`s together, by hashing them and adding their sizes.\nfn join_piece_infos(mut left: PieceInfo, right: PieceInfo) -> Result<PieceInfo> {\n    ensure!(\n        left.size == right.size,\n        \"Piece sizes must be equal (left: {:?}, right: {:?})\",\n        left.size,\n        right.size\n    );\n    let h = piece_hash(&left.commitment, &right.commitment);\n\n    left.commitment.copy_from_slice(AsRef::<[u8]>::as_ref(&h));\n    left.size = left.size + right.size;\n    Ok(left)\n}\n\npub(crate) fn piece_hash(a: &[u8], b: &[u8]) -> <DefaultPieceHasher as Hasher>::Domain {\n    let mut buf = [0u8; NODE_SIZE * 2];\n    buf[..NODE_SIZE].copy_from_slice(a);\n    buf[NODE_SIZE..].copy_from_slice(b);\n    <DefaultPieceHasher as Hasher>::Function::hash(&buf)\n}\n\n#[derive(Debug, 
Clone)]\npub struct PieceAlignment {\n    pub left_bytes: UnpaddedBytesAmount,\n    pub right_bytes: UnpaddedBytesAmount,\n}\n\nimpl PieceAlignment {\n    pub fn sum(&self, piece_size: UnpaddedBytesAmount) -> UnpaddedBytesAmount {\n        self.left_bytes + piece_size + self.right_bytes\n    }\n}\n\n/// Given a list of pieces, sum the number of bytes taken by those pieces in that order.\npub fn sum_piece_bytes_with_alignment(pieces: &[UnpaddedBytesAmount]) -> UnpaddedBytesAmount {\n    pieces\n        .iter()\n        .fold(UnpaddedBytesAmount(0), |acc, piece_bytes| {\n            acc + get_piece_alignment(acc, *piece_bytes).sum(*piece_bytes)\n        })\n}\n\n/// Given a list of pieces, find the byte where a given piece does or would start.\npub fn get_piece_start_byte(\n    pieces: &[UnpaddedBytesAmount],\n    piece_bytes: UnpaddedBytesAmount,\n) -> UnpaddedByteIndex {\n    // sum up all the bytes taken by the ordered pieces\n    let last_byte = sum_piece_bytes_with_alignment(&pieces);\n    let alignment = get_piece_alignment(last_byte, piece_bytes);\n\n    // add only the left padding of the target piece to give the start of that piece's data\n    UnpaddedByteIndex::from(last_byte + alignment.left_bytes)\n}\n\n/// Given a number of bytes already written to a staged sector (ignoring bit padding) and a number\n/// of bytes (before bit padding) to be added, return the alignment required to create a piece where\n/// len(piece) == len(sector size)/(2^n) and sufficient left padding to ensure simple merkle proof\n/// construction.\npub fn get_piece_alignment(\n    written_bytes: UnpaddedBytesAmount,\n    piece_bytes: UnpaddedBytesAmount,\n) -> PieceAlignment {\n    let mut piece_bytes_needed = MINIMUM_PIECE_SIZE as u64;\n\n    // Calculate the next power of two multiple that will fully contain the piece's data.\n    // This is required to ensure a clean piece merkle root, without being affected by\n    // preceding or following pieces.\n    while piece_bytes_needed < u64::from(piece_bytes) {\n        piece_bytes_needed *= 2;\n    }\n\n    // Calculate the bytes being affected from the left of the piece by the previous piece.\n    let encroaching = u64::from(written_bytes) % piece_bytes_needed;\n\n    // Calculate the bytes to push from the left to ensure a clean piece merkle root.\n    let left_bytes = if encroaching > 0 {\n        piece_bytes_needed - encroaching\n    } else {\n        0\n    };\n\n    let right_bytes = piece_bytes_needed - u64::from(piece_bytes);\n\n    PieceAlignment {\n        left_bytes: UnpaddedBytesAmount(left_bytes),\n        right_bytes: UnpaddedBytesAmount(right_bytes),\n    }\n}\n\n/// Wraps a Readable source with null bytes on either end according to a provided PieceAlignment.\nfn with_alignment(source: impl Read, piece_alignment: PieceAlignment) -> impl Read {\n    let PieceAlignment {\n        left_bytes,\n        right_bytes,\n    } = piece_alignment;\n\n    let left_padding = Cursor::new(vec![0; left_bytes.into()]);\n    let right_padding = Cursor::new(vec![0; right_bytes.into()]);\n\n    left_padding.chain(source).chain(right_padding)\n}\n\n/// Given an enumeration of pieces in a staged sector and a piece to be added (represented by a Read\n/// and corresponding length, in UnpaddedBytesAmount) to the staged sector, produce a new Read and\n/// UnpaddedBytesAmount pair which includes the appropriate amount of alignment bytes for the piece\n/// to be written to the target staged sector.\npub fn get_aligned_source<T: Read>(\n    source: T,\n    pieces: 
&[UnpaddedBytesAmount],\n    piece_bytes: UnpaddedBytesAmount,\n) -> (UnpaddedBytesAmount, PieceAlignment, impl Read) {\n    let written_bytes = sum_piece_bytes_with_alignment(pieces);\n    let piece_alignment = get_piece_alignment(written_bytes, piece_bytes);\n    let expected_num_bytes_written =\n        piece_alignment.left_bytes + piece_bytes + piece_alignment.right_bytes;\n\n    (\n        expected_num_bytes_written,\n        piece_alignment.clone(),\n        with_alignment(source, piece_alignment),\n    )\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::api::util::commitment_from_fr;\n    use crate::constants::{DRG_DEGREE, EXP_DEGREE};\n    use crate::types::DataTree;\n\n    use paired::bls12_381::Fr;\n    use rand::{Rng, RngCore, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs::drgraph::{new_seed, Graph};\n    use storage_proofs::merkle::create_base_merkle_tree;\n    use storage_proofs::porep::stacked::StackedBucketGraph;\n\n    #[test]\n    fn test_empty_source() {\n        let mut source = EmptySource::new(12);\n        let mut target = Vec::new();\n        source.read_to_end(&mut target).unwrap();\n        assert_eq!(target, vec![0u8; 12]);\n    }\n\n    #[test]\n    fn test_compute_comm_d_empty() {\n        let comm_d = compute_comm_d(SectorSize(2048), &[]).unwrap();\n        assert_eq!(\n            comm_d,\n            [\n                252, 126, 146, 130, 150, 229, 22, 250, 173, 233, 134, 178, 143, 146, 212, 74, 79,\n                36, 185, 53, 72, 82, 35, 55, 106, 121, 144, 39, 188, 24, 248, 51\n            ]\n        );\n\n        let comm_d = compute_comm_d(SectorSize(128), &[]).unwrap();\n        assert_eq!(\n            hex::encode(&comm_d),\n            \"3731bb99ac689f66eef5973e4a94da188f4ddcae580724fc6f3fd60dfd488333\",\n        );\n    }\n\n    #[test]\n    fn test_get_piece_alignment() {\n        let table = vec![\n            (0, 0, (0, 127)),\n            (0, 127, (0, 0)),\n            (0, 254, (0, 0)),\n            (0, 508, (0, 0)),\n            (0, 1016, (0, 0)),\n            (127, 127, (0, 0)),\n            (127, 254, (127, 0)),\n            (127, 508, (381, 0)),\n            (100, 100, (27, 27)),\n            (200, 200, (54, 54)),\n            (300, 300, (208, 208)),\n        ];\n\n        for (bytes_in_sector, bytes_in_piece, (expected_left_align, expected_right_align)) in\n            table.clone()\n        {\n            let PieceAlignment {\n                left_bytes: UnpaddedBytesAmount(actual_left_align),\n                right_bytes: UnpaddedBytesAmount(actual_right_align),\n            } = get_piece_alignment(\n                UnpaddedBytesAmount(bytes_in_sector),\n                UnpaddedBytesAmount(bytes_in_piece),\n            );\n            assert_eq!(\n                (expected_left_align, expected_right_align),\n                (actual_left_align, actual_right_align)\n            );\n        }\n    }\n\n    #[test]\n    fn test_get_piece_start_byte() {\n        let pieces = [\n            UnpaddedBytesAmount(31),\n            UnpaddedBytesAmount(32),\n            UnpaddedBytesAmount(33),\n        ];\n\n        assert_eq!(\n            get_piece_start_byte(&pieces[..0], pieces[0]),\n            UnpaddedByteIndex(0)\n        );\n        assert_eq!(\n            get_piece_start_byte(&pieces[..1], pieces[1]),\n            UnpaddedByteIndex(127)\n        );\n        assert_eq!(\n            get_piece_start_byte(&pieces[..2], pieces[2]),\n            UnpaddedByteIndex(254)\n        );\n    }\n\n    
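// Builds the four-leaf piece tree drawn below by hand, then checks that\n    // verify_pieces accepts each subtree combination that covers the sector.\n    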
#[test]\n    fn test_verify_simple_pieces() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        //     g\n        //   /  \\\n        //  e    f\n        // / \\  / \\\n        // a  b c  d\n\n        let (a, b, c, d): ([u8; 32], [u8; 32], [u8; 32], [u8; 32]) = rng.gen();\n\n        let mut e = [0u8; 32];\n        let h = piece_hash(&a, &b);\n        e.copy_from_slice(h.as_ref());\n\n        let mut f = [0u8; 32];\n        let h = piece_hash(&c, &d);\n        f.copy_from_slice(h.as_ref());\n\n        let mut g = [0u8; 32];\n        let h = piece_hash(&e, &f);\n        g.copy_from_slice(h.as_ref());\n        let a = PieceInfo::new(a, UnpaddedBytesAmount(127)).unwrap();\n        let b = PieceInfo::new(b, UnpaddedBytesAmount(127)).unwrap();\n        let c = PieceInfo::new(c, UnpaddedBytesAmount(127)).unwrap();\n        let d = PieceInfo::new(d, UnpaddedBytesAmount(127)).unwrap();\n\n        let e = PieceInfo::new(e, UnpaddedBytesAmount(254)).unwrap();\n        let f = PieceInfo::new(f, UnpaddedBytesAmount(254)).unwrap();\n        let g = PieceInfo::new(g, UnpaddedBytesAmount(508)).unwrap();\n\n        let sector_size = SectorSize(4 * 128);\n        let comm_d = g.commitment;\n\n        // println!(\"e: {:?}\", e);\n        // println!(\"f: {:?}\", f);\n        // println!(\"g: {:?}\", g);\n\n        assert!(\n            verify_pieces(\n                &comm_d,\n                &vec![a.clone(), b.clone(), c.clone(), d.clone()],\n                sector_size\n            )\n            .expect(\"failed to verify\"),\n            \"[a, b, c, d]\"\n        );\n\n        assert!(\n            verify_pieces(&comm_d, &vec![e.clone(), c.clone(), d.clone()], sector_size)\n                .expect(\"failed to verify\"),\n            \"[e, c, d]\"\n        );\n\n        assert!(\n            verify_pieces(&comm_d, &vec![e.clone(), f.clone()], sector_size)\n                .expect(\"failed to verify\"),\n            \"[e, f]\"\n        );\n\n        assert!(\n            verify_pieces(&comm_d, &vec![a.clone(), b.clone(), f.clone()], sector_size)\n                .expect(\"failed to verify\"),\n            \"[a, b, f]\"\n        );\n\n        assert!(\n            verify_pieces(&comm_d, &vec![g], sector_size).expect(\"failed to verify\"),\n            \"[g]\"\n        );\n    }\n\n    #[test]\n    fn test_verify_padded_pieces() {\n        // [\n        //   {(A0 00) (BB BB)} -> A(1) P(1) P(1) P(1) B(4)\n        //   {(CC 00) (00 00)} -> C(2)      P(1) P(1) P(1) P(1) P(1) P(1)\n        // ]\n        // [\n        //   {(DD DD) (DD DD)} -> D(8)\n        //   {(00 00) (00 00)} -> P(1) P(1) P(1) P(1) P(1) P(1) P(1) P(1)\n        // ]\n\n        let sector_size = SectorSize(32 * 128);\n        let pad = zero_padding(UnpaddedBytesAmount(127)).unwrap();\n\n        let pieces = vec![\n            PieceInfo::new([1u8; 32], UnpaddedBytesAmount(1 * 127)).unwrap(),\n            PieceInfo::new([2u8; 32], UnpaddedBytesAmount(4 * 127)).unwrap(),\n            PieceInfo::new([3u8; 32], UnpaddedBytesAmount(2 * 127)).unwrap(),\n            PieceInfo::new([4u8; 32], UnpaddedBytesAmount(8 * 127)).unwrap(),\n        ];\n\n        let padded_pieces = vec![\n            PieceInfo::new([1u8; 32], UnpaddedBytesAmount(1 * 127)).unwrap(),\n            pad.clone(),\n            pad.clone(),\n            pad.clone(),\n            PieceInfo::new([2u8; 32], UnpaddedBytesAmount(4 * 127)).unwrap(),\n            PieceInfo::new([3u8; 32], UnpaddedBytesAmount(2 * 127)).unwrap(),\n            pad.clone(),\n           
 pad.clone(),\n            pad.clone(),\n            pad.clone(),\n            pad.clone(),\n            pad.clone(),\n            PieceInfo::new([4u8; 32], UnpaddedBytesAmount(8 * 127)).unwrap(),\n            pad.clone(),\n            pad.clone(),\n            pad.clone(),\n            pad.clone(),\n            pad.clone(),\n            pad.clone(),\n            pad.clone(),\n            pad.clone(),\n        ];\n\n        let hash = |a, b| {\n            let hash = piece_hash(a, b);\n            let mut res = [0u8; 32];\n            res.copy_from_slice(hash.as_ref());\n            res\n        };\n\n        let layer1: Vec<[u8; 32]> = vec![\n            hash(&padded_pieces[0].commitment, &padded_pieces[1].commitment), // 2: H(A(1) | P(1))\n            hash(&padded_pieces[2].commitment, &padded_pieces[3].commitment), // 2: H(P(1) | P(1))\n            padded_pieces[4].commitment,                                      // 4: B(4)\n            padded_pieces[5].commitment,                                      // 2: C(2)\n            hash(&padded_pieces[6].commitment, &padded_pieces[7].commitment), // 2: H(P(1) | P(1))\n            hash(&padded_pieces[8].commitment, &padded_pieces[9].commitment), // 2: H(P(1) | P(1))\n            hash(&padded_pieces[10].commitment, &padded_pieces[11].commitment), // 2: H(P(1) | P(1))\n            padded_pieces[12].commitment,                                     // 8: D(8)\n            hash(&padded_pieces[13].commitment, &padded_pieces[14].commitment), // 2: H(P(1) | P(1))\n            hash(&padded_pieces[15].commitment, &padded_pieces[16].commitment), // 2: H(P(1) | P(1))\n            hash(&padded_pieces[17].commitment, &padded_pieces[18].commitment), // 2: H(P(1) | P(1))\n            hash(&padded_pieces[19].commitment, &padded_pieces[20].commitment), // 2: H(P(1) | P(1))\n        ];\n\n        let layer2: Vec<[u8; 32]> = vec![\n            hash(&layer1[0], &layer1[1]),   // 4\n            layer1[2],                      // 4\n            hash(&layer1[3], &layer1[4]),   // 4\n            hash(&layer1[5], &layer1[6]),   // 4\n            layer1[7],                      // 8\n            hash(&layer1[8], &layer1[9]),   // 4\n            hash(&layer1[10], &layer1[11]), // 4\n        ];\n\n        let layer3 = vec![\n            hash(&layer2[0], &layer2[1]), // 8\n            hash(&layer2[2], &layer2[3]), // 8\n            layer2[4],                    // 8\n            hash(&layer2[5], &layer2[6]), // 8\n        ];\n\n        let layer4 = vec![\n            hash(&layer3[0], &layer3[1]), // 16\n            hash(&layer3[2], &layer3[3]), // 16\n        ];\n\n        let comm_d = hash(&layer4[0], &layer4[1]); // 32\n\n        assert!(verify_pieces(&comm_d, &pieces, sector_size).unwrap());\n    }\n\n    #[ignore] // slow test\n    #[test]\n    fn test_verify_random_pieces() -> Result<()> {\n        use crate::pieces::*;\n\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for sector_size in &[\n            SectorSize(4 * 128),\n            SectorSize(32 * 128),\n            SectorSize(1024 * 128),\n            SectorSize(1024 * 8 * 128),\n        ] {\n            println!(\"--- {:?} ---\", sector_size);\n            for i in 0..100 {\n                println!(\" - {} -\", i);\n                let unpadded_sector_size: UnpaddedBytesAmount = sector_size.clone().into();\n                let sector_size = *sector_size;\n                let padded_sector_size: PaddedBytesAmount = sector_size.into();\n\n                let mut piece_sizes = 
Vec::new();\n                loop {\n                    let sum_piece_sizes: PaddedBytesAmount =\n                        sum_piece_bytes_with_alignment(&piece_sizes).into();\n\n                    if sum_piece_sizes > padded_sector_size {\n                        piece_sizes.pop();\n                        break;\n                    }\n                    if sum_piece_sizes == padded_sector_size {\n                        break;\n                    }\n\n                    'inner: loop {\n                        // pieces must be power of two\n                        let left = u64::from(padded_sector_size) - u64::from(sum_piece_sizes);\n                        let left_power_of_two = prev_power_of_two(left as u32);\n                        let max_exp = (left_power_of_two as f64).log2() as u32;\n\n                        let padded_exp = if max_exp > 7 {\n                            rng.gen_range(\n                                7, // 2**7 == 128,\n                                max_exp,\n                            )\n                        } else {\n                            7\n                        };\n                        let padded_piece_size = 2u64.pow(padded_exp);\n                        let piece_size: UnpaddedBytesAmount =\n                            PaddedBytesAmount(padded_piece_size).into();\n                        piece_sizes.push(piece_size);\n                        let sum: PaddedBytesAmount =\n                            sum_piece_bytes_with_alignment(&piece_sizes).into();\n\n                        if sum > padded_sector_size {\n                            // pieces might be too large after padding, so remove them and try again.\n                            piece_sizes.pop();\n                        } else {\n                            break 'inner;\n                        }\n                    }\n                }\n\n                // println!(\n                //     \"  {:?}\",\n                //     piece_sizes\n                //         .iter()\n                //         .map(|s| u64::from(*s) / 127)\n                //         .collect::<Vec<_>>()\n                // );\n                assert!(sum_piece_bytes_with_alignment(&piece_sizes) <= unpadded_sector_size);\n                assert!(!piece_sizes.is_empty());\n\n                let (comm_d, piece_infos) = build_sector(&piece_sizes, sector_size)?;\n\n                assert!(\n                    verify_pieces(&comm_d, &piece_infos, sector_size)?,\n                    \"invalid pieces\"\n                );\n            }\n        }\n\n        Ok(())\n    }\n\n    fn build_sector(\n        piece_sizes: &[UnpaddedBytesAmount],\n        sector_size: SectorSize,\n    ) -> Result<([u8; 32], Vec<PieceInfo>)> {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n        let graph = StackedBucketGraph::<DefaultPieceHasher>::new_stacked(\n            u64::from(sector_size) as usize / NODE_SIZE,\n            DRG_DEGREE,\n            EXP_DEGREE,\n            new_seed(),\n        )?;\n\n        let mut staged_sector = Vec::with_capacity(u64::from(sector_size) as usize);\n        let mut staged_sector_io = std::io::Cursor::new(&mut staged_sector);\n        let mut piece_infos = Vec::with_capacity(piece_sizes.len());\n\n        for (i, piece_size) in piece_sizes.iter().enumerate() {\n            let piece_size_u = u64::from(*piece_size) as usize;\n            let mut piece_bytes = vec![255u8; piece_size_u];\n            rng.fill_bytes(&mut piece_bytes);\n\n            let mut piece_file 
= std::io::Cursor::new(&mut piece_bytes);\n\n            let (piece_info, _) = crate::api::add_piece(\n                &mut piece_file,\n                &mut staged_sector_io,\n                *piece_size,\n                &piece_sizes[..i],\n            )?;\n\n            piece_infos.push(piece_info);\n        }\n        assert_eq!(staged_sector.len(), u64::from(sector_size) as usize);\n\n        let data_tree: DataTree =\n            create_base_merkle_tree::<DataTree>(None, graph.size(), &staged_sector).unwrap();\n        let comm_d_root: Fr = data_tree.root().into();\n        let comm_d = commitment_from_fr(comm_d_root);\n\n        Ok((comm_d, piece_infos))\n    }\n\n    fn prev_power_of_two(mut x: u32) -> u32 {\n        x |= x >> 1;\n        x |= x >> 2;\n        x |= x >> 4;\n        x |= x >> 8;\n        x |= x >> 16;\n        x - (x >> 1)\n    }\n}\n"
  },
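  {
    "path": "filecoin-proofs/examples/empty_comm_d.rs",
    "content": "//! Illustrative sketch, not a file from the original crate (the file name is\n//! an assumption): computes comm_d for a pieceless 2 KiB sector and verifies\n//! it. The expected digest is the one pinned by `test_compute_comm_d_empty`.\n\nuse filecoin_proofs::pieces::{compute_comm_d, verify_pieces};\nuse filecoin_proofs::types::SectorSize;\n\nfn main() -> anyhow::Result<()> {\n    // With no pieces, comm_d is the cached root of the all-zeroes data tree.\n    let comm_d = compute_comm_d(SectorSize(2048), &[])?;\n    assert_eq!(\n        comm_d,\n        [\n            252, 126, 146, 130, 150, 229, 22, 250, 173, 233, 134, 178, 143, 146, 212, 74, 79,\n            36, 185, 53, 72, 82, 35, 55, 106, 121, 144, 39, 188, 24, 248, 51\n        ]\n    );\n\n    // The empty piece list verifies against the commitment it produced.\n    assert!(verify_pieces(&comm_d, &[], SectorSize(2048))?);\n    Ok(())\n}\n"
  },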
  {
    "path": "filecoin-proofs/src/serde_big_array.rs",
    "content": "use serde::de::{Deserialize, Deserializer, Error, SeqAccess, Visitor};\nuse serde::ser::{Serialize, SerializeTuple, Serializer};\nuse std::fmt;\nuse std::marker::PhantomData;\n\n// serde doesn't know how to serialize big arrays out of the box\n// see: https://github.com/serde-rs/serde/issues/631\n\npub trait BigArray<'de>: Sized {\n    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>\n    where\n        S: Serializer;\n    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>\n    where\n        D: Deserializer<'de>;\n}\n\nmacro_rules! big_array {\n    ($($len:expr,)+) => {\n        $(\n            impl<'de, T> BigArray<'de> for [T; $len]\n                where T: Default + Copy + Serialize + Deserialize<'de>\n            {\n                fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>\n                    where S: Serializer\n                {\n                    let mut seq = serializer.serialize_tuple(self.len())?;\n                    for elem in &self[..] {\n                        seq.serialize_element(elem)?;\n                    }\n                    seq.end()\n                }\n\n                fn deserialize<D>(deserializer: D) -> Result<[T; $len], D::Error>\n                    where D: Deserializer<'de>\n                {\n                    struct ArrayVisitor<T> {\n                        element: PhantomData<T>,\n                    }\n\n                    impl<'de, T> Visitor<'de> for ArrayVisitor<T>\n                        where T: Default + Copy + Deserialize<'de>\n                    {\n                        type Value = [T; $len];\n\n                        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {\n                            formatter.write_str(concat!(\"an array of length \", $len))\n                        }\n\n                        fn visit_seq<A>(self, mut seq: A) -> Result<[T; $len], A::Error>\n                            where A: SeqAccess<'de>\n                        {\n                            let mut arr = [T::default(); $len];\n                            for (i, element) in arr.iter_mut().enumerate() {\n                                *element = seq.next_element()?.ok_or_else(|| Error::invalid_length(i, &self))?\n                            }\n                            Ok(arr)\n                        }\n                    }\n\n                    let visitor = ArrayVisitor { element: PhantomData };\n                    deserializer.deserialize_tuple($len, visitor)\n                }\n            }\n        )+\n    }\n}\n\nbig_array! {\n    40, 48, 50, 56, 64, 72, 96, 100, 128, 160, 192, 200, 224, 256, 384, 512,\n    768, 1024, 2048, 4096, 8192, 16384, 32768, 65536,\n}\n"
  },
  {
    "path": "filecoin-proofs/src/singletons.rs",
    "content": "use ff::PrimeField;\nuse lazy_static::lazy_static;\nuse paired::bls12_381::Fr;\n\nuse storage_proofs::hasher::pedersen::PedersenDomain;\n\nlazy_static! {\n    pub static ref POST_VDF_KEY: PedersenDomain =\n        PedersenDomain(Fr::from_str(\"12345\").unwrap().into_repr());\n}\n"
  },
  {
    "path": "filecoin-proofs/src/types/bytes_amount.rs",
    "content": "use std::ops::{Add, Sub};\n\nuse serde::{Deserialize, Serialize};\n\nuse crate::fr32::{to_padded_bytes, to_unpadded_bytes};\n\npub struct PoStProofBytesAmount(pub usize);\n\npub struct PoRepProofBytesAmount(pub usize);\n\n#[derive(Debug, Default, Clone, Copy, PartialEq, PartialOrd, Serialize, Deserialize, Eq, Ord)]\npub struct UnpaddedByteIndex(pub u64);\n\n#[derive(Debug, Default, Clone, Copy, PartialEq, PartialOrd, Serialize, Deserialize, Eq, Ord)]\npub struct UnpaddedBytesAmount(pub u64);\n\n#[derive(Debug, Default, Clone, Copy, PartialEq, PartialOrd, Serialize, Deserialize, Eq, Ord)]\npub struct PaddedBytesAmount(pub u64);\n\nimpl From<UnpaddedBytesAmount> for u64 {\n    fn from(n: UnpaddedBytesAmount) -> Self {\n        n.0\n    }\n}\n\nimpl From<UnpaddedBytesAmount> for usize {\n    fn from(n: UnpaddedBytesAmount) -> Self {\n        n.0 as usize\n    }\n}\n\nimpl From<UnpaddedBytesAmount> for PaddedBytesAmount {\n    fn from(n: UnpaddedBytesAmount) -> Self {\n        PaddedBytesAmount(to_padded_bytes(n.0 as usize) as u64)\n    }\n}\n\nimpl From<PaddedBytesAmount> for u64 {\n    fn from(n: PaddedBytesAmount) -> Self {\n        n.0\n    }\n}\n\nimpl From<PaddedBytesAmount> for usize {\n    fn from(n: PaddedBytesAmount) -> Self {\n        n.0 as usize\n    }\n}\n\nimpl From<PaddedBytesAmount> for UnpaddedBytesAmount {\n    fn from(n: PaddedBytesAmount) -> Self {\n        UnpaddedBytesAmount(to_unpadded_bytes(n.0))\n    }\n}\n\nimpl From<UnpaddedBytesAmount> for UnpaddedByteIndex {\n    fn from(n: UnpaddedBytesAmount) -> Self {\n        UnpaddedByteIndex(n.0)\n    }\n}\n\nimpl From<UnpaddedByteIndex> for UnpaddedBytesAmount {\n    fn from(n: UnpaddedByteIndex) -> Self {\n        UnpaddedBytesAmount(n.0)\n    }\n}\n\nimpl From<UnpaddedByteIndex> for u64 {\n    fn from(n: UnpaddedByteIndex) -> Self {\n        n.0\n    }\n}\n\nimpl From<UnpaddedByteIndex> for usize {\n    fn from(n: UnpaddedByteIndex) -> Self {\n        n.0 as usize\n    }\n}\n\nimpl Add for UnpaddedBytesAmount {\n    type Output = UnpaddedBytesAmount;\n\n    fn add(self, other: UnpaddedBytesAmount) -> UnpaddedBytesAmount {\n        UnpaddedBytesAmount(self.0 + other.0)\n    }\n}\n\nimpl Add for PaddedBytesAmount {\n    type Output = PaddedBytesAmount;\n\n    fn add(self, other: PaddedBytesAmount) -> PaddedBytesAmount {\n        PaddedBytesAmount(self.0 + other.0)\n    }\n}\n\nimpl Sub for UnpaddedBytesAmount {\n    type Output = UnpaddedBytesAmount;\n\n    fn sub(self, other: UnpaddedBytesAmount) -> UnpaddedBytesAmount {\n        UnpaddedBytesAmount(self.0 - other.0)\n    }\n}\n\nimpl Sub for PaddedBytesAmount {\n    type Output = PaddedBytesAmount;\n\n    fn sub(self, other: PaddedBytesAmount) -> PaddedBytesAmount {\n        PaddedBytesAmount(self.0 - other.0)\n    }\n}\n\nimpl From<PoStProofBytesAmount> for usize {\n    fn from(x: PoStProofBytesAmount) -> Self {\n        x.0\n    }\n}\n\nimpl From<PoRepProofBytesAmount> for usize {\n    fn from(x: PoRepProofBytesAmount) -> Self {\n        x.0\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn allowed_operations() {\n        let a = UnpaddedBytesAmount(1);\n        let b = UnpaddedBytesAmount(2);\n        let c = UnpaddedBytesAmount(3);\n\n        let d = PaddedBytesAmount(1);\n        let e = PaddedBytesAmount(2);\n        let f = PaddedBytesAmount(3);\n\n        // Operations between UnpaddedBytesAmounts are allowed\n        assert_eq!(a + b, c);\n        assert_eq!(c - b, a);\n\n        // Operations between 
PaddedBytesAmounts are allowed\n        assert_eq!(d + e, f);\n        assert_eq!(f - e, d);\n\n        // Mixed operations fail at compile time.\n        // assert_eq!(a + b, f);\n\n        // Coercion to primitives works\n        assert_eq!(1u64 + u64::from(b), 3u64);\n        assert_eq!(1usize + usize::from(b), 3usize);\n        assert_eq!(1u64 + u64::from(e), 3u64);\n        assert_eq!(1usize + usize::from(e), 3usize);\n\n        // But not between BytesAmount types\n        // assert_eq!(a + UnpaddedBytesAmount::from(e), c);\n        // assert_eq!(d + UnpaddedBytesAmount::from(b), f);\n\n        // But it must be explicit or it won't compile.\n        // assert_eq!(1u64 + b, 3u64);\n        // assert_eq!(1usize + b, 3usize);\n        // assert_eq!(1u64 + u64::from(e), 3u64);\n        // assert_eq!(1usize + usize::from(e), 3usize);\n    }\n}\n"
  },
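  {
    "path": "filecoin-proofs/examples/bytes_amount_conversions.rs",
    "content": "//! Illustrative sketch, not a file from the original crate (the file name is\n//! an assumption): the typed byte amounts encode the Fr32 padding ratio. One\n//! full padding cycle maps 127 unpadded bytes onto 128 padded bytes, and the\n//! conversion round-trips.\n\nuse filecoin_proofs::types::{PaddedBytesAmount, UnpaddedBytesAmount};\n\nfn main() {\n    let unpadded = UnpaddedBytesAmount(127);\n    let padded = PaddedBytesAmount::from(unpadded);\n    assert_eq!(u64::from(padded), 128);\n\n    // Converting back recovers the original amount.\n    assert_eq!(UnpaddedBytesAmount::from(padded), unpadded);\n}\n"
  },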
  {
    "path": "filecoin-proofs/src/types/mod.rs",
    "content": "use serde::{Deserialize, Serialize};\nuse storage_proofs::hasher::Hasher;\nuse storage_proofs::porep::stacked;\n\nuse crate::constants::*;\n\nmod bytes_amount;\nmod piece_info;\nmod porep_config;\nmod porep_proof_partitions;\nmod post_config;\nmod post_proof_partitions;\nmod sector_class;\nmod sector_size;\n\npub use self::bytes_amount::*;\npub use self::piece_info::*;\npub use self::porep_config::*;\npub use self::porep_proof_partitions::*;\npub use self::post_config::*;\npub use self::post_proof_partitions::*;\npub use self::sector_class::*;\npub use self::sector_size::*;\n\npub type Commitment = [u8; 32];\npub type ChallengeSeed = [u8; 32];\npub use stacked::PersistentAux;\npub use stacked::TemporaryAux;\npub type ProverId = [u8; 32];\npub type Ticket = [u8; 32];\n\npub type Tree = storage_proofs::merkle::OctMerkleTree<DefaultTreeHasher>;\npub type LCTree = storage_proofs::merkle::OctLCMerkleTree<DefaultTreeHasher>;\n\npub use storage_proofs::porep::stacked::Labels;\npub type DataTree = storage_proofs::merkle::BinaryMerkleTree<DefaultPieceHasher>;\n\npub use storage_proofs::merkle::MerkleTreeTrait;\n\n/// Arity for oct trees, used for comm_r_last.\npub const OCT_ARITY: usize = 8;\n\n/// Arity for binary trees, used for comm_d.\npub const BINARY_ARITY: usize = 2;\n\n#[derive(Debug, Clone)]\npub struct SealPreCommitOutput {\n    pub comm_r: Commitment,\n    pub comm_d: Commitment,\n}\n\npub type VanillaSealProof<Tree> = storage_proofs::porep::stacked::Proof<Tree, DefaultPieceHasher>;\n\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct SealCommitPhase1Output<Tree: MerkleTreeTrait> {\n    pub vanilla_proofs: Vec<Vec<VanillaSealProof<Tree>>>,\n    pub comm_r: Commitment,\n    pub comm_d: Commitment,\n    pub replica_id: <Tree::Hasher as Hasher>::Domain,\n    pub seed: Ticket,\n    pub ticket: Ticket,\n}\n\n#[derive(Clone, Debug)]\npub struct SealCommitOutput {\n    pub proof: Vec<u8>,\n}\n\npub use merkletree::store::StoreConfig;\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct SealPreCommitPhase1Output<Tree: MerkleTreeTrait> {\n    pub labels: Labels<Tree>,\n    pub config: StoreConfig,\n    pub comm_d: Commitment,\n}\n"
  },
  {
    "path": "filecoin-proofs/src/types/piece_info.rs",
    "content": "use std::fmt::{self, Debug, Formatter};\n\nuse anyhow::{ensure, Result};\nuse serde::{Deserialize, Serialize};\n\nuse crate::types::{Commitment, UnpaddedBytesAmount};\n\n#[derive(Clone, Default, PartialEq, Eq, Serialize, Deserialize)]\npub struct PieceInfo {\n    pub commitment: Commitment,\n    pub size: UnpaddedBytesAmount,\n}\n\nimpl Debug for PieceInfo {\n    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {\n        fmt.debug_struct(\"PieceInfo\")\n            .field(\"commitment\", &hex::encode(&self.commitment))\n            .field(\"size\", &self.size)\n            .finish()\n    }\n}\n\nimpl PieceInfo {\n    pub fn new(commitment: Commitment, size: UnpaddedBytesAmount) -> Result<Self> {\n        ensure!(commitment != [0; 32], \"Invalid all zero commitment\");\n        Ok(PieceInfo { commitment, size })\n    }\n}\n"
  },
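  {
    "path": "filecoin-proofs/examples/piece_info_validation.rs",
    "content": "//! Illustrative sketch, not a file from the original crate (the file name is\n//! an assumption): `PieceInfo::new` rejects the all-zero commitment and\n//! accepts any other value, exactly as the `ensure!` in its body states.\n\nuse filecoin_proofs::types::{PieceInfo, UnpaddedBytesAmount};\n\nfn main() {\n    // An all-zero commitment is invalid...\n    assert!(PieceInfo::new([0u8; 32], UnpaddedBytesAmount(127)).is_err());\n    // ...while a non-zero commitment is accepted.\n    assert!(PieceInfo::new([1u8; 32], UnpaddedBytesAmount(127)).is_ok());\n}\n"
  },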
  {
    "path": "filecoin-proofs/src/types/porep_config.rs",
    "content": "use std::path::PathBuf;\n\nuse anyhow::Result;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    merkle::MerkleTreeTrait,\n    parameter_cache::{\n        parameter_cache_metadata_path, parameter_cache_params_path,\n        parameter_cache_verifying_key_path, CacheableParameters,\n    },\n};\nuse storage_proofs_porep::stacked::{StackedCircuit, StackedCompound};\n\nuse crate::{\n    constants::DefaultPieceHasher,\n    parameters::public_params,\n    types::{PaddedBytesAmount, PoRepProofPartitions, SectorSize, UnpaddedBytesAmount},\n};\n\n#[derive(Clone, Copy, Debug)]\npub struct PoRepConfig {\n    pub sector_size: SectorSize,\n    pub partitions: PoRepProofPartitions,\n    pub porep_id: [u8; 32],\n    pub api_version: ApiVersion,\n}\n\nimpl From<PoRepConfig> for PaddedBytesAmount {\n    fn from(x: PoRepConfig) -> Self {\n        let PoRepConfig { sector_size, .. } = x;\n        PaddedBytesAmount::from(sector_size)\n    }\n}\n\nimpl From<PoRepConfig> for UnpaddedBytesAmount {\n    fn from(x: PoRepConfig) -> Self {\n        let PoRepConfig { sector_size, .. } = x;\n        PaddedBytesAmount::from(sector_size).into()\n    }\n}\n\nimpl From<PoRepConfig> for PoRepProofPartitions {\n    fn from(x: PoRepConfig) -> Self {\n        let PoRepConfig { partitions, .. } = x;\n        partitions\n    }\n}\n\nimpl From<PoRepConfig> for SectorSize {\n    fn from(cfg: PoRepConfig) -> Self {\n        let PoRepConfig { sector_size, .. } = cfg;\n        sector_size\n    }\n}\n\nimpl PoRepConfig {\n    /// Returns the cache identifier as used by `storage-proofs::paramater_cache`.\n    pub fn get_cache_identifier<Tree: 'static + MerkleTreeTrait>(&self) -> Result<String> {\n        let params = public_params::<Tree>(\n            self.sector_size.into(),\n            self.partitions.into(),\n            self.porep_id,\n            self.api_version,\n        )?;\n\n        Ok(\n            <StackedCompound<Tree, DefaultPieceHasher> as CacheableParameters<\n                StackedCircuit<'_, Tree, DefaultPieceHasher>,\n                _,\n            >>::cache_identifier(&params),\n        )\n    }\n\n    pub fn get_cache_metadata_path<Tree: 'static + MerkleTreeTrait>(&self) -> Result<PathBuf> {\n        let id = self.get_cache_identifier::<Tree>()?;\n        Ok(parameter_cache_metadata_path(&id))\n    }\n\n    pub fn get_cache_verifying_key_path<Tree: 'static + MerkleTreeTrait>(&self) -> Result<PathBuf> {\n        let id = self.get_cache_identifier::<Tree>()?;\n        Ok(parameter_cache_verifying_key_path(&id))\n    }\n\n    pub fn get_cache_params_path<Tree: 'static + MerkleTreeTrait>(&self) -> Result<PathBuf> {\n        let id = self.get_cache_identifier::<Tree>()?;\n        Ok(parameter_cache_params_path(&id))\n    }\n}\n"
  },
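A rough sketch of how a `PoRepConfig` is assembled and used to locate cached Groth parameters. The 2 KiB test size, the single partition, and the arbitrary `porep_id` are assumptions for illustration, not production values:

use filecoin_proofs::{
    PoRepConfig, PoRepProofPartitions, SectorShape2KiB, SectorSize, SECTOR_SIZE_2_KIB,
};
use storage_proofs_core::api_version::ApiVersion;

fn main() -> anyhow::Result<()> {
    let config = PoRepConfig {
        sector_size: SectorSize(SECTOR_SIZE_2_KIB),
        partitions: PoRepProofPartitions(1), // test-size sectors use one partition
        porep_id: [99u8; 32],                // arbitrary id for illustration
        api_version: ApiVersion::V1_1_0,
    };

    // All three paths hang off the same circuit-specific cache identifier.
    println!("params:   {:?}", config.get_cache_params_path::<SectorShape2KiB>()?);
    println!("vk:       {:?}", config.get_cache_verifying_key_path::<SectorShape2KiB>()?);
    println!("metadata: {:?}", config.get_cache_metadata_path::<SectorShape2KiB>()?);
    Ok(())
}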
  {
    "path": "filecoin-proofs/src/types/porep_proof_partitions.rs",
    "content": "#[derive(Clone, Copy, Debug)]\npub struct PoRepProofPartitions(pub u8);\n\nimpl From<PoRepProofPartitions> for usize {\n    fn from(x: PoRepProofPartitions) -> Self {\n        x.0 as usize\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/src/types/post_config.rs",
    "content": "use std::path::PathBuf;\n\nuse anyhow::Result;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    merkle::MerkleTreeTrait,\n    parameter_cache::{\n        parameter_cache_metadata_path, parameter_cache_params_path,\n        parameter_cache_verifying_key_path, CacheableParameters,\n    },\n};\nuse storage_proofs_post::fallback::{FallbackPoStCircuit, FallbackPoStCompound};\n\nuse crate::{\n    parameters::{window_post_public_params, winning_post_public_params},\n    types::{PaddedBytesAmount, SectorSize, UnpaddedBytesAmount},\n};\n\n#[derive(Clone, Debug)]\npub struct PoStConfig {\n    pub sector_size: SectorSize,\n    pub challenge_count: usize,\n    pub sector_count: usize,\n    pub typ: PoStType,\n    /// High priority (always runs on GPU) == true\n    pub priority: bool,\n    pub api_version: ApiVersion,\n}\n\n#[derive(Debug, Clone, PartialEq, Eq)]\npub enum PoStType {\n    Winning,\n    Window,\n}\n\nimpl From<PoStConfig> for PaddedBytesAmount {\n    fn from(x: PoStConfig) -> Self {\n        let PoStConfig { sector_size, .. } = x;\n        PaddedBytesAmount::from(sector_size)\n    }\n}\n\nimpl From<PoStConfig> for UnpaddedBytesAmount {\n    fn from(x: PoStConfig) -> Self {\n        let PoStConfig { sector_size, .. } = x;\n        PaddedBytesAmount::from(sector_size).into()\n    }\n}\n\nimpl PoStConfig {\n    pub fn padded_sector_size(&self) -> PaddedBytesAmount {\n        PaddedBytesAmount::from(self.sector_size)\n    }\n\n    pub fn unpadded_sector_size(&self) -> UnpaddedBytesAmount {\n        PaddedBytesAmount::from(self.sector_size).into()\n    }\n\n    /// Returns the cache identifier as used by `storage-proofs::paramater_cache`.\n    pub fn get_cache_identifier<Tree: 'static + MerkleTreeTrait>(&self) -> Result<String> {\n        match self.typ {\n            PoStType::Winning => {\n                let params = winning_post_public_params::<Tree>(self)?;\n\n                Ok(<FallbackPoStCompound<Tree> as CacheableParameters<\n                    FallbackPoStCircuit<Tree>,\n                    _,\n                >>::cache_identifier(&params))\n            }\n            PoStType::Window => {\n                let params = window_post_public_params::<Tree>(self)?;\n\n                Ok(<FallbackPoStCompound<Tree> as CacheableParameters<\n                    FallbackPoStCircuit<Tree>,\n                    _,\n                >>::cache_identifier(&params))\n            }\n        }\n    }\n\n    pub fn get_cache_metadata_path<Tree: 'static + MerkleTreeTrait>(&self) -> Result<PathBuf> {\n        let id = self.get_cache_identifier::<Tree>()?;\n        Ok(parameter_cache_metadata_path(&id))\n    }\n\n    pub fn get_cache_verifying_key_path<Tree: 'static + MerkleTreeTrait>(&self) -> Result<PathBuf> {\n        let id = self.get_cache_identifier::<Tree>()?;\n        Ok(parameter_cache_verifying_key_path(&id))\n    }\n\n    pub fn get_cache_params_path<Tree: 'static + MerkleTreeTrait>(&self) -> Result<PathBuf> {\n        let id = self.get_cache_identifier::<Tree>()?;\n        Ok(parameter_cache_params_path(&id))\n    }\n}\n"
  },
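For comparison, a sketch of a Winning-PoSt `PoStConfig` mirroring the defaults used by the tests later in this dump (the constants are assumed to be the crate's root re-exports):

use filecoin_proofs::{
    PoStConfig, PoStType, SectorSize, SECTOR_SIZE_2_KIB, WINNING_POST_CHALLENGE_COUNT,
    WINNING_POST_SECTOR_COUNT,
};
use storage_proofs_core::api_version::ApiVersion;

fn main() {
    let config = PoStConfig {
        sector_size: SectorSize(SECTOR_SIZE_2_KIB),
        challenge_count: WINNING_POST_CHALLENGE_COUNT,
        sector_count: WINNING_POST_SECTOR_COUNT,
        typ: PoStType::Winning,
        priority: false, // low priority: may be preempted on the GPU
        api_version: ApiVersion::V1_1_0,
    };

    // padded == raw sector size; unpadded subtracts the fr32 expansion overhead.
    assert_eq!(config.padded_sector_size().0, SECTOR_SIZE_2_KIB);
    assert!(config.unpadded_sector_size().0 < SECTOR_SIZE_2_KIB);
}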
  {
    "path": "filecoin-proofs/src/types/post_proof_partitions.rs",
    "content": "use crate::{constants::SINGLE_PARTITION_PROOF_LEN, types::PoStProofBytesAmount};\n\n#[derive(Clone, Copy, Debug)]\npub struct PoStProofPartitions(pub u8);\n\nimpl From<PoStProofPartitions> for PoStProofBytesAmount {\n    fn from(x: PoStProofPartitions) -> Self {\n        PoStProofBytesAmount(SINGLE_PARTITION_PROOF_LEN * usize::from(x))\n    }\n}\n\nimpl From<PoStProofPartitions> for usize {\n    fn from(x: PoStProofPartitions) -> Self {\n        x.0 as usize\n    }\n}\n"
  },
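Since each partition contributes one Groth16 proof, the expected proof byte length is linear in the partition count. A small check (assuming `PoStProofBytesAmount` and `SINGLE_PARTITION_PROOF_LEN` are re-exported at the crate root, and that the constant is the 192-byte compressed BLS12-381 proof size):

use filecoin_proofs::{PoStProofBytesAmount, PoStProofPartitions, SINGLE_PARTITION_PROOF_LEN};

fn main() {
    let bytes = PoStProofBytesAmount::from(PoStProofPartitions(2));
    assert_eq!(bytes.0, 2 * SINGLE_PARTITION_PROOF_LEN); // 2 * 192 = 384
}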
  {
    "path": "filecoin-proofs/src/types/private_replica_info.rs",
    "content": "use std::cmp::Ordering;\nuse std::fs;\nuse std::hash::{Hash, Hasher as StdHasher};\nuse std::marker::PhantomData;\nuse std::path::{Path, PathBuf};\n\nuse anyhow::{ensure, Context, Result};\nuse bincode::deserialize;\nuse filecoin_hashers::Hasher;\nuse generic_array::typenum::Unsigned;\nuse log::trace;\nuse merkletree::store::StoreConfig;\nuse storage_proofs_core::{\n    cache_key::CacheKey,\n    merkle::{\n        create_tree, get_base_tree_count, split_config_and_replica, MerkleTreeTrait,\n        MerkleTreeWrapper,\n    },\n    util::default_rows_to_discard,\n};\n\nuse crate::{\n    api::{as_safe_commitment, get_base_tree_leafs, get_base_tree_size},\n    types::{Commitment, PersistentAux, SectorSize},\n};\n\n/// The minimal information required about a replica, in order to be able to generate\n/// a PoSt over it.\n#[derive(Debug)]\npub struct PrivateReplicaInfo<Tree: MerkleTreeTrait> {\n    /// Path to the replica.\n    replica: PathBuf,\n    /// The replica commitment.\n    comm_r: Commitment,\n    /// Persistent Aux.\n    aux: PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n    /// Contains sector-specific (e.g. merkle trees) assets\n    pub cache_dir: PathBuf,\n\n    _t: PhantomData<Tree>,\n}\n\nimpl<Tree: MerkleTreeTrait> Clone for PrivateReplicaInfo<Tree> {\n    fn clone(&self) -> Self {\n        Self {\n            replica: self.replica.clone(),\n            comm_r: self.comm_r,\n            aux: self.aux.clone(),\n            cache_dir: self.cache_dir.clone(),\n            _t: Default::default(),\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> PartialEq for PrivateReplicaInfo<Tree> {\n    fn eq(&self, other: &Self) -> bool {\n        self.replica == other.replica\n            && self.comm_r == other.comm_r\n            && self.aux == other.aux\n            && self.cache_dir == other.cache_dir\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> Hash for PrivateReplicaInfo<Tree> {\n    fn hash<H: StdHasher>(&self, state: &mut H) {\n        self.replica.hash(state);\n        self.comm_r.hash(state);\n        self.aux.hash(state);\n        self.cache_dir.hash(state);\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> Eq for PrivateReplicaInfo<Tree> {}\n\nimpl<Tree: MerkleTreeTrait> Ord for PrivateReplicaInfo<Tree> {\n    fn cmp(&self, other: &Self) -> Ordering {\n        self.comm_r.as_ref().cmp(other.comm_r.as_ref())\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> PartialOrd for PrivateReplicaInfo<Tree> {\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        self.comm_r.as_ref().partial_cmp(other.comm_r.as_ref())\n    }\n}\n\nimpl<Tree: 'static + MerkleTreeTrait> PrivateReplicaInfo<Tree> {\n    pub fn new(replica: PathBuf, comm_r: Commitment, cache_dir: PathBuf) -> Result<Self> {\n        ensure!(comm_r != [0; 32], \"Invalid all zero commitment (comm_r)\");\n\n        let aux = {\n            let f_aux_path = cache_dir.join(CacheKey::PAux.to_string());\n            let aux_bytes = fs::read(&f_aux_path)\n                .with_context(|| format!(\"could not read from path={:?}\", f_aux_path))?;\n\n            deserialize(&aux_bytes)\n        }?;\n\n        ensure!(replica.exists(), \"Sealed replica does not exist\");\n\n        Ok(PrivateReplicaInfo {\n            replica,\n            comm_r,\n            aux,\n            cache_dir,\n            _t: Default::default(),\n        })\n    }\n\n    pub fn cache_dir_path(&self) -> &Path {\n        self.cache_dir.as_path()\n    }\n\n    pub fn replica_path(&self) -> &Path {\n        self.replica.as_path()\n    }\n\n    pub fn 
safe_comm_r(&self) -> Result<<Tree::Hasher as Hasher>::Domain> {\n        as_safe_commitment(&self.comm_r, \"comm_r\")\n    }\n\n    pub fn safe_comm_c(&self) -> <Tree::Hasher as Hasher>::Domain {\n        self.aux.comm_c\n    }\n\n    pub fn safe_comm_r_last(&self) -> <Tree::Hasher as Hasher>::Domain {\n        self.aux.comm_r_last\n    }\n\n    /// Generate the merkle tree of this particular replica.\n    pub fn merkle_tree(\n        &self,\n        sector_size: SectorSize,\n    ) -> Result<\n        MerkleTreeWrapper<\n            Tree::Hasher,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >,\n    > {\n        let base_tree_size = get_base_tree_size::<Tree>(sector_size)?;\n        let base_tree_leafs = get_base_tree_leafs::<Tree>(base_tree_size)?;\n        trace!(\n            \"post: base tree size {}, base tree leafs {}, rows_to_discard {}, arities [{}, {}, {}]\",\n            base_tree_size,\n            base_tree_leafs,\n            default_rows_to_discard(base_tree_leafs, Tree::Arity::to_usize()),\n            Tree::Arity::to_usize(),\n            Tree::SubTreeArity::to_usize(),\n            Tree::TopTreeArity::to_usize(),\n        );\n\n        let mut config = StoreConfig::new(\n            self.cache_dir_path(),\n            CacheKey::CommRLastTree.to_string(),\n            default_rows_to_discard(base_tree_leafs, Tree::Arity::to_usize()),\n        );\n        config.size = Some(base_tree_size);\n\n        let tree_count = get_base_tree_count::<Tree>();\n        let (configs, replica_config) = split_config_and_replica(\n            config,\n            self.replica_path().to_path_buf(),\n            base_tree_leafs,\n            tree_count,\n        )?;\n\n        create_tree::<Tree>(base_tree_size, &configs, Some(&replica_config))\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/src/types/public_replica_info.rs",
    "content": "use std::cmp::Ordering;\nuse std::hash::Hash;\n\nuse anyhow::{ensure, Result};\nuse filecoin_hashers::Domain;\n\nuse crate::{api::as_safe_commitment, types::Commitment};\n\n/// The minimal information required about a replica, in order to be able to verify\n/// a PoSt over it.\n#[derive(Clone, Debug, PartialEq, Eq, Hash)]\npub struct PublicReplicaInfo {\n    /// The replica commitment.\n    comm_r: Commitment,\n}\n\nimpl Ord for PublicReplicaInfo {\n    fn cmp(&self, other: &Self) -> Ordering {\n        self.comm_r.as_ref().cmp(other.comm_r.as_ref())\n    }\n}\n\nimpl PartialOrd for PublicReplicaInfo {\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\n\nimpl PublicReplicaInfo {\n    pub fn new(comm_r: Commitment) -> Result<Self> {\n        ensure!(comm_r != [0; 32], \"Invalid all zero commitment (comm_r)\");\n        Ok(PublicReplicaInfo { comm_r })\n    }\n\n    pub fn safe_comm_r<T: Domain>(&self) -> Result<T> {\n        as_safe_commitment(&self.comm_r, \"comm_r\")\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/src/types/sector_class.rs",
    "content": "use storage_proofs_core::api_version::ApiVersion;\n\nuse crate::types::{PoRepConfig, PoRepProofPartitions, SectorSize};\n\n#[derive(Clone, Copy, Debug)]\npub struct SectorClass {\n    pub sector_size: SectorSize,\n    pub partitions: PoRepProofPartitions,\n    pub porep_id: [u8; 32],\n    pub api_version: ApiVersion,\n}\n\nimpl From<SectorClass> for PoRepConfig {\n    fn from(x: SectorClass) -> Self {\n        let SectorClass {\n            sector_size,\n            partitions,\n            porep_id,\n            api_version,\n        } = x;\n        PoRepConfig {\n            sector_size,\n            partitions,\n            porep_id,\n            api_version,\n        }\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/src/types/sector_size.rs",
    "content": "use fr32::to_unpadded_bytes;\n\nuse crate::types::{PaddedBytesAmount, UnpaddedBytesAmount};\n\n#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]\npub struct SectorSize(pub u64);\n\nimpl From<u64> for SectorSize {\n    fn from(size: u64) -> Self {\n        SectorSize(size)\n    }\n}\n\nimpl From<SectorSize> for UnpaddedBytesAmount {\n    fn from(x: SectorSize) -> Self {\n        UnpaddedBytesAmount(to_unpadded_bytes(x.0))\n    }\n}\n\nimpl From<SectorSize> for PaddedBytesAmount {\n    fn from(x: SectorSize) -> Self {\n        PaddedBytesAmount(x.0)\n    }\n}\n\nimpl From<SectorSize> for u64 {\n    fn from(x: SectorSize) -> Self {\n        x.0\n    }\n}\n"
  },
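The two conversions above encode the fr32 padding rate: every 127 bytes of payload occupy 128 bytes on disk, so the unpadded amount is `padded / 128 * 127`. A quick worked check for a 2 KiB sector:

use filecoin_proofs::{PaddedBytesAmount, SectorSize, UnpaddedBytesAmount};

fn main() {
    let sector = SectorSize(2048);
    assert_eq!(PaddedBytesAmount::from(sector).0, 2048);
    // 2048 / 128 * 127 = 2032 bytes of caller-visible payload.
    assert_eq!(UnpaddedBytesAmount::from(sector).0, 2032);
}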
  {
    "path": "filecoin-proofs/tests/api.rs",
    "content": "use std::collections::BTreeMap;\nuse std::fs::{read_dir, remove_file};\nuse std::io::{Read, Seek, SeekFrom, Write};\nuse std::path::{Path, PathBuf};\nuse std::sync::Once;\n\nuse anyhow::Result;\nuse bellperson::bls::Fr;\nuse ff::Field;\nuse filecoin_hashers::Hasher;\nuse filecoin_proofs::{\n    add_piece, clear_cache, compute_comm_d, fauxrep_aux, generate_fallback_sector_challenges,\n    generate_piece_commitment, generate_single_vanilla_proof, generate_window_post,\n    generate_window_post_with_vanilla, generate_winning_post,\n    generate_winning_post_sector_challenge, generate_winning_post_with_vanilla, get_unsealed_range,\n    seal_commit_phase1, seal_commit_phase2, seal_pre_commit_phase1, seal_pre_commit_phase2,\n    validate_cache_for_commit, validate_cache_for_precommit_phase2, verify_seal,\n    verify_window_post, verify_winning_post, Commitment, DefaultTreeDomain, MerkleTreeTrait,\n    PaddedBytesAmount, PieceInfo, PoRepConfig, PoRepProofPartitions, PoStConfig, PoStType,\n    PrivateReplicaInfo, ProverId, PublicReplicaInfo, SealPreCommitOutput,\n    SealPreCommitPhase1Output, SectorShape16KiB, SectorShape2KiB, SectorShape32KiB,\n    SectorShape4KiB, SectorSize, UnpaddedByteIndex, UnpaddedBytesAmount, POREP_PARTITIONS,\n    SECTOR_SIZE_16_KIB, SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_KIB, SECTOR_SIZE_4_KIB,\n    WINDOW_POST_CHALLENGE_COUNT, WINDOW_POST_SECTOR_COUNT, WINNING_POST_CHALLENGE_COUNT,\n    WINNING_POST_SECTOR_COUNT,\n};\nuse rand::{random, Rng, SeedableRng};\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{api_version::ApiVersion, is_legacy_porep_id, sector::SectorId};\nuse tempfile::{tempdir, NamedTempFile, TempDir};\n\n// Use a fixed PoRep ID, so that the parents cache can be re-used between some tests.\n// Note however, that parents caches cannot be shared when testing the differences\n// between API v1 and v2 behaviour (since the parent caches will be different for the\n// same porep_ids).\nconst ARBITRARY_POREP_ID_V1_0_0: [u8; 32] = [127; 32];\nconst ARBITRARY_POREP_ID_V1_1_0: [u8; 32] = [128; 32];\n\nstatic INIT_LOGGER: Once = Once::new();\nfn init_logger() {\n    INIT_LOGGER.call_once(|| {\n        fil_logger::init();\n    });\n}\n\nconst TEST_SEED: [u8; 16] = [\n    0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5,\n];\n\n#[test]\n#[ignore]\nfn test_seal_lifecycle_2kib_porep_id_v1_base_8() -> Result<()> {\n    let porep_id_v1: u64 = 0; // This is a RegisteredSealProof value\n\n    let mut porep_id = [0u8; 32];\n    porep_id[..8].copy_from_slice(&porep_id_v1.to_le_bytes());\n    assert!(is_legacy_porep_id(porep_id));\n    seal_lifecycle::<SectorShape2KiB>(SECTOR_SIZE_2_KIB, &porep_id, ApiVersion::V1_0_0)\n}\n\n#[test]\n#[ignore]\nfn test_seal_lifecycle_2kib_porep_id_v1_1_base_8() -> Result<()> {\n    let porep_id_v1_1: u64 = 5; // This is a RegisteredSealProof value\n\n    let mut porep_id = [0u8; 32];\n    porep_id[..8].copy_from_slice(&porep_id_v1_1.to_le_bytes());\n    assert!(!is_legacy_porep_id(porep_id));\n    seal_lifecycle::<SectorShape2KiB>(SECTOR_SIZE_2_KIB, &porep_id, ApiVersion::V1_1_0)\n}\n\n#[test]\n#[ignore]\nfn test_seal_lifecycle_4kib_sub_8_2() -> Result<()> {\n    seal_lifecycle::<SectorShape4KiB>(\n        SECTOR_SIZE_4_KIB,\n        &ARBITRARY_POREP_ID_V1_0_0,\n        ApiVersion::V1_0_0,\n    )?;\n    seal_lifecycle::<SectorShape4KiB>(\n        SECTOR_SIZE_4_KIB,\n        &ARBITRARY_POREP_ID_V1_1_0,\n        ApiVersion::V1_1_0,\n    )\n}\n\n#[test]\n#[ignore]\nfn 
test_seal_lifecycle_16kib_sub_8_2() -> Result<()> {\n    seal_lifecycle::<SectorShape16KiB>(\n        SECTOR_SIZE_16_KIB,\n        &ARBITRARY_POREP_ID_V1_0_0,\n        ApiVersion::V1_0_0,\n    )?;\n    seal_lifecycle::<SectorShape16KiB>(\n        SECTOR_SIZE_16_KIB,\n        &ARBITRARY_POREP_ID_V1_1_0,\n        ApiVersion::V1_1_0,\n    )\n}\n\n#[test]\n#[ignore]\nfn test_seal_lifecycle_32kib_top_8_8_2() -> Result<()> {\n    seal_lifecycle::<SectorShape32KiB>(\n        SECTOR_SIZE_32_KIB,\n        &ARBITRARY_POREP_ID_V1_0_0,\n        ApiVersion::V1_0_0,\n    )?;\n    seal_lifecycle::<SectorShape32KiB>(\n        SECTOR_SIZE_32_KIB,\n        &ARBITRARY_POREP_ID_V1_1_0,\n        ApiVersion::V1_1_0,\n    )\n}\n\n// These tests are good to run, but take a long time.\n\n//#[test]\n//#[ignore]\n//fn test_seal_lifecycle_512mib_porep_id_v1_top_8_0_0_api_v1() -> Result<()> {\n//    let porep_id_v1: u64 = 2; // This is a RegisteredSealProof value\n//\n//    let mut porep_id = [0u8; 32];\n//    porep_id[..8].copy_from_slice(&porep_id_v1.to_le_bytes());\n//    assert!(is_legacy_porep_id(porep_id));\n//    seal_lifecycle::<SectorShape512MiB>(SECTOR_SIZE_512_MIB, &porep_id, ApiVersion::V1_0_0)\n//}\n\n//#[test]\n//#[ignore]\n//fn test_seal_lifecycle_512mib_porep_id_v1_top_8_0_0_api_v1_1() -> Result<()> {\n//    let porep_id_v1_1: u64 = 7; // This is a RegisteredSealProof value\n//\n//    let mut porep_id = [0u8; 32];\n//    porep_id[..8].copy_from_slice(&porep_id_v1_1.to_le_bytes());\n//    assert!(!is_legacy_porep_id(porep_id));\n//    seal_lifecycle::<SectorShape512MiB>(SECTOR_SIZE_512_MIB, &porep_id, ApiVersion::V1_1_0)\n//}\n\n//#[test]\n//#[ignore]\n//fn test_seal_lifecycle_32gib_porep_id_v1_top_8_8_0_api_v1() -> Result<()> {\n//    let porep_id_v1: u64 = 3; // This is a RegisteredSealProof value\n//\n//    let mut porep_id = [0u8; 32];\n//    porep_id[..8].copy_from_slice(&porep_id_v1.to_le_bytes());\n//    assert!(is_legacy_porep_id(porep_id));\n//    seal_lifecycle::<SectorShape32GiB>(SECTOR_SIZE_32_GIB, &porep_id, ApiVersion::V1_0_0)\n//}\n\n//#[test]\n//#[ignore]\n//fn test_seal_lifecycle_32gib_porep_id_v1_1_top_8_8_0_api_v1_1() -> Result<()> {\n//    let porep_id_v1_1: u64 = 8; // This is a RegisteredSealProof value\n//\n//    let mut porep_id = [0u8; 32];\n//    porep_id[..8].copy_from_slice(&porep_id_v1_1.to_le_bytes());\n//    assert!(!is_legacy_porep_id(porep_id));\n//    seal_lifecycle::<SectorShape32GiB>(SECTOR_SIZE_32_GIB, &porep_id, ApiVersion::V1_1_0)\n//}\n\n//#[test]\n//#[ignore]\n//fn test_seal_lifecycle_64gib_porep_id_v1_top_8_8_2_api_v1() -> Result<()> {\n//    let porep_id_v1: u64 = 4; // This is a RegisteredSealProof value\n//\n//    let mut porep_id = [0u8; 32];\n//    porep_id[..8].copy_from_slice(&porep_id_v1.to_le_bytes());\n//    assert!(is_legacy_porep_id(porep_id));\n//    seal_lifecycle::<SectorShape64GiB>(SECTOR_SIZE_64_GIB, &porep_id, ApiVersion::V1_0_0)\n//}\n\n//#[test]\n//#[ignore]\n//fn test_seal_lifecycle_64gib_porep_id_v1_1_top_8_8_2_api_v1_1() -> Result<()> {\n//    let porep_id_v1_1: u64 = 9; // This is a RegisteredSealProof value\n//\n//    let mut porep_id = [0u8; 32];\n//    porep_id[..8].copy_from_slice(&porep_id_v1_1.to_le_bytes());\n//    assert!(!is_legacy_porep_id(porep_id));\n//    seal_lifecycle::<SectorShape64GiB>(SECTOR_SIZE_64_GIB, &porep_id, ApiVersion::V1_1_0)\n//}\n\nfn seal_lifecycle<Tree: 'static + MerkleTreeTrait>(\n    sector_size: u64,\n    porep_id: &[u8; 32],\n    api_version: ApiVersion,\n) -> Result<()> {\n    let rng = &mut 
XorShiftRng::from_seed(TEST_SEED);\n    let prover_fr: DefaultTreeDomain = Fr::random(rng).into();\n    let mut prover_id = [0u8; 32];\n    prover_id.copy_from_slice(AsRef::<[u8]>::as_ref(&prover_fr));\n\n    create_seal::<_, Tree>(rng, sector_size, prover_id, false, porep_id, api_version)?;\n    Ok(())\n}\n\nfn get_layer_file_paths(cache_dir: &TempDir) -> Vec<PathBuf> {\n    let mut list: Vec<_> = read_dir(&cache_dir)\n        .expect(\"failed to read directory\")\n        .filter_map(|entry| {\n            let cur = entry.expect(\"reading directory failed\");\n            let entry_path = cur.path();\n            let entry_str = entry_path.to_str().expect(\"failed to get string from path\");\n            if entry_str.contains(\"data-layer\") {\n                Some(entry_path.clone())\n            } else {\n                None\n            }\n        })\n        .collect();\n    list.sort();\n    list\n}\n\nfn clear_cache_dir_keep_data_layer(cache_dir: &TempDir) {\n    for entry in read_dir(&cache_dir).expect(\"failed to read directory\") {\n        let entry_path = entry.expect(\"failed to get directory entry\").path();\n        if entry_path.is_file() {\n            // delete everything except the data-layers\n            if !entry_path\n                .to_str()\n                .expect(\"failed to get string from path\")\n                .contains(\"data-layer\")\n            {\n                remove_file(entry_path).expect(\"failed to remove file\")\n            }\n        }\n    }\n}\n\n#[test]\nfn test_resumable_seal_skip_proofs_v1() {\n    let porep_id_v1: u64 = 0; // This is a RegisteredSealProof value\n\n    let mut porep_id = [0u8; 32];\n    porep_id[..8].copy_from_slice(&porep_id_v1.to_le_bytes());\n    assert!(is_legacy_porep_id(porep_id));\n    run_resumable_seal::<SectorShape2KiB>(true, 0, &porep_id, ApiVersion::V1_0_0);\n    run_resumable_seal::<SectorShape2KiB>(true, 1, &porep_id, ApiVersion::V1_0_0);\n}\n\n#[test]\nfn test_resumable_seal_skip_proofs_v1_1() {\n    let porep_id_v1_1: u64 = 5; // This is a RegisteredSealProof value\n\n    let mut porep_id = [0u8; 32];\n    porep_id[..8].copy_from_slice(&porep_id_v1_1.to_le_bytes());\n    assert!(!is_legacy_porep_id(porep_id));\n    run_resumable_seal::<SectorShape2KiB>(true, 0, &porep_id, ApiVersion::V1_1_0);\n    run_resumable_seal::<SectorShape2KiB>(true, 1, &porep_id, ApiVersion::V1_1_0);\n}\n\n#[test]\n#[ignore]\nfn test_resumable_seal_v1() {\n    let porep_id_v1: u64 = 0; // This is a RegisteredSealProof value\n\n    let mut porep_id = [0u8; 32];\n    porep_id[..8].copy_from_slice(&porep_id_v1.to_le_bytes());\n    assert!(is_legacy_porep_id(porep_id));\n    run_resumable_seal::<SectorShape2KiB>(false, 0, &porep_id, ApiVersion::V1_0_0);\n    run_resumable_seal::<SectorShape2KiB>(false, 1, &porep_id, ApiVersion::V1_0_0);\n}\n\n#[test]\n#[ignore]\nfn test_resumable_seal_v1_1() {\n    let porep_id_v1_1: u64 = 5; // This is a RegisteredSealProof value\n\n    let mut porep_id = [0u8; 32];\n    porep_id[..8].copy_from_slice(&porep_id_v1_1.to_le_bytes());\n    assert!(!is_legacy_porep_id(porep_id));\n    run_resumable_seal::<SectorShape2KiB>(false, 0, &porep_id, ApiVersion::V1_1_0);\n    run_resumable_seal::<SectorShape2KiB>(false, 1, &porep_id, ApiVersion::V1_1_0);\n}\n\n/// Create a seal, delete a layer and resume\n///\n/// The current code works on two layers only. 
The `layer_to_delete` specifies (zero-based) which\n/// layer should be deleted.\nfn run_resumable_seal<Tree: 'static + MerkleTreeTrait>(\n    skip_proofs: bool,\n    layer_to_delete: usize,\n    porep_id: &[u8; 32],\n    api_version: ApiVersion,\n) {\n    init_logger();\n\n    let sector_size = SECTOR_SIZE_2_KIB;\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n    let prover_fr: DefaultTreeDomain = Fr::random(rng).into();\n    let mut prover_id = [0u8; 32];\n    prover_id.copy_from_slice(AsRef::<[u8]>::as_ref(&prover_fr));\n\n    let (mut piece_file, piece_bytes) =\n        generate_piece_file(sector_size).expect(\"failed to generate piece file\");\n    let sealed_sector_file = NamedTempFile::new().expect(\"failed to create sealed sector file\");\n    let cache_dir = tempdir().expect(\"failed to create temp dir\");\n\n    let config = porep_config(sector_size, *porep_id, api_version);\n    let ticket = rng.gen();\n    let sector_id = rng.gen::<u64>().into();\n\n    // First, create the seal as expected\n    run_seal_pre_commit_phase1::<Tree>(\n        config,\n        prover_id,\n        sector_id,\n        ticket,\n        &cache_dir,\n        &mut piece_file,\n        &sealed_sector_file,\n    )\n    .expect(\"failed to run seal pre commit phase1\");\n    let layers = get_layer_file_paths(&cache_dir);\n    assert_eq!(layers.len(), 2, \"not all expected layers were created\");\n\n    // Delete one layer, keep the other\n    clear_cache_dir_keep_data_layer(&cache_dir);\n    remove_file(&layers[layer_to_delete]).expect(\"failed to remove layer\");\n    let layers_remaining = get_layer_file_paths(&cache_dir);\n    assert_eq!(layers_remaining.len(), 1, \"expected one layer only\");\n    if layer_to_delete == 0 {\n        assert_eq!(layers_remaining[0], layers[1], \"wrong layer was removed\");\n    } else {\n        assert_eq!(layers_remaining[0], layers[0], \"wrong layer was removed\");\n    }\n\n    // Resume the seal\n    piece_file\n        .seek(SeekFrom::Start(0))\n        .expect(\"failed to seek piece file to start\");\n    let (piece_infos, phase1_output) = run_seal_pre_commit_phase1::<Tree>(\n        config,\n        prover_id,\n        sector_id,\n        ticket,\n        &cache_dir,\n        &mut piece_file,\n        &sealed_sector_file,\n    )\n    .expect(\"failed to run seal pre commit phase1\");\n\n    // Running the proofs clears the cache, hence we can only check for the existence of\n    // files if we don't run them\n    if skip_proofs {\n        let layers_recreated = get_layer_file_paths(&cache_dir);\n        assert_eq!(\n            layers_recreated.len(),\n            2,\n            \"not all expected layers were recreated\"\n        );\n        assert_eq!(\n            layers_recreated, layers,\n            \"recreated layers don't match original ones\"\n        );\n    } else {\n        let pre_commit_output = seal_pre_commit_phase2(\n            config,\n            phase1_output,\n            cache_dir.path(),\n            sealed_sector_file.path(),\n        )\n        .expect(\"failed to run seal pre commit phase2\");\n\n        validate_cache_for_commit::<_, _, Tree>(cache_dir.path(), sealed_sector_file.path())\n            .expect(\"failed to validate cache for commit\");\n\n        let seed = rng.gen();\n        proof_and_unseal::<Tree>(\n            config,\n            cache_dir.path(),\n            &sealed_sector_file,\n            prover_id,\n            sector_id,\n            ticket,\n            seed,\n            pre_commit_output,\n            
&piece_infos,\n            &piece_bytes,\n        )\n        .expect(\"failed to prove\");\n    }\n}\n\n#[test]\n#[ignore]\nfn test_winning_post_2kib_base_8() -> Result<()> {\n    winning_post::<SectorShape2KiB>(SECTOR_SIZE_2_KIB, false, ApiVersion::V1_0_0)?;\n    winning_post::<SectorShape2KiB>(SECTOR_SIZE_2_KIB, true, ApiVersion::V1_0_0)?;\n    winning_post::<SectorShape2KiB>(SECTOR_SIZE_2_KIB, false, ApiVersion::V1_1_0)?;\n    winning_post::<SectorShape2KiB>(SECTOR_SIZE_2_KIB, true, ApiVersion::V1_1_0)\n}\n\n#[test]\n#[ignore]\nfn test_winning_post_4kib_sub_8_2() -> Result<()> {\n    winning_post::<SectorShape4KiB>(SECTOR_SIZE_4_KIB, false, ApiVersion::V1_0_0)?;\n    winning_post::<SectorShape4KiB>(SECTOR_SIZE_4_KIB, true, ApiVersion::V1_0_0)?;\n    winning_post::<SectorShape4KiB>(SECTOR_SIZE_4_KIB, false, ApiVersion::V1_1_0)?;\n    winning_post::<SectorShape4KiB>(SECTOR_SIZE_4_KIB, true, ApiVersion::V1_1_0)\n}\n\n#[test]\n#[ignore]\nfn test_winning_post_16kib_sub_8_8() -> Result<()> {\n    winning_post::<SectorShape16KiB>(SECTOR_SIZE_16_KIB, false, ApiVersion::V1_0_0)?;\n    winning_post::<SectorShape16KiB>(SECTOR_SIZE_16_KIB, true, ApiVersion::V1_0_0)?;\n    winning_post::<SectorShape16KiB>(SECTOR_SIZE_16_KIB, false, ApiVersion::V1_1_0)?;\n    winning_post::<SectorShape16KiB>(SECTOR_SIZE_16_KIB, true, ApiVersion::V1_1_0)\n}\n\n#[test]\n#[ignore]\nfn test_winning_post_32kib_top_8_8_2() -> Result<()> {\n    winning_post::<SectorShape32KiB>(SECTOR_SIZE_32_KIB, false, ApiVersion::V1_0_0)?;\n    winning_post::<SectorShape32KiB>(SECTOR_SIZE_32_KIB, true, ApiVersion::V1_0_0)?;\n    winning_post::<SectorShape32KiB>(SECTOR_SIZE_32_KIB, false, ApiVersion::V1_1_0)?;\n    winning_post::<SectorShape32KiB>(SECTOR_SIZE_32_KIB, true, ApiVersion::V1_1_0)\n}\n\n#[test]\nfn test_winning_post_empty_sector_challenge() -> Result<()> {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let prover_fr: DefaultTreeDomain = Fr::random(rng).into();\n    let mut prover_id = [0u8; 32];\n    prover_id.copy_from_slice(AsRef::<[u8]>::as_ref(&prover_fr));\n\n    let sector_count = 0;\n    let sector_size = SECTOR_SIZE_2_KIB;\n    let api_version = ApiVersion::V1_1_0;\n\n    let (_, _, _, _) = create_seal::<_, SectorShape2KiB>(\n        rng,\n        sector_size,\n        prover_id,\n        true,\n        &ARBITRARY_POREP_ID_V1_1_0,\n        api_version,\n    )?;\n\n    let random_fr: DefaultTreeDomain = Fr::random(rng).into();\n    let mut randomness = [0u8; 32];\n    randomness.copy_from_slice(AsRef::<[u8]>::as_ref(&random_fr));\n\n    let config = PoStConfig {\n        sector_size: sector_size.into(),\n        sector_count,\n        challenge_count: WINNING_POST_CHALLENGE_COUNT,\n        typ: PoStType::Winning,\n        priority: false,\n        api_version: ApiVersion::V1_0_0,\n    };\n\n    assert!(generate_winning_post_sector_challenge::<SectorShape2KiB>(\n        &config,\n        &randomness,\n        sector_count as u64,\n        prover_id\n    )\n    .is_err());\n\n    Ok(())\n}\n\nfn winning_post<Tree: 'static + MerkleTreeTrait>(\n    sector_size: u64,\n    fake: bool,\n    api_version: ApiVersion,\n) -> Result<()> {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let prover_fr: DefaultTreeDomain = Fr::random(rng).into();\n    let mut prover_id = [0u8; 32];\n    prover_id.copy_from_slice(AsRef::<[u8]>::as_ref(&prover_fr));\n\n    let porep_id = match api_version {\n        ApiVersion::V1_0_0 => ARBITRARY_POREP_ID_V1_0_0,\n        ApiVersion::V1_1_0 => ARBITRARY_POREP_ID_V1_1_0,\n    
};\n\n    let (sector_id, replica, comm_r, cache_dir) = if fake {\n        create_fake_seal::<_, Tree>(rng, sector_size, &porep_id, api_version)?\n    } else {\n        create_seal::<_, Tree>(rng, sector_size, prover_id, true, &porep_id, api_version)?\n    };\n    let sector_count = WINNING_POST_SECTOR_COUNT;\n\n    let random_fr: DefaultTreeDomain = Fr::random(rng).into();\n    let mut randomness = [0u8; 32];\n    randomness.copy_from_slice(AsRef::<[u8]>::as_ref(&random_fr));\n\n    let config = PoStConfig {\n        sector_size: sector_size.into(),\n        sector_count,\n        challenge_count: WINNING_POST_CHALLENGE_COUNT,\n        typ: PoStType::Winning,\n        priority: false,\n        api_version,\n    };\n\n    let challenged_sectors = generate_winning_post_sector_challenge::<Tree>(\n        &config,\n        &randomness,\n        sector_count as u64,\n        prover_id,\n    )?;\n    assert_eq!(challenged_sectors.len(), sector_count);\n    assert_eq!(challenged_sectors[0], 0); // with a sector_count of 1, the only valid index is 0\n\n    let pub_replicas = vec![(sector_id, PublicReplicaInfo::new(comm_r)?)];\n    let private_replica_info =\n        PrivateReplicaInfo::new(replica.path().into(), comm_r, cache_dir.path().into())?;\n\n    /////////////////////////////////////////////\n    // The following methods of proof generation are functionally equivalent:\n    // 1)\n    //\n    let priv_replicas = vec![(sector_id, private_replica_info.clone())];\n    let proof = generate_winning_post::<Tree>(&config, &randomness, &priv_replicas[..], prover_id)?;\n\n    let valid =\n        verify_winning_post::<Tree>(&config, &randomness, &pub_replicas[..], prover_id, &proof)?;\n    assert!(valid, \"proof did not verify\");\n\n    //\n    // 2)\n    let mut vanilla_proofs = Vec::with_capacity(sector_count);\n    let challenges = generate_fallback_sector_challenges::<Tree>(\n        &config,\n        &randomness,\n        &vec![sector_id],\n        prover_id,\n    )?;\n\n    let single_proof = generate_single_vanilla_proof::<Tree>(\n        &config,\n        sector_id,\n        &private_replica_info,\n        &challenges[&sector_id],\n    )?;\n\n    vanilla_proofs.push(single_proof);\n\n    let proof = generate_winning_post_with_vanilla::<Tree>(\n        &config,\n        &randomness,\n        prover_id,\n        vanilla_proofs,\n    )?;\n    /////////////////////////////////////////////\n\n    let valid =\n        verify_winning_post::<Tree>(&config, &randomness, &pub_replicas[..], prover_id, &proof)?;\n    assert!(valid, \"proof did not verify\");\n\n    Ok(())\n}\n\n#[test]\n#[ignore]\nfn test_window_post_single_partition_smaller_2kib_base_8() -> Result<()> {\n    let sector_size = SECTOR_SIZE_2_KIB;\n    let sector_count = *WINDOW_POST_SECTOR_COUNT\n        .read()\n        .expect(\"WINDOW_POST_SECTOR_COUNT poisoned\")\n        .get(&sector_size)\n        .expect(\"unknown sector size\");\n\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        sector_count / 2,\n        sector_count,\n        false,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        sector_count / 2,\n        sector_count,\n        true,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        sector_count / 2,\n        sector_count,\n        false,\n        ApiVersion::V1_1_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        sector_count / 2,\n        sector_count,\n        
true,\n        ApiVersion::V1_1_0,\n    )\n}\n\n#[test]\n#[ignore]\nfn test_window_post_two_partitions_matching_2kib_base_8() -> Result<()> {\n    let sector_size = SECTOR_SIZE_2_KIB;\n    let sector_count = *WINDOW_POST_SECTOR_COUNT\n        .read()\n        .expect(\"WINDOW_POST_SECTOR_COUNT poisoned\")\n        .get(&sector_size)\n        .expect(\"unknown sector size\");\n\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        false,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        true,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        false,\n        ApiVersion::V1_1_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        true,\n        ApiVersion::V1_1_0,\n    )\n}\n\n#[test]\n#[ignore]\nfn test_window_post_two_partitions_matching_4kib_sub_8_2() -> Result<()> {\n    let sector_size = SECTOR_SIZE_4_KIB;\n    let sector_count = *WINDOW_POST_SECTOR_COUNT\n        .read()\n        .expect(\"WINDOW_POST_SECTOR_COUNT poisoned\")\n        .get(&sector_size)\n        .expect(\"unknown sector size\");\n\n    window_post::<SectorShape4KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        false,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape4KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        true,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape4KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        false,\n        ApiVersion::V1_1_0,\n    )?;\n    window_post::<SectorShape4KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        true,\n        ApiVersion::V1_1_0,\n    )\n}\n\n#[test]\n#[ignore]\nfn test_window_post_two_partitions_matching_16kib_sub_8_8() -> Result<()> {\n    let sector_size = SECTOR_SIZE_16_KIB;\n    let sector_count = *WINDOW_POST_SECTOR_COUNT\n        .read()\n        .expect(\"WINDOW_POST_SECTOR_COUNT poisoned\")\n        .get(&sector_size)\n        .expect(\"unknown sector size\");\n\n    window_post::<SectorShape16KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        false,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape16KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        true,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape16KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        false,\n        ApiVersion::V1_1_0,\n    )?;\n    window_post::<SectorShape16KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        true,\n        ApiVersion::V1_1_0,\n    )\n}\n\n#[test]\n#[ignore]\nfn test_window_post_two_partitions_matching_32kib_top_8_8_2() -> Result<()> {\n    let sector_size = SECTOR_SIZE_32_KIB;\n    let sector_count = *WINDOW_POST_SECTOR_COUNT\n        .read()\n        .expect(\"WINDOW_POST_SECTOR_COUNT poisoned\")\n        .get(&sector_size)\n        .expect(\"unknown sector size\");\n\n    window_post::<SectorShape32KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        false,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape32KiB>(\n 
       sector_size,\n        2 * sector_count,\n        sector_count,\n        true,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape32KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        false,\n        ApiVersion::V1_1_0,\n    )?;\n    window_post::<SectorShape32KiB>(\n        sector_size,\n        2 * sector_count,\n        sector_count,\n        true,\n        ApiVersion::V1_1_0,\n    )\n}\n\n#[test]\n#[ignore]\nfn test_window_post_two_partitions_smaller_2kib_base_8() -> Result<()> {\n    let sector_size = SECTOR_SIZE_2_KIB;\n    let sector_count = *WINDOW_POST_SECTOR_COUNT\n        .read()\n        .expect(\"WINDOW_POST_SECTOR_COUNT poisoned\")\n        .get(&sector_size)\n        .expect(\"unknown sector size\");\n\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        2 * sector_count - 1,\n        sector_count,\n        false,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        2 * sector_count - 1,\n        sector_count,\n        true,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        2 * sector_count - 1,\n        sector_count,\n        false,\n        ApiVersion::V1_1_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        2 * sector_count - 1,\n        sector_count,\n        true,\n        ApiVersion::V1_1_0,\n    )\n}\n\n#[test]\n#[ignore]\nfn test_window_post_single_partition_matching_2kib_base_8() -> Result<()> {\n    let sector_size = SECTOR_SIZE_2_KIB;\n    let sector_count = *WINDOW_POST_SECTOR_COUNT\n        .read()\n        .expect(\"WINDOW_POST_SECTOR_COUNT poisoned\")\n        .get(&sector_size)\n        .expect(\"unknown sector size\");\n\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        sector_count,\n        sector_count,\n        false,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        sector_count,\n        sector_count,\n        true,\n        ApiVersion::V1_0_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        sector_count,\n        sector_count,\n        false,\n        ApiVersion::V1_1_0,\n    )?;\n    window_post::<SectorShape2KiB>(\n        sector_size,\n        sector_count,\n        sector_count,\n        true,\n        ApiVersion::V1_1_0,\n    )\n}\n\nfn window_post<Tree: 'static + MerkleTreeTrait>(\n    sector_size: u64,\n    total_sector_count: usize,\n    sector_count: usize,\n    fake: bool,\n    api_version: ApiVersion,\n) -> Result<()> {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let mut sectors = Vec::with_capacity(total_sector_count);\n    let mut pub_replicas = BTreeMap::new();\n    let mut priv_replicas = BTreeMap::new();\n\n    let prover_fr: <Tree::Hasher as Hasher>::Domain = Fr::random(rng).into();\n    let mut prover_id = [0u8; 32];\n    prover_id.copy_from_slice(AsRef::<[u8]>::as_ref(&prover_fr));\n\n    let porep_id = match api_version {\n        ApiVersion::V1_0_0 => ARBITRARY_POREP_ID_V1_0_0,\n        ApiVersion::V1_1_0 => ARBITRARY_POREP_ID_V1_1_0,\n    };\n\n    for _ in 0..total_sector_count {\n        let (sector_id, replica, comm_r, cache_dir) = if fake {\n            create_fake_seal::<_, Tree>(rng, sector_size, &porep_id, api_version)?\n        } else {\n            create_seal::<_, Tree>(rng, sector_size, prover_id, true, &porep_id, api_version)?\n        };\n        priv_replicas.insert(\n            
sector_id,\n            PrivateReplicaInfo::new(replica.path().into(), comm_r, cache_dir.path().into())?,\n        );\n        pub_replicas.insert(sector_id, PublicReplicaInfo::new(comm_r)?);\n        sectors.push((sector_id, replica, comm_r, cache_dir, prover_id));\n    }\n    assert_eq!(priv_replicas.len(), total_sector_count);\n    assert_eq!(pub_replicas.len(), total_sector_count);\n    assert_eq!(sectors.len(), total_sector_count);\n\n    let random_fr: <Tree::Hasher as Hasher>::Domain = Fr::random(rng).into();\n    let mut randomness = [0u8; 32];\n    randomness.copy_from_slice(AsRef::<[u8]>::as_ref(&random_fr));\n\n    let config = PoStConfig {\n        sector_size: sector_size.into(),\n        sector_count,\n        challenge_count: WINDOW_POST_CHALLENGE_COUNT,\n        typ: PoStType::Window,\n        priority: false,\n        api_version,\n    };\n\n    /////////////////////////////////////////////\n    // The following methods of proof generation are functionally equivalent:\n    // 1)\n    let proof = generate_window_post::<Tree>(&config, &randomness, &priv_replicas, prover_id)?;\n\n    let valid = verify_window_post::<Tree>(&config, &randomness, &pub_replicas, prover_id, &proof)?;\n    assert!(valid, \"proof did not verify\");\n\n    // 2)\n    let replica_sectors = priv_replicas\n        .iter()\n        .map(|(sector, _replica)| *sector)\n        .collect::<Vec<SectorId>>();\n\n    let challenges = generate_fallback_sector_challenges::<Tree>(\n        &config,\n        &randomness,\n        &replica_sectors,\n        prover_id,\n    )?;\n\n    let mut vanilla_proofs = Vec::with_capacity(replica_sectors.len());\n\n    for (sector_id, replica) in priv_replicas.iter() {\n        let sector_challenges = &challenges[sector_id];\n        let single_proof =\n            generate_single_vanilla_proof::<Tree>(&config, *sector_id, replica, sector_challenges)?;\n\n        vanilla_proofs.push(single_proof);\n    }\n\n    let proof =\n        generate_window_post_with_vanilla::<Tree>(&config, &randomness, prover_id, vanilla_proofs)?;\n    /////////////////////////////////////////////\n\n    let valid = verify_window_post::<Tree>(&config, &randomness, &pub_replicas, prover_id, &proof)?;\n    assert!(valid, \"proof did not verify\");\n\n    Ok(())\n}\n\nfn generate_piece_file(sector_size: u64) -> Result<(NamedTempFile, Vec<u8>)> {\n    let number_of_bytes_in_piece = UnpaddedBytesAmount::from(PaddedBytesAmount(sector_size));\n\n    let piece_bytes: Vec<u8> = (0..number_of_bytes_in_piece.0)\n        .map(|_| random::<u8>())\n        .collect();\n\n    let mut piece_file = NamedTempFile::new()?;\n    piece_file.write_all(&piece_bytes)?;\n    piece_file.as_file_mut().sync_all()?;\n    piece_file.as_file_mut().seek(SeekFrom::Start(0))?;\n\n    Ok((piece_file, piece_bytes))\n}\n\nfn porep_config(sector_size: u64, porep_id: [u8; 32], api_version: ApiVersion) -> PoRepConfig {\n    PoRepConfig {\n        sector_size: SectorSize(sector_size),\n        partitions: PoRepProofPartitions(\n            *POREP_PARTITIONS\n                .read()\n                .expect(\"POREP_PARTITIONS poisoned\")\n                .get(&sector_size)\n                .expect(\"unknown sector size\"),\n        ),\n        porep_id,\n        api_version,\n    }\n}\n\nfn run_seal_pre_commit_phase1<Tree: 'static + MerkleTreeTrait>(\n    config: PoRepConfig,\n    prover_id: ProverId,\n    sector_id: SectorId,\n    ticket: [u8; 32],\n    cache_dir: &TempDir,\n    mut piece_file: &mut NamedTempFile,\n    sealed_sector_file: 
&NamedTempFile,\n) -> Result<(Vec<PieceInfo>, SealPreCommitPhase1Output<Tree>)> {\n    let number_of_bytes_in_piece =\n        UnpaddedBytesAmount::from(PaddedBytesAmount(config.sector_size.into()));\n\n    let piece_info = generate_piece_commitment(piece_file.as_file_mut(), number_of_bytes_in_piece)?;\n    piece_file.as_file_mut().seek(SeekFrom::Start(0))?;\n\n    let mut staged_sector_file = NamedTempFile::new()?;\n    add_piece(\n        &mut piece_file,\n        &mut staged_sector_file,\n        number_of_bytes_in_piece,\n        &[],\n    )?;\n\n    let piece_infos = vec![piece_info];\n\n    let phase1_output = seal_pre_commit_phase1::<_, _, _, Tree>(\n        config,\n        cache_dir.path(),\n        staged_sector_file.path(),\n        sealed_sector_file.path(),\n        prover_id,\n        sector_id,\n        ticket,\n        &piece_infos,\n    )?;\n\n    validate_cache_for_precommit_phase2(\n        cache_dir.path(),\n        staged_sector_file.path(),\n        &phase1_output,\n    )?;\n\n    Ok((piece_infos, phase1_output))\n}\n\nfn proof_and_unseal<Tree: 'static + MerkleTreeTrait>(\n    config: PoRepConfig,\n    cache_dir_path: &Path,\n    sealed_sector_file: &NamedTempFile,\n    prover_id: ProverId,\n    sector_id: SectorId,\n    ticket: [u8; 32],\n    seed: [u8; 32],\n    pre_commit_output: SealPreCommitOutput,\n    piece_infos: &[PieceInfo],\n    piece_bytes: &[u8],\n) -> Result<()> {\n    let comm_d = pre_commit_output.comm_d;\n    let comm_r = pre_commit_output.comm_r;\n\n    let mut unseal_file = NamedTempFile::new()?;\n    let phase1_output = seal_commit_phase1::<_, Tree>(\n        config,\n        cache_dir_path,\n        sealed_sector_file.path(),\n        prover_id,\n        sector_id,\n        ticket,\n        seed,\n        pre_commit_output,\n        &piece_infos,\n    )?;\n\n    clear_cache::<Tree>(cache_dir_path)?;\n\n    let commit_output = seal_commit_phase2(config, phase1_output, prover_id, sector_id)?;\n\n    let _ = get_unsealed_range::<_, Tree>(\n        config,\n        cache_dir_path,\n        sealed_sector_file.path(),\n        unseal_file.path(),\n        prover_id,\n        sector_id,\n        comm_d,\n        ticket,\n        UnpaddedByteIndex(508),\n        UnpaddedBytesAmount(508),\n    )?;\n\n    unseal_file.seek(SeekFrom::Start(0))?;\n\n    let mut contents = vec![];\n    assert!(\n        unseal_file.read_to_end(&mut contents).is_ok(),\n        \"failed to populate buffer with unsealed bytes\"\n    );\n    assert_eq!(contents.len(), 508);\n    assert_eq!(&piece_bytes[508..508 + 508], &contents[..]);\n\n    let computed_comm_d = compute_comm_d(config.sector_size, &piece_infos)?;\n\n    assert_eq!(\n        comm_d, computed_comm_d,\n        \"Computed and expected comm_d don't match.\"\n    );\n\n    let verified = verify_seal::<Tree>(\n        config,\n        comm_r,\n        comm_d,\n        prover_id,\n        sector_id,\n        ticket,\n        seed,\n        &commit_output.proof,\n    )?;\n    assert!(verified, \"failed to verify valid seal\");\n    Ok(())\n}\n\nfn create_seal<R: Rng, Tree: 'static + MerkleTreeTrait>(\n    rng: &mut R,\n    sector_size: u64,\n    prover_id: ProverId,\n    skip_proof: bool,\n    porep_id: &[u8; 32],\n    api_version: ApiVersion,\n) -> Result<(SectorId, NamedTempFile, Commitment, TempDir)> {\n    init_logger();\n\n    let (mut piece_file, piece_bytes) = generate_piece_file(sector_size)?;\n    let sealed_sector_file = NamedTempFile::new()?;\n    let cache_dir = tempdir().expect(\"failed to create temp 
dir\");\n\n    let config = porep_config(sector_size, *porep_id, api_version);\n    let ticket = rng.gen();\n    let seed = rng.gen();\n    let sector_id = rng.gen::<u64>().into();\n\n    let (piece_infos, phase1_output) = run_seal_pre_commit_phase1::<Tree>(\n        config,\n        prover_id,\n        sector_id,\n        ticket,\n        &cache_dir,\n        &mut piece_file,\n        &sealed_sector_file,\n    )?;\n\n    let pre_commit_output = seal_pre_commit_phase2(\n        config,\n        phase1_output,\n        cache_dir.path(),\n        sealed_sector_file.path(),\n    )?;\n\n    let comm_r = pre_commit_output.comm_r;\n\n    validate_cache_for_commit::<_, _, Tree>(cache_dir.path(), sealed_sector_file.path())?;\n\n    if skip_proof {\n        clear_cache::<Tree>(cache_dir.path())?;\n    } else {\n        proof_and_unseal::<Tree>(\n            config,\n            cache_dir.path(),\n            &sealed_sector_file,\n            prover_id,\n            sector_id,\n            ticket,\n            seed,\n            pre_commit_output,\n            &piece_infos,\n            &piece_bytes,\n        )\n        .expect(\"failed to proof\");\n    }\n\n    Ok((sector_id, sealed_sector_file, comm_r, cache_dir))\n}\n\nfn create_fake_seal<R: Rng, Tree: 'static + MerkleTreeTrait>(\n    mut rng: &mut R,\n    sector_size: u64,\n    porep_id: &[u8; 32],\n    api_version: ApiVersion,\n) -> Result<(SectorId, NamedTempFile, Commitment, TempDir)> {\n    init_logger();\n\n    let sealed_sector_file = NamedTempFile::new()?;\n\n    let config = porep_config(sector_size, *porep_id, api_version);\n\n    let cache_dir = tempdir().unwrap();\n\n    let sector_id = rng.gen::<u64>().into();\n\n    let comm_r = fauxrep_aux::<_, _, _, Tree>(\n        &mut rng,\n        config,\n        cache_dir.path(),\n        sealed_sector_file.path(),\n    )?;\n\n    Ok((sector_id, sealed_sector_file, comm_r, cache_dir))\n}\n"
  },
  {
    "path": "filecoin-proofs/tests/constants.rs",
    "content": "use filecoin_proofs::{\n    with_shape, SECTOR_SIZE_16_MIB, SECTOR_SIZE_1_GIB, SECTOR_SIZE_2_KIB, SECTOR_SIZE_32_GIB,\n    SECTOR_SIZE_4_KIB, SECTOR_SIZE_512_MIB, SECTOR_SIZE_64_GIB, SECTOR_SIZE_8_MIB,\n};\nuse generic_array::typenum::Unsigned;\nuse storage_proofs_core::merkle::MerkleTreeTrait;\n\nfn canonical_shape(sector_size: u64) -> (usize, usize, usize) {\n    // This could perhaps be cleaned up, but I think it expresses the intended constraints\n    // and is consistent with our current hard-coded size->shape mappings.\n    assert_eq!(sector_size.count_ones(), 1);\n    let log_byte_size = sector_size.trailing_zeros();\n    let log_nodes = log_byte_size - 5; // 2^5 = 32-byte nodes\n\n    let max_tree_log = 3; // Largest allowable arity. The optimal shape.\n\n    let log_max_base = 27; // 4 GiB worth of nodes\n    let log_base = max_tree_log; // Base must be oct trees.x\n    let log_in_base = u32::min(log_max_base, (log_nodes / log_base) * log_base); // How many nodes in base?\n\n    let log_upper = log_nodes - log_in_base; // Nodes in sub and upper combined.\n    let log_rem = log_upper % max_tree_log; // Remainder after filling optimal trees.\n\n    let (log_sub, log_top) = {\n        // Are the upper trees empty?\n        if log_upper > 0 {\n            // Do we need a remainder tree?\n            if log_rem == 0 {\n                (Some(max_tree_log), None) // No remainder tree, fill the sub tree optimall.y\n            } else {\n                // Need a remainder tree.\n\n                // Do we have room for another max tree?\n                if log_upper > max_tree_log {\n                    // There is room. Use the sub tree for as much overflow as we can fit optimally.\n                    // And put the rest in the top tree.\n                    (Some(max_tree_log), Some(log_rem))\n                } else {\n                    // Can't fit another max tree.\n                    // Just put the remainder in the sub tree.\n                    (Some(log_rem), None)\n                }\n            }\n        } else {\n            // Upper trees are empty.\n            (None, None)\n        }\n    };\n\n    let base = 1 << log_base;\n    let sub = if let Some(l) = log_sub { 1 << l } else { 0 };\n    let top = if let Some(l) = log_top { 1 << l } else { 0 };\n\n    (base, sub, top)\n}\n\nfn arities_to_usize<Tree: MerkleTreeTrait>() -> (usize, usize, usize) {\n    (\n        Tree::Arity::to_usize(),\n        Tree::SubTreeArity::to_usize(),\n        Tree::TopTreeArity::to_usize(),\n    )\n}\n\n#[test]\nfn test_with_shape_macro() {\n    test_with_shape_macro_aux(SECTOR_SIZE_2_KIB);\n    test_with_shape_macro_aux(SECTOR_SIZE_4_KIB);\n    test_with_shape_macro_aux(SECTOR_SIZE_8_MIB);\n    test_with_shape_macro_aux(SECTOR_SIZE_16_MIB);\n    test_with_shape_macro_aux(SECTOR_SIZE_512_MIB);\n    test_with_shape_macro_aux(SECTOR_SIZE_1_GIB);\n    test_with_shape_macro_aux(SECTOR_SIZE_32_GIB);\n    test_with_shape_macro_aux(SECTOR_SIZE_64_GIB);\n}\n\nfn test_with_shape_macro_aux(sector_size: u64) {\n    let expected = canonical_shape(sector_size);\n    let arities = with_shape!(sector_size, arities_to_usize);\n    assert_eq!(\n        arities, expected,\n        \"Wrong shape for sector size {}: have {:?} but need {:?}.\",\n        sector_size, arities, expected\n    );\n}\n"
  },
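To make the arithmetic in `canonical_shape` concrete, here is a compact, self-contained restatement (a hypothetical helper, not crate API), checked against the shapes implied by the test names in this file set (`base_8`, `sub_8_2`, `top_8_8_0`, `top_8_8_2`):

// Re-derives the (base, sub, top) arity shape from a power-of-two sector size.
fn shape(sector_size: u64) -> (usize, usize, usize) {
    assert_eq!(sector_size.count_ones(), 1);
    let log_nodes = sector_size.trailing_zeros() - 5; // 32-byte nodes
    let log_in_base = u32::min(27, (log_nodes / 3) * 3); // at most 2^27 nodes per oct base tree
    let log_upper = log_nodes - log_in_base; // nodes left over for sub/top trees
    let log_rem = log_upper % 3;
    let (sub, top) = if log_upper == 0 {
        (0, 0) // base trees only
    } else if log_rem == 0 {
        (8, 0) // the sub tree fills optimally; no top tree needed
    } else if log_upper > 3 {
        (8, 1usize << log_rem) // optimal sub tree, remainder goes to the top tree
    } else {
        (1usize << log_rem, 0) // remainder fits in the sub tree alone
    };
    (8, sub, top)
}

fn main() {
    assert_eq!(shape(1 << 11), (8, 0, 0)); // 2 KiB:  base_8
    assert_eq!(shape(1 << 12), (8, 2, 0)); // 4 KiB:  sub_8_2
    assert_eq!(shape(1 << 35), (8, 8, 0)); // 32 GiB: top_8_8_0
    assert_eq!(shape(1 << 36), (8, 8, 2)); // 64 GiB: top_8_8_2
}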
  {
    "path": "filecoin-proofs/tests/mod.rs",
    "content": "use std::panic::panic_any;\n\nuse bellperson::bls::Fr;\nuse ff::Field;\nuse filecoin_proofs::{\n    as_safe_commitment, verify_seal, DefaultOctLCTree, DefaultTreeDomain, PoRepConfig,\n    PoRepProofPartitions, SectorSize, POREP_PARTITIONS, SECTOR_SIZE_2_KIB, TEST_SEED,\n};\nuse fr32::bytes_into_fr;\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{api_version::ApiVersion, sector::SectorId};\n\n#[test]\nfn test_verify_seal_fr32_validation() {\n    let convertible_to_fr_bytes = [0; 32];\n    let out = bytes_into_fr(&convertible_to_fr_bytes);\n    assert!(out.is_ok(), \"tripwire\");\n\n    let not_convertible_to_fr_bytes = [255; 32];\n    let out = bytes_into_fr(&not_convertible_to_fr_bytes);\n    assert!(out.is_err(), \"tripwire\");\n\n    let arbitrary_porep_id = [87; 32];\n    {\n        let result = verify_seal::<DefaultOctLCTree>(\n            PoRepConfig {\n                sector_size: SectorSize(SECTOR_SIZE_2_KIB),\n                partitions: PoRepProofPartitions(\n                    *POREP_PARTITIONS\n                        .read()\n                        .expect(\"POREP_PARTITIONS poisoned\")\n                        .get(&SECTOR_SIZE_2_KIB)\n                        .expect(\"unknown sector size\"),\n                ),\n                porep_id: arbitrary_porep_id,\n                api_version: ApiVersion::V1_1_0,\n            },\n            not_convertible_to_fr_bytes,\n            convertible_to_fr_bytes,\n            [0; 32],\n            SectorId::from(0),\n            [0; 32],\n            [0; 32],\n            &[],\n        );\n\n        if let Err(err) = result {\n            let needle = \"Invalid all zero commitment\";\n            let haystack = format!(\"{}\", err);\n\n            assert!(\n                haystack.contains(needle),\n                \"\\\"{}\\\" did not contain \\\"{}\\\"\",\n                haystack,\n                needle,\n            );\n        } else {\n            panic_any(\"should have failed comm_r to Fr32 conversion\");\n        }\n    }\n\n    {\n        let result = verify_seal::<DefaultOctLCTree>(\n            PoRepConfig {\n                sector_size: SectorSize(SECTOR_SIZE_2_KIB),\n                partitions: PoRepProofPartitions(\n                    *POREP_PARTITIONS\n                        .read()\n                        .expect(\"POREP_PARTITIONS poisoned\")\n                        .get(&SECTOR_SIZE_2_KIB)\n                        .expect(\"unknown sector size\"),\n                ),\n                porep_id: arbitrary_porep_id,\n                api_version: ApiVersion::V1_1_0,\n            },\n            convertible_to_fr_bytes,\n            not_convertible_to_fr_bytes,\n            [0; 32],\n            SectorId::from(0),\n            [0; 32],\n            [0; 32],\n            &[],\n        );\n\n        if let Err(err) = result {\n            let needle = \"Invalid all zero commitment\";\n            let haystack = format!(\"{}\", err);\n\n            assert!(\n                haystack.contains(needle),\n                \"\\\"{}\\\" did not contain \\\"{}\\\"\",\n                haystack,\n                needle,\n            );\n        } else {\n            panic_any(\"should have failed comm_d to Fr32 conversion\");\n        }\n    }\n}\n\n#[test]\nfn test_random_domain_element() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    for _ in 0..100 {\n        let random_el: DefaultTreeDomain = Fr::random(rng).into();\n        let mut randomness = [0u8; 32];\n 
       randomness.copy_from_slice(AsRef::<[u8]>::as_ref(&random_el));\n        let back: DefaultTreeDomain =\n            as_safe_commitment(&randomness, \"test\").expect(\"failed to get domain from randomness\");\n        assert_eq!(back, random_el);\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/tests/paramfetch/mod.rs",
    "content": "mod support;\n\npub mod prompts_to_fetch;\n"
  },
  {
    "path": "filecoin-proofs/tests/paramfetch/prompts_to_fetch.rs",
    "content": "use std::collections::btree_map::BTreeMap;\nuse std::fs::File;\nuse std::io::{BufReader, Write};\nuse std::path::PathBuf;\n\nuse failure::Error as FailureError;\n\nuse crate::paramfetch::support::session::ParamFetchSessionBuilder;\nuse crate::support::tmp_manifest;\nuse blake2b_simd::State as Blake2b;\nuse filecoin_proofs::param::{ParameterData, ParameterMap};\nuse rand::Rng;\n\n/// Produce a random sequence of bytes and first 32 characters of hex encoded\n/// BLAKE2b checksum. This helper function must be kept up-to-date with the\n/// parampublish implementation.\nfn rand_bytes_with_blake2b() -> Result<(Vec<u8>, String), FailureError> {\n    let bytes = rand::thread_rng().gen::<[u8; 32]>();\n\n    let mut hasher = Blake2b::new();\n\n    let mut as_slice = &bytes[..];\n\n    std::io::copy(&mut as_slice, &mut hasher)?;\n\n    Ok((\n        bytes.iter().cloned().collect(),\n        hasher.finalize().to_hex()[..32].into(),\n    ))\n}\n\n#[test]\nfn nothing_to_fetch_if_cache_fully_hydrated() -> Result<(), FailureError> {\n    let mut manifest: BTreeMap<String, ParameterData> = BTreeMap::new();\n\n    let (aaa_bytes, aaa_checksum) = rand_bytes_with_blake2b()?;\n    let mut aaa_bytes: &[u8] = &aaa_bytes;\n\n    // manifest entry checksum matches the BLAKE2b we compute locally\n    manifest.insert(\n        \"aaa.vk\".to_string(),\n        ParameterData {\n            cid: \"\".to_string(),\n            digest: aaa_checksum.clone(),\n            sector_size: 1234,\n        },\n    );\n\n    let manifest_pbuf = tmp_manifest(Some(manifest))?;\n\n    let mut session = ParamFetchSessionBuilder::new(Some(manifest_pbuf))\n        .with_session_timeout_ms(1000)\n        .with_file_and_bytes(\"aaa.vk\", &mut aaa_bytes)\n        .build();\n\n    session.exp_string(\"checking: aaa.vk\")?;\n    session.exp_string(\"0 files to fetch\")?;\n    session.exp_string(\"done\")?;\n\n    Ok(())\n}\n\n#[test]\nfn prompts_to_download_if_file_in_manifest_is_missing() -> Result<(), FailureError> {\n    let mut manifest: BTreeMap<String, ParameterData> = BTreeMap::new();\n\n    manifest.insert(\n        \"aaa.vk\".to_string(),\n        ParameterData {\n            cid: \"\".to_string(),\n            digest: \"\".to_string(),\n            sector_size: 1234,\n        },\n    );\n\n    let manifest_pbuf = tmp_manifest(Some(manifest))?;\n\n    let mut session = ParamFetchSessionBuilder::new(Some(manifest_pbuf))\n        .with_session_timeout_ms(1000)\n        .build();\n\n    session.exp_string(\"checking: aaa.vk\")?;\n    session.exp_string(\"does file exist... 
no\")?;\n    session.exp_string(\"[y/n] (sector size: 1234B) aaa.vk: \")?;\n\n    Ok(())\n}\n\n#[test]\nfn prompts_to_download_if_file_checksum_does_not_match_manifest() -> Result<(), FailureError> {\n    let mut manifest: BTreeMap<String, ParameterData> = BTreeMap::new();\n\n    let (aaa_bytes, _) = rand_bytes_with_blake2b()?;\n    let mut aaa_bytes: &[u8] = &aaa_bytes;\n\n    manifest.insert(\n        \"aaa.vk\".to_string(),\n        ParameterData {\n            cid: \"\".to_string(),\n            digest: \"obviouslywrong\".to_string(),\n            sector_size: 5555,\n        },\n    );\n\n    // create a manifest\n    let manifest_pbuf = tmp_manifest(Some(manifest))?;\n\n    // start a session\n    let mut session = ParamFetchSessionBuilder::new(Some(manifest_pbuf))\n        .with_session_timeout_ms(1000)\n        .with_file_and_bytes(\"aaa.vk\", &mut aaa_bytes)\n        .build();\n\n    session.exp_string(\"checking: aaa.vk\")?;\n    session.exp_string(\"does file exist... yes\")?;\n    session.exp_string(\"is file valid... no\")?;\n    session.exp_string(\"[y/n] (sector size: 5555B) aaa.vk: \")?;\n\n    Ok(())\n}\n\n#[test]\nfn fetches_vk_even_if_sector_size_does_not_match() -> Result<(), FailureError> {\n    let mut manifest: BTreeMap<String, ParameterData> = BTreeMap::new();\n\n    manifest.insert(\n        \"aaa.params\".to_string(),\n        ParameterData {\n            cid: \"\".to_string(),\n            digest: \"\".to_string(),\n            sector_size: 1234,\n        },\n    );\n\n    manifest.insert(\n        \"aaa.vk\".to_string(),\n        ParameterData {\n            cid: \"\".to_string(),\n            digest: \"\".to_string(),\n            sector_size: 1234,\n        },\n    );\n\n    let manifest_pbuf = tmp_manifest(Some(manifest))?;\n\n    let mut session = ParamFetchSessionBuilder::new(Some(manifest_pbuf))\n        .with_session_timeout_ms(1000)\n        .whitelisted_sector_sizes(vec![\"6666\".to_string(), \"4444\".to_string()])\n        .build();\n\n    session.exp_string(\"2 files in manifest\")?;\n    session.exp_string(\"1 files to check for (re)download\")?;\n    session.exp_string(\"checking: aaa.vk\")?;\n    session.exp_string(\"does file exist... 
no\")?;\n\n    Ok(())\n}\n\n#[test]\nfn invalid_json_path_produces_error() -> Result<(), FailureError> {\n    let mut session = ParamFetchSessionBuilder::new(Some(PathBuf::from(\"/invalid/path\")))\n        .with_session_timeout_ms(1000)\n        .build();\n\n    session.exp_string(\"fatal error: JSON file '/invalid/path' does not exist\")?;\n\n    Ok(())\n}\n\n#[test]\nfn invalid_json_produces_error() -> Result<(), FailureError> {\n    let manifest_pbuf = tmp_manifest(None)?;\n\n    let mut file = File::create(&manifest_pbuf)?;\n    file.write_all(b\"invalid json\")?;\n\n    let mut session = ParamFetchSessionBuilder::new(Some(manifest_pbuf))\n        .with_session_timeout_ms(1000)\n        .build();\n\n    session.exp_string(\"fatal error: JSON file\")?;\n    session.exp_string(\"did not parse correctly\")?;\n\n    Ok(())\n}\n\n#[test]\nfn no_json_path_uses_default_manifest() -> Result<(), FailureError> {\n    let file = File::open(\"parameters.json\")?;\n    let reader = BufReader::new(file);\n    let manifest: ParameterMap = serde_json::from_reader(reader)?;\n\n    let mut session = ParamFetchSessionBuilder::new(None)\n        .with_session_timeout_ms(1000)\n        .build();\n\n    session.exp_string(\"using built-in manifest\")?;\n\n    for parameter in manifest.keys() {\n        session.exp_string(&format!(\"checking: {}\", parameter))?;\n    }\n\n    Ok(())\n}\n"
  },
  {
    "path": "filecoin-proofs/tests/paramfetch/support/mod.rs",
    "content": "pub mod session;\n"
  },
  {
    "path": "filecoin-proofs/tests/paramfetch/support/session.rs",
    "content": "use std::fs::File;\nuse std::io::Read;\nuse std::path::{Path, PathBuf};\n\nuse failure::SyncFailure;\nuse rexpect::session::PtyBashSession;\nuse tempfile;\nuse tempfile::TempDir;\n\nuse crate::support::{cargo_bin, spawn_bash_with_retries};\nuse storage_proofs::parameter_cache::PARAMETER_CACHE_ENV_VAR;\n\npub struct ParamFetchSessionBuilder {\n    cache_dir: TempDir,\n    session_timeout_ms: u64,\n    whitelisted_sector_sizes: Option<Vec<String>>,\n    manifest: Option<PathBuf>,\n    prompt_enabled: bool,\n}\n\nimpl ParamFetchSessionBuilder {\n    pub fn new(manifest: Option<PathBuf>) -> ParamFetchSessionBuilder {\n        let temp_dir = tempfile::tempdir().expect(\"could not create temp dir\");\n\n        ParamFetchSessionBuilder {\n            cache_dir: temp_dir,\n            session_timeout_ms: 1000,\n            manifest,\n            prompt_enabled: true,\n            whitelisted_sector_sizes: None,\n        }\n    }\n\n    /// Configure the pty timeout (see documentation for `rexpect::spawn_bash`).\n    pub fn with_session_timeout_ms(mut self, timeout_ms: u64) -> ParamFetchSessionBuilder {\n        self.session_timeout_ms = timeout_ms;\n        self\n    }\n\n    /// Configure the pty timeout (see documentation for `rexpect::spawn_bash`).\n    pub fn whitelisted_sector_sizes(\n        mut self,\n        sector_sizes: Vec<String>,\n    ) -> ParamFetchSessionBuilder {\n        self.whitelisted_sector_sizes = Some(sector_sizes);\n        self\n    }\n\n    /// Create a file with the provided bytes in the cache directory.\n    pub fn with_file_and_bytes<P: AsRef<Path>, R: Read>(\n        self,\n        filename: P,\n        r: &mut R,\n    ) -> ParamFetchSessionBuilder {\n        let mut pbuf = self.cache_dir.path().clone().to_path_buf();\n        pbuf.push(filename.as_ref());\n\n        let mut file = File::create(&pbuf).expect(\"failed to create file in temp dir\");\n\n        std::io::copy(r, &mut file).expect(\"failed to copy bytes to file\");\n\n        self\n    }\n\n    /// Launch paramfetch in an environment configured by the builder.\n    pub fn build(self) -> ParamFetchSession {\n        let mut p = spawn_bash_with_retries(10, Some(self.session_timeout_ms))\n            .unwrap_or_else(|err| panic!(err));\n\n        let cache_dir_path = format!(\"{:?}\", self.cache_dir.path());\n\n        let paramfetch_path = cargo_bin(\"paramfetch\");\n\n        let whitelist: String = self\n            .whitelisted_sector_sizes\n            .map(|wl| {\n                let mut s = \"--params-for-sector-sizes=\".to_string();\n                s.push_str(&wl.join(\",\"));\n                s\n            })\n            .unwrap_or(\"\".to_string());\n\n        let json_argument = if self.manifest.is_some() {\n            format!(\"--json={:?}\", self.manifest.unwrap())\n        } else {\n            \"\".to_string()\n        };\n\n        let cmd = format!(\n            \"{}={} {:?} {} {} {} --ipget-bin={:?}\",\n            PARAMETER_CACHE_ENV_VAR,\n            cache_dir_path,\n            paramfetch_path,\n            if self.prompt_enabled { \"\" } else { \"--all\" },\n            json_argument,\n            whitelist,\n            \"true\"\n        );\n\n        p.execute(&cmd, \".*\").expect(\"could not execute paramfetch\");\n\n        ParamFetchSession {\n            pty_session: p,\n            _cache_dir: self.cache_dir,\n        }\n    }\n}\n\n/// An active pseudoterminal (pty) used to interact with paramfetch.\npub struct ParamFetchSession {\n    pty_session: 
PtyBashSession,\n    _cache_dir: TempDir,\n}\n\nimpl ParamFetchSession {\n    /// Block until provided string is seen on stdout from paramfetch and\n    /// return remaining output.\n    pub fn exp_string(\n        &mut self,\n        needle: &str,\n    ) -> Result<String, SyncFailure<rexpect::errors::Error>> {\n        self.pty_session\n            .exp_string(needle)\n            .map_err(SyncFailure::new)\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/tests/parampublish/mod.rs",
    "content": "pub mod prompts_to_publish;\npub mod read_metadata_files;\npub mod write_json_manifest;\n\npub mod support;\n"
  },
  {
    "path": "filecoin-proofs/tests/parampublish/prompts_to_publish.rs",
    "content": "use std::collections::HashSet;\nuse std::iter::FromIterator;\n\nuse failure::Error as FailureError;\n\nuse storage_proofs::parameter_cache::CacheEntryMetadata;\n\nuse crate::parampublish::support::session::ParamPublishSessionBuilder;\nuse std::collections::btree_map::BTreeMap;\n\n#[test]\nfn ignores_files_unrecognized_extensions() {\n    Ok::<(), FailureError>(())\n        .and_then(|_| {\n            // create files with these names in the parameter cache\n            let to_create = vec![\"aaa.vk\", \"aaa.params\", \"bbb.txt\", \"ddd\"];\n\n            // parampublish should prompt user to publish these files\n            let to_prompt: HashSet<&str> =\n                HashSet::from_iter(vec![\"aaa.vk\", \"aaa.params\"].iter().cloned());\n\n            let (mut session, _) = ParamPublishSessionBuilder::new()\n                .with_session_timeout_ms(1000)\n                .with_files(&to_create)\n                .with_metadata(\"aaa.meta\", &CacheEntryMetadata { sector_size: 1234 })\n                .build();\n\n            for _ in 0..to_prompt.len() {\n                session.exp_string(\"[y/n] (sector size: 1234B) \")?;\n                let prompt_filename = session.exp_string(\": \")?;\n                let key: &str = &prompt_filename;\n                assert_eq!(true, to_prompt.contains(key), \"missing {}\", key);\n                session.send_line(\"n\")?;\n            }\n\n            session.exp_string(\"no files to publish\")?;\n            session.exp_string(\"done\")?;\n\n            Ok(())\n        })\n        .expect(\"parampublish test failed\");\n}\n\n#[test]\nfn displays_sector_size_in_prompt() {\n    Ok::<(), FailureError>(())\n        .and_then(|_| {\n            let to_create = vec![\"aaa.vk\", \"aaa.params\", \"xxx.vk\", \"xxx.params\"];\n\n            let (mut session, _) = ParamPublishSessionBuilder::new()\n                .with_session_timeout_ms(1000)\n                .with_files(&to_create)\n                .with_metadata(\"aaa.meta\", &CacheEntryMetadata { sector_size: 1234 })\n                .with_metadata(\"xxx.meta\", &CacheEntryMetadata { sector_size: 4444 })\n                .build();\n\n            let mut map: BTreeMap<&str, String> = BTreeMap::new();\n            map.insert(\"aaa.vk\", \"1234\".to_string());\n            map.insert(\"aaa.params\", \"1234\".to_string());\n            map.insert(\"xxx.vk\", \"4444\".to_string());\n            map.insert(\"xxx.params\", \"4444\".to_string());\n\n            for _ in 0..to_create.len() {\n                session.exp_string(\"[y/n] (sector size: \")?;\n                let prompt_sector_size: &str = &session.exp_string(\"B) \")?;\n                let prompt_filename: &str = &session.exp_string(\": \")?;\n                assert_eq!(map.get(prompt_filename).unwrap(), prompt_sector_size);\n                session.send_line(\"n\")?;\n            }\n\n            Ok(())\n        })\n        .expect(\"parampublish test failed\");\n}\n\n#[test]\nfn no_assets_no_prompt() -> Result<(), FailureError> {\n    let (mut session, _) = ParamPublishSessionBuilder::new()\n        .with_session_timeout_ms(1000)\n        .build();\n\n    session.exp_string(\"No valid parameters in directory\")?;\n\n    Ok(())\n}\n"
  },
  {
    "path": "filecoin-proofs/tests/parampublish/read_metadata_files.rs",
    "content": "use failure::Error as FailureError;\n\nuse crate::parampublish::support::session::ParamPublishSessionBuilder;\n\n#[test]\nfn fails_if_missing_metadata_file() -> Result<(), FailureError> {\n    // missing the corresponding .meta file\n    let filenames = vec![\"v12-aaa.vk\", \"v12-aaa.params\"];\n\n    let (mut session, _) = ParamPublishSessionBuilder::new()\n        .with_session_timeout_ms(1000)\n        .with_files(&filenames)\n        .with_prompt_disabled()\n        .build();\n\n    // error!\n    session.exp_string(\"No valid parameters in directory\")?;\n\n    Ok(())\n}\n\n#[test]\nfn fails_if_malformed_metadata_file() -> Result<(), FailureError> {\n    let mut malformed: &[u8] = &vec![42];\n\n    let (mut session, _) = ParamPublishSessionBuilder::new()\n        .with_session_timeout_ms(1000)\n        .with_files(&vec![\"v11-aaa.vk\", \"v11-aaa.params\"])\n        .with_file_and_bytes(\"v11-aaa.meta\", &mut malformed)\n        .with_prompt_disabled()\n        .build();\n\n    // error!\n    session.exp_string(\"fatal error\")?;\n\n    Ok(())\n}\n"
  },
  {
    "path": "filecoin-proofs/tests/parampublish/support/mod.rs",
    "content": "pub mod session;\n"
  },
  {
    "path": "filecoin-proofs/tests/parampublish/support/session.rs",
    "content": "use std::fs::File;\nuse std::io::{Read, Write};\nuse std::path::{Path, PathBuf};\n\nuse failure::SyncFailure;\nuse rand::Rng;\nuse rexpect::session::PtyBashSession;\nuse tempfile;\nuse tempfile::TempDir;\n\nuse storage_proofs::parameter_cache::{CacheEntryMetadata, PARAMETER_CACHE_ENV_VAR};\n\nuse crate::support::{cargo_bin, spawn_bash_with_retries, FakeIpfsBin};\n\npub struct ParamPublishSessionBuilder {\n    cache_dir: TempDir,\n    cached_file_pbufs: Vec<PathBuf>,\n    session_timeout_ms: u64,\n    manifest: PathBuf,\n    ipfs_bin_path: PathBuf,\n    prompt_enabled: bool,\n}\n\nimpl ParamPublishSessionBuilder {\n    pub fn new() -> ParamPublishSessionBuilder {\n        let temp_dir = tempfile::tempdir().expect(\"could not create temp dir\");\n\n        let mut pbuf = temp_dir.path().clone().to_path_buf();\n        pbuf.push(\"parameters.json\");\n\n        File::create(&pbuf).expect(\"failed to create file in temp dir\");\n\n        ParamPublishSessionBuilder {\n            cache_dir: temp_dir,\n            cached_file_pbufs: vec![],\n            session_timeout_ms: 1000,\n            manifest: pbuf,\n            ipfs_bin_path: cargo_bin(\"fakeipfsadd\"),\n            prompt_enabled: true,\n        }\n    }\n\n    /// Configure the path used by `parampublish` to add files to IPFS daemon.\n    pub fn with_ipfs_bin(mut self, ipfs_bin: &FakeIpfsBin) -> ParamPublishSessionBuilder {\n        let pbuf: PathBuf = PathBuf::from(&ipfs_bin.bin_path());\n        self.ipfs_bin_path = pbuf;\n        self\n    }\n\n    /// Create empty files with the given names in the cache directory.\n    pub fn with_files<P: AsRef<Path>>(self, filenames: &[P]) -> ParamPublishSessionBuilder {\n        filenames\n            .into_iter()\n            .fold(self, |acc, item| acc.with_file(item))\n    }\n\n    /// Create a file containing 32 random bytes with the given name in the\n    /// cache directory.\n    pub fn with_file<P: AsRef<Path>>(mut self, filename: P) -> ParamPublishSessionBuilder {\n        let mut pbuf = self.cache_dir.path().clone().to_path_buf();\n        pbuf.push(filename.as_ref());\n\n        let mut file = File::create(&pbuf).expect(\"failed to create file in temp dir\");\n\n        let random_bytes = rand::thread_rng().gen::<[u8; 32]>();\n        file.write(&random_bytes).expect(\"failed to write bytes\");\n\n        self.cached_file_pbufs.push(pbuf);\n        self\n    }\n\n    /// Create a file with the provided bytes in the cache directory.\n    pub fn with_file_and_bytes<P: AsRef<Path>, R: Read>(\n        mut self,\n        filename: P,\n        r: &mut R,\n    ) -> ParamPublishSessionBuilder {\n        let mut pbuf = self.cache_dir.path().clone().to_path_buf();\n        pbuf.push(filename.as_ref());\n\n        let mut file = File::create(&pbuf).expect(\"failed to create file in temp dir\");\n\n        std::io::copy(r, &mut file).expect(\"failed to copy bytes to file\");\n\n        self.cached_file_pbufs.push(pbuf);\n        self\n    }\n\n    /// Create a metadata file with the provided name in the cache directory.\n    pub fn with_metadata<P: AsRef<Path>>(\n        self,\n        filename: P,\n        meta: &CacheEntryMetadata,\n    ) -> ParamPublishSessionBuilder {\n        let mut meta_bytes: &[u8] = &serde_json::to_vec(meta)\n            .expect(\"failed to serialize CacheEntryMetadata to JSON byte array\");\n\n        self.with_file_and_bytes(filename, &mut meta_bytes)\n    }\n\n    /// Configure the pty timeout (see documentation for `rexpect::spawn_bash`).\n    pub fn 
with_session_timeout_ms(mut self, timeout_ms: u64) -> ParamPublishSessionBuilder {\n        self.session_timeout_ms = timeout_ms;\n        self\n    }\n\n    /// If the prompt is disabled, the `--all` flag will be passed to parampublish.\n    pub fn with_prompt_disabled(mut self) -> ParamPublishSessionBuilder {\n        self.prompt_enabled = false;\n        self\n    }\n\n    /// When publishing, write the JSON manifest to the provided path.\n    pub fn write_manifest_to(mut self, manifest_dest: PathBuf) -> ParamPublishSessionBuilder {\n        self.manifest = manifest_dest;\n        self\n    }\n\n    /// Launch parampublish in an environment configured by the builder.\n    pub fn build(self) -> (ParamPublishSession, Vec<PathBuf>) {\n        let mut p = spawn_bash_with_retries(10, Some(self.session_timeout_ms))\n            .unwrap_or_else(|err| panic!(\"{}\", err));\n\n        let cache_dir_path = format!(\"{:?}\", self.cache_dir.path());\n\n        let cache_contents: Vec<PathBuf> = std::fs::read_dir(&self.cache_dir)\n            .expect(&format!(\"failed to read cache dir {:?}\", self.cache_dir))\n            .into_iter()\n            .map(|x| x.expect(\"failed to get dir entry\"))\n            .map(|x| x.path())\n            .collect();\n\n        let parampublish_path = cargo_bin(\"parampublish\");\n\n        let cmd = format!(\n            \"{}={} {:?} {} --ipfs-bin={:?} --json={:?}\",\n            PARAMETER_CACHE_ENV_VAR,\n            cache_dir_path,\n            parampublish_path,\n            if self.prompt_enabled { \"\" } else { \"--all\" },\n            self.ipfs_bin_path,\n            self.manifest\n        );\n\n        p.execute(&cmd, \".*\")\n            .expect(\"could not execute parampublish\");\n\n        (\n            ParamPublishSession {\n                pty_session: p,\n                _cache_dir: self.cache_dir,\n            },\n            cache_contents,\n        )\n    }\n}\n\n/// An active pseudoterminal (pty) used to interact with parampublish.\npub struct ParamPublishSession {\n    pty_session: PtyBashSession,\n    _cache_dir: TempDir,\n}\n\nimpl ParamPublishSession {\n    /// Send provided string and trailing newline to parampublish.\n    pub fn send_line(&mut self, line: &str) -> Result<usize, SyncFailure<rexpect::errors::Error>> {\n        self.pty_session.send_line(line).map_err(SyncFailure::new)\n    }\n\n    /// Block until provided string is seen on stdout from parampublish and\n    /// return remaining output.\n    pub fn exp_string(\n        &mut self,\n        needle: &str,\n    ) -> Result<String, SyncFailure<rexpect::errors::Error>> {\n        self.pty_session\n            .exp_string(needle)\n            .map_err(SyncFailure::new)\n    }\n}\n"
  },
  {
    "path": "filecoin-proofs/tests/parampublish/write_json_manifest.rs",
    "content": "use std::collections::btree_map::BTreeMap;\nuse std::fs::File;\nuse std::path::Path;\n\nuse failure::Error as FailureError;\n\nuse filecoin_proofs::param::ParameterData;\nuse storage_proofs::parameter_cache::CacheEntryMetadata;\n\nuse crate::parampublish::support::session::ParamPublishSessionBuilder;\nuse crate::support::{tmp_manifest, FakeIpfsBin};\n\n#[test]\nfn writes_json_manifest() -> Result<(), FailureError> {\n    let filenames = vec![\"v10-aaa.vk\", \"v10-aaa.params\"];\n\n    let manifest_path = tmp_manifest(None)?;\n\n    let ipfs = FakeIpfsBin::new();\n\n    let (mut session, files_in_cache) = ParamPublishSessionBuilder::new()\n        .with_session_timeout_ms(1000)\n        .with_files(&filenames)\n        .with_metadata(\"v10-aaa.meta\", &CacheEntryMetadata { sector_size: 1234 })\n        .write_manifest_to(manifest_path.clone())\n        .with_ipfs_bin(&ipfs)\n        .with_prompt_disabled()\n        .build();\n\n    // compute checksums from files added to cache to compare with\n    // manifest entries after publishing completes\n    let cache_checksums = filename_to_checksum(&ipfs, files_in_cache.as_ref());\n\n    session.exp_string(\"Select a version\")?;\n    // There is only one version of parameters, accept that one\n    session.send_line(\"\")?;\n    //session.exp_regex(\".*Select the sizes to publish.*\")?;\n    session.exp_string(\"Select the sizes to publish\")?;\n    // There is only one size, accept that one\n    session.send_line(\"\")?;\n\n    // wait for confirmation...\n    session.exp_string(\"publishing 2 files\")?;\n    session.exp_string(\"done\")?;\n\n    // read the manifest file from disk and verify that it is well\n    // formed and contains the expected keys\n    let manifest_file = File::open(&manifest_path)?;\n    let manifest_map: BTreeMap<String, ParameterData> = serde_json::from_reader(manifest_file)?;\n\n    // ensure that each filename exists in the manifest and that its\n    // cid matches that which was produced from the `ipfs add` command\n    for filename in filenames.iter().cloned() {\n        if let (Some(m_entry), Some(expected)) =\n            (manifest_map.get(filename), cache_checksums.get(filename))\n        {\n            assert_eq!(\n                &m_entry.cid, expected,\n                \"manifest does not include digest produced by ipfs add for {}\",\n                filename\n            );\n        } else {\n            panic!(\"{} must be present in both manifest and cache\", filename);\n        }\n    }\n\n    Ok(())\n}\n\n/// Produce a map of filename (not path) to the checksum produced by the ipfs\n/// binary.\nfn filename_to_checksum<P: AsRef<Path>>(\n    ipfs_bin: &FakeIpfsBin,\n    paths: &[P],\n) -> BTreeMap<String, String> {\n    paths.iter().fold(BTreeMap::new(), |mut acc, item| {\n        acc.insert(\n            item.as_ref()\n                .file_name()\n                .and_then(|os_str| os_str.to_str())\n                .map(|s| s.to_string())\n                .unwrap_or(\"\".to_string()),\n            ipfs_bin\n                .compute_checksum(item)\n                .expect(\"failed to compute checksum\"),\n        );\n        acc\n    })\n}\n"
  },
  {
    "path": "filecoin-proofs/tests/pieces.rs",
    "content": "use std::io::{Cursor, Read};\nuse std::iter::Iterator;\n\nuse anyhow::Result;\nuse bellperson::bls::Fr;\nuse filecoin_proofs::{\n    add_piece, commitment_from_fr,\n    pieces::{\n        compute_comm_d, get_piece_alignment, get_piece_start_byte, piece_hash, verify_pieces,\n        zero_padding, EmptySource, PieceAlignment,\n    },\n    Commitment, DataTree, DefaultPieceHasher, PaddedBytesAmount, PieceInfo, SectorSize,\n    UnpaddedByteIndex, UnpaddedBytesAmount, DRG_DEGREE, EXP_DEGREE, TEST_SEED,\n};\nuse rand::{Rng, RngCore, SeedableRng};\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion, drgraph::Graph, merkle::create_base_merkle_tree, util::NODE_SIZE,\n};\nuse storage_proofs_porep::stacked::StackedBucketGraph;\n\n#[test]\nfn test_empty_source() {\n    let mut source = EmptySource::new(12);\n    let mut target = Vec::new();\n    source\n        .read_to_end(&mut target)\n        .expect(\"EmptySource read error\");\n    assert_eq!(target, vec![0u8; 12]);\n}\n\n#[test]\nfn test_compute_comm_d_empty() {\n    let comm_d =\n        compute_comm_d(SectorSize(2048), &[]).expect(\"failed to verify pieces, empty piece infos\");\n    assert_eq!(\n        comm_d,\n        [\n            252, 126, 146, 130, 150, 229, 22, 250, 173, 233, 134, 178, 143, 146, 212, 74, 79, 36,\n            185, 53, 72, 82, 35, 55, 106, 121, 144, 39, 188, 24, 248, 51\n        ]\n    );\n\n    let comm_d = compute_comm_d(SectorSize(128), &[]).expect(\"failed to verify pieces\");\n    assert_eq!(\n        hex::encode(&comm_d),\n        \"3731bb99ac689f66eef5973e4a94da188f4ddcae580724fc6f3fd60dfd488333\",\n    );\n}\n\n#[test]\nfn test_get_piece_alignment() {\n    let table = vec![\n        (0, 0, (0, 127)),\n        (0, 127, (0, 0)),\n        (0, 254, (0, 0)),\n        (0, 508, (0, 0)),\n        (0, 1016, (0, 0)),\n        (127, 127, (0, 0)),\n        (127, 254, (127, 0)),\n        (127, 508, (381, 0)),\n        (100, 100, (27, 27)),\n        (200, 200, (54, 54)),\n        (300, 300, (208, 208)),\n    ];\n\n    for (bytes_in_sector, bytes_in_piece, (expected_left_align, expected_right_align)) in\n        table.clone()\n    {\n        let PieceAlignment {\n            left_bytes: UnpaddedBytesAmount(actual_left_align),\n            right_bytes: UnpaddedBytesAmount(actual_right_align),\n        } = get_piece_alignment(\n            UnpaddedBytesAmount(bytes_in_sector),\n            UnpaddedBytesAmount(bytes_in_piece),\n        );\n        assert_eq!(\n            (expected_left_align, expected_right_align),\n            (actual_left_align, actual_right_align)\n        );\n    }\n}\n\n#[test]\nfn test_get_piece_start_byte() {\n    let pieces = [\n        UnpaddedBytesAmount(31),\n        UnpaddedBytesAmount(32),\n        UnpaddedBytesAmount(33),\n    ];\n\n    assert_eq!(\n        get_piece_start_byte(&pieces[..0], pieces[0]),\n        UnpaddedByteIndex(0)\n    );\n    assert_eq!(\n        get_piece_start_byte(&pieces[..1], pieces[1]),\n        UnpaddedByteIndex(127)\n    );\n    assert_eq!(\n        get_piece_start_byte(&pieces[..2], pieces[2]),\n        UnpaddedByteIndex(254)\n    );\n}\n\n#[test]\nfn test_verify_simple_pieces() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    //     g\n    //   /  \\\n    //  e    f\n    // / \\  / \\\n    // a  b c  d\n\n    let (a, b, c, d): ([u8; 32], [u8; 32], [u8; 32], [u8; 32]) = rng.gen();\n\n    let mut e = [0u8; 32];\n    let h = piece_hash(&a, &b);\n    e.copy_from_slice(h.as_ref());\n\n    let mut f = [0u8; 
32];\n    let h = piece_hash(&c, &d);\n    f.copy_from_slice(h.as_ref());\n\n    let mut g = [0u8; 32];\n    let h = piece_hash(&e, &f);\n    g.copy_from_slice(h.as_ref());\n    let a = PieceInfo::new(a, UnpaddedBytesAmount(127)).expect(\"failed to create piece info a\");\n    let b = PieceInfo::new(b, UnpaddedBytesAmount(127)).expect(\"failed to create piece info b\");\n    let c = PieceInfo::new(c, UnpaddedBytesAmount(127)).expect(\"failed to create piece info c\");\n    let d = PieceInfo::new(d, UnpaddedBytesAmount(127)).expect(\"failed to create piece info d\");\n\n    let e = PieceInfo::new(e, UnpaddedBytesAmount(254)).expect(\"failed to create piece info e\");\n    let f = PieceInfo::new(f, UnpaddedBytesAmount(254)).expect(\"failed to create piece info f\");\n    let g = PieceInfo::new(g, UnpaddedBytesAmount(508)).expect(\"failed to create piece info g\");\n\n    let sector_size = SectorSize(4 * 128);\n    let comm_d = g.commitment;\n\n    // println!(\"e: {:?}\", e);\n    // println!(\"f: {:?}\", f);\n    // println!(\"g: {:?}\", g);\n\n    assert!(\n        verify_pieces(\n            &comm_d,\n            &[a.clone(), b.clone(), c.clone(), d.clone()],\n            sector_size\n        )\n        .expect(\"failed to verify\"),\n        \"[a, b, c, d]\"\n    );\n\n    assert!(\n        verify_pieces(&comm_d, &[e.clone(), c, d], sector_size).expect(\"failed to verify\"),\n        \"[e, c, d]\"\n    );\n\n    assert!(\n        verify_pieces(&comm_d, &[e, f.clone()], sector_size).expect(\"failed to verify\"),\n        \"[e, f]\"\n    );\n\n    assert!(\n        verify_pieces(&comm_d, &[a, b, f], sector_size).expect(\"failed to verify\"),\n        \"[a, b, f]\"\n    );\n\n    assert!(\n        verify_pieces(&comm_d, &[g], sector_size).expect(\"failed to verify\"),\n        \"[g]\"\n    );\n}\n\n#[test]\n#[allow(clippy::identity_op)]\nfn test_verify_padded_pieces() {\n    // [\n    //   {(A0 00) (BB BB)} -> A(1) P(1) P(1) P(1) B(4)\n    //   {(CC 00) (00 00)} -> C(2)      P(1) P(1) P(1) P(1) P(1) P(1)\n    // ]\n    // [\n    //   {(DD DD) (DD DD)} -> D(8)\n    //   {(00 00) (00 00)} -> P(1) P(1) P(1) P(1) P(1) P(1) P(1) P(1)\n    // ]\n\n    let sector_size = SectorSize(32 * 128);\n    let pad = zero_padding(UnpaddedBytesAmount(127)).expect(\"failed to create pad\");\n\n    let pieces = vec![\n        PieceInfo::new([1u8; 32], UnpaddedBytesAmount(1 * 127))\n            .expect(\"failed to create piece info 0\"),\n        PieceInfo::new([2u8; 32], UnpaddedBytesAmount(4 * 127))\n            .expect(\"failed to create piece info 1\"),\n        PieceInfo::new([3u8; 32], UnpaddedBytesAmount(2 * 127))\n            .expect(\"failed to create piece info 2\"),\n        PieceInfo::new([4u8; 32], UnpaddedBytesAmount(8 * 127))\n            .expect(\"failed to create piece info 3\"),\n    ];\n\n    let padded_pieces = vec![\n        PieceInfo::new([1u8; 32], UnpaddedBytesAmount(1 * 127))\n            .expect(\"failed to create padded piece info 0\"),\n        pad.clone(),\n        pad.clone(),\n        pad.clone(),\n        PieceInfo::new([2u8; 32], UnpaddedBytesAmount(4 * 127))\n            .expect(\"failed to create padded piece info 1\"),\n        PieceInfo::new([3u8; 32], UnpaddedBytesAmount(2 * 127))\n            .expect(\"failed to create padded piece info 2\"),\n        pad.clone(),\n        pad.clone(),\n        pad.clone(),\n        pad.clone(),\n        pad.clone(),\n        pad.clone(),\n        PieceInfo::new([4u8; 32], UnpaddedBytesAmount(8 * 127))\n            .expect(\"failed to 
create padded piece info 4\"),\n        pad.clone(),\n        pad.clone(),\n        pad.clone(),\n        pad.clone(),\n        pad.clone(),\n        pad.clone(),\n        pad.clone(),\n        pad,\n    ];\n\n    let hash = |a, b| {\n        let hash = piece_hash(a, b);\n        let mut res = [0u8; 32];\n        res.copy_from_slice(hash.as_ref());\n        res\n    };\n\n    let layer1: Vec<[u8; 32]> = vec![\n        hash(&padded_pieces[0].commitment, &padded_pieces[1].commitment), // 2: H(A(1) | P(1))\n        hash(&padded_pieces[2].commitment, &padded_pieces[3].commitment), // 2: H(P(1) | P(1))\n        padded_pieces[4].commitment,                                      // 4: B(4)\n        padded_pieces[5].commitment,                                      // 2: C(2)\n        hash(&padded_pieces[6].commitment, &padded_pieces[7].commitment), // 2: H(P(1) | P(1))\n        hash(&padded_pieces[8].commitment, &padded_pieces[9].commitment), // 2: H(P(1) | P(1))\n        hash(&padded_pieces[10].commitment, &padded_pieces[11].commitment), // 2: H(P(1) | P(1))\n        padded_pieces[12].commitment,                                     // 8: D(8)\n        hash(&padded_pieces[13].commitment, &padded_pieces[14].commitment), // 2: H(P(1) | P(1))\n        hash(&padded_pieces[15].commitment, &padded_pieces[16].commitment), // 2: H(P(1) | P(1))\n        hash(&padded_pieces[17].commitment, &padded_pieces[18].commitment), // 2: H(P(1) | P(1))\n        hash(&padded_pieces[19].commitment, &padded_pieces[20].commitment), // 2: H(P(1) | P(1))\n    ];\n\n    let layer2: Vec<[u8; 32]> = vec![\n        hash(&layer1[0], &layer1[1]),   // 4\n        layer1[2],                      // 4\n        hash(&layer1[3], &layer1[4]),   // 4\n        hash(&layer1[5], &layer1[6]),   // 4\n        layer1[7],                      // 8\n        hash(&layer1[8], &layer1[9]),   // 4\n        hash(&layer1[10], &layer1[11]), // 4\n    ];\n\n    let layer3 = vec![\n        hash(&layer2[0], &layer2[1]), // 8\n        hash(&layer2[2], &layer2[3]), // 8\n        layer2[4],                    // 8\n        hash(&layer2[5], &layer2[6]), // 8\n    ];\n\n    let layer4 = vec![\n        hash(&layer3[0], &layer3[1]), // 16\n        hash(&layer3[2], &layer3[3]), // 16\n    ];\n\n    let comm_d = hash(&layer4[0], &layer4[1]); // 32\n\n    assert!(verify_pieces(&comm_d, &pieces, sector_size).expect(\"failed to verify pieces\"));\n}\n\n#[test]\n#[ignore] // slow test\nfn test_verify_random_pieces() -> Result<()> {\n    use filecoin_proofs::pieces::sum_piece_bytes_with_alignment;\n\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    for sector_size in &[\n        SectorSize(4 * 128),\n        SectorSize(32 * 128),\n        SectorSize(1024 * 128),\n        SectorSize(1024 * 8 * 128),\n    ] {\n        println!(\"--- {:?} ---\", sector_size);\n        for i in 0..100 {\n            println!(\" - {} -\", i);\n            let unpadded_sector_size: UnpaddedBytesAmount = sector_size.clone().into();\n            let sector_size = *sector_size;\n            let padded_sector_size: PaddedBytesAmount = sector_size.into();\n\n            let mut piece_sizes = Vec::new();\n            loop {\n                let sum_piece_sizes: PaddedBytesAmount =\n                    sum_piece_bytes_with_alignment(&piece_sizes).into();\n\n                if sum_piece_sizes > padded_sector_size {\n                    piece_sizes.pop();\n                    break;\n                }\n                if sum_piece_sizes == padded_sector_size {\n                    
break;\n                }\n\n                'inner: loop {\n                    // pieces must be power of two\n                    let left = u64::from(padded_sector_size) - u64::from(sum_piece_sizes);\n                    let left_power_of_two = prev_power_of_two(left as u32);\n                    let max_exp = (left_power_of_two as f64).log2() as u32;\n\n                    let padded_exp = if max_exp > 7 {\n                        rng.gen_range(\n                            7, // 2**7 == 128,\n                            max_exp,\n                        )\n                    } else {\n                        7\n                    };\n                    let padded_piece_size = 2u64.pow(padded_exp);\n                    let piece_size: UnpaddedBytesAmount =\n                        PaddedBytesAmount(padded_piece_size).into();\n                    piece_sizes.push(piece_size);\n                    let sum: PaddedBytesAmount =\n                        sum_piece_bytes_with_alignment(&piece_sizes).into();\n\n                    if sum > padded_sector_size {\n                        // pieces might be too large after padding, so remove them and try again.\n                        piece_sizes.pop();\n                    } else {\n                        break 'inner;\n                    }\n                }\n            }\n\n            // println!(\n            //     \"  {:?}\",\n            //     piece_sizes\n            //         .iter()\n            //         .map(|s| u64::from(*s) / 127)\n            //         .collect::<Vec<_>>()\n            // );\n            assert!(sum_piece_bytes_with_alignment(&piece_sizes) <= unpadded_sector_size);\n            assert!(!piece_sizes.is_empty());\n\n            let (comm_d, piece_infos) = build_sector(&piece_sizes, sector_size)?;\n\n            assert!(\n                verify_pieces(&comm_d, &piece_infos, sector_size)?,\n                \"invalid pieces\"\n            );\n        }\n    }\n\n    Ok(())\n}\n\nfn build_sector(\n    piece_sizes: &[UnpaddedBytesAmount],\n    sector_size: SectorSize,\n) -> Result<(Commitment, Vec<PieceInfo>)> {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n    let porep_id = [32; 32];\n    let graph = StackedBucketGraph::<DefaultPieceHasher>::new_stacked(\n        u64::from(sector_size) as usize / NODE_SIZE,\n        DRG_DEGREE,\n        EXP_DEGREE,\n        porep_id,\n        ApiVersion::V1_1_0,\n    )?;\n\n    let mut staged_sector = Vec::with_capacity(u64::from(sector_size) as usize);\n    let mut staged_sector_io = Cursor::new(&mut staged_sector);\n    let mut piece_infos = Vec::with_capacity(piece_sizes.len());\n\n    for (i, piece_size) in piece_sizes.iter().enumerate() {\n        let piece_size_u = u64::from(*piece_size) as usize;\n        let mut piece_bytes = vec![255u8; piece_size_u];\n        rng.fill_bytes(&mut piece_bytes);\n\n        let mut piece_file = Cursor::new(&mut piece_bytes);\n\n        let (piece_info, _) = add_piece(\n            &mut piece_file,\n            &mut staged_sector_io,\n            *piece_size,\n            &piece_sizes[..i],\n        )?;\n\n        piece_infos.push(piece_info);\n    }\n    assert_eq!(staged_sector.len(), u64::from(sector_size) as usize);\n\n    let data_tree: DataTree =\n        create_base_merkle_tree::<DataTree>(None, graph.size(), &staged_sector)\n            .expect(\"failed to create data tree\");\n    let comm_d_root: Fr = data_tree.root().into();\n    let comm_d = commitment_from_fr(comm_d_root);\n\n    Ok((comm_d, piece_infos))\n}\n\nfn 
prev_power_of_two(mut x: u32) -> u32 {\n    x |= x >> 1;\n    x |= x >> 2;\n    x |= x >> 4;\n    x |= x >> 8;\n    x |= x >> 16;\n    x - (x >> 1)\n}\n"
  },
  {
    "path": "filecoin-proofs/tests/suite.rs",
    "content": "mod paramfetch;\nmod parampublish;\nmod support;\n"
  },
  {
    "path": "filecoin-proofs/tests/support/mod.rs",
    "content": "use std::path::{Path, PathBuf};\nuse std::{env, thread};\n\nuse failure::format_err;\nuse filecoin_proofs::param::ParameterData;\nuse rexpect::session::PtyBashSession;\nuse rexpect::spawn_bash;\nuse std::collections::btree_map::BTreeMap;\nuse std::fs::File;\nuse std::process::Command;\nuse std::time::Duration;\n\npub struct FakeIpfsBin {\n    bin_path: PathBuf,\n}\n\nimpl FakeIpfsBin {\n    pub fn new() -> FakeIpfsBin {\n        FakeIpfsBin {\n            bin_path: cargo_bin(\"fakeipfsadd\"),\n        }\n    }\n\n    pub fn compute_checksum<P: AsRef<Path>>(&self, path: P) -> Result<String, failure::Error> {\n        let output = Command::new(&self.bin_path)\n            .arg(\"add\")\n            .arg(\"-Q\")\n            .arg(path.as_ref())\n            .output()?;\n\n        if !output.status.success() {\n            Err(format_err!(\n                \"{:?} produced non-zero exit code\",\n                &self.bin_path\n            ))\n        } else {\n            Ok(String::from_utf8(output.stdout)?.trim().to_string())\n        }\n    }\n\n    pub fn bin_path(&self) -> &Path {\n        &self.bin_path\n    }\n}\n\n/// Get the path of the target directory.\npub fn target_dir() -> PathBuf {\n    env::current_exe()\n        .ok()\n        .map(|mut path| {\n            path.pop();\n            if path.ends_with(\"deps\") {\n                path.pop();\n            }\n            path\n        })\n        .unwrap()\n}\n\n/// Look up the path to a cargo-built binary within an integration test.\npub fn cargo_bin<S: AsRef<str>>(name: S) -> PathBuf {\n    target_dir().join(format!(\"{}{}\", name.as_ref(), env::consts::EXE_SUFFIX))\n}\n\n/// Spawn a pty and, if an error is produced, retry with linear backoff (to 5s).\npub fn spawn_bash_with_retries(\n    retries: u8,\n    timeout: Option<u64>,\n) -> Result<PtyBashSession, rexpect::errors::Error> {\n    let result = spawn_bash(timeout);\n    if result.is_ok() || retries == 0 {\n        result\n    } else {\n        let sleep_d = Duration::from_millis(5000 / u64::from(retries));\n        eprintln!(\n            \"failed to spawn pty: {} retries remaining - sleeping {:?}\",\n            retries, sleep_d\n        );\n        thread::sleep(sleep_d);\n        spawn_bash_with_retries(retries - 1, timeout)\n    }\n}\n\n/// Create a parameters.json manifest file in a temp directory and return its\n/// path.\npub fn tmp_manifest(\n    opt_manifest: Option<BTreeMap<String, ParameterData>>,\n) -> Result<PathBuf, failure::Error> {\n    let manifest_dir = tempfile::tempdir()?;\n    let mut pbuf = manifest_dir.into_path();\n    pbuf.push(\"parameters.json\");\n\n    let mut file = File::create(&pbuf)?;\n    if let Some(map) = opt_manifest {\n        // JSON encode the manifest and write bytes to temp file\n        serde_json::to_writer(&mut file, &map)?;\n    }\n\n    Ok(pbuf)\n}\n"
  },
  {
    "path": "fr32/Cargo.toml",
    "content": "[package]\nname = \"fr32\"\nversion = \"0.2.1\"\nauthors = [\"dignifiedquire <me@dignifiedquire.com>\"]\ndescription = \"Filecoin proofs Fr/32-byte conversion tooling\"\nlicense = \"MIT OR Apache-2.0\"\nedition = \"2018\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\n\n[dependencies]\nanyhow = \"1.0.23\"\nbellperson = { version = \"0.13\", default-features = false }\nbyte-slice-cast = \"1.0.0\"\nbyteorder = \"1\"\nff = { version = \"0.2.3\", package = \"fff\" }\nthiserror = \"1.0.6\"\n\n[dev-dependencies]\nbitvec = \"0.17\"\ncriterion = \"0.3\"\nitertools = \"0.9\"\npretty_assertions = \"0.6.1\"\nrand = \"0.7\"\nrand_xorshift = \"0.2.0\"\n\n[features]\ndefault = [\"pairing\"]\nblst = [\"bellperson/blst\"]\ngpu = [\"bellperson/gpu\"]\npairing = [\"bellperson/pairing\"]\n\n[[bench]]\nname = \"fr\"\nharness = false\n"
  },
  {
    "path": "fr32/benches/fr.rs",
    "content": "use bellperson::bls::Fr;\nuse criterion::{black_box, criterion_group, criterion_main, Criterion};\nuse ff::Field;\nuse fr32::{bytes_into_fr, fr_into_bytes};\nuse rand::thread_rng;\n\nfn fr_benchmark(c: &mut Criterion) {\n    c.bench_function(\"fr-to-bytes-32\", move |b| {\n        let mut rng = thread_rng();\n        let fr = Fr::random(&mut rng);\n\n        b.iter(|| black_box(fr_into_bytes(&fr)))\n    });\n\n    c.bench_function(\"bytes-32-to-fr\", move |b| {\n        let mut rng = thread_rng();\n        let fr = Fr::random(&mut rng);\n        let bytes = fr_into_bytes(&fr);\n\n        b.iter(|| black_box(bytes_into_fr(&bytes).unwrap()))\n    });\n}\n\ncriterion_group!(benches, fr_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "fr32/src/convert.rs",
    "content": "#[cfg(any(feature = \"pairing\", feature = \"blst\"))]\nuse anyhow::Result;\nuse bellperson::bls::{Fr, FrRepr};\nuse byteorder::{ByteOrder, LittleEndian};\nuse ff::PrimeField;\n#[cfg(feature = \"pairing\")]\nuse ff::PrimeFieldRepr;\n\n#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(\"Bytes could not be converted to Fr\")]\n    BadFrBytes,\n}\n\n/// Contains one or more 32-byte chunks whose little-endian values represent Frs.\n/// Invariants:\n/// - Value of each 32-byte chunks MUST represent valid Frs.\n/// - Total length must be a multiple of 32.\n/// That is to say: each 32-byte chunk taken alone must be a valid Fr32.\npub type Fr32Vec = Vec<u8>;\n\n/// Array whose little-endian value represents an Fr.\n/// Invariants:\n/// - Value MUST represent a valid Fr.\npub type Fr32Ary = [u8; 32];\n\n/// Takes a slice of bytes and returns an Fr if byte slice is exactly 32 bytes and does not overflow.\n/// Otherwise, returns a BadFrBytesError.\n#[cfg(feature = \"pairing\")]\npub fn bytes_into_fr(bytes: &[u8]) -> Result<Fr> {\n    use anyhow::{ensure, Context};\n    ensure!(bytes.len() == 32, Error::BadFrBytes);\n    let mut fr_repr = FrRepr::default();\n    fr_repr.read_le(bytes).context(Error::BadFrBytes)?;\n    Fr::from_repr(fr_repr).map_err(|_| Error::BadFrBytes.into())\n}\n\n#[cfg(feature = \"blst\")]\npub fn bytes_into_fr(bytes: &[u8]) -> Result<Fr> {\n    use std::convert::TryInto;\n    Fr::from_bytes_le(bytes.try_into().map_err(|_| Error::BadFrBytes)?)\n        .ok_or_else(|| Error::BadFrBytes.into())\n}\n\n/// Bytes is little-endian.\n#[inline]\npub fn bytes_into_fr_repr_safe(r: &[u8]) -> FrRepr {\n    debug_assert!(r.len() == 32);\n\n    let repr: [u64; 4] = [\n        LittleEndian::read_u64(&r[0..8]),\n        LittleEndian::read_u64(&r[8..16]),\n        LittleEndian::read_u64(&r[16..24]),\n        u64::from(r[31] & 0b0011_1111) << 56\n            | u64::from(r[30]) << 48\n            | u64::from(r[29]) << 40\n            | u64::from(r[28]) << 32\n            | u64::from(r[27]) << 24\n            | u64::from(r[26]) << 16\n            | u64::from(r[25]) << 8\n            | u64::from(r[24]),\n    ];\n\n    FrRepr(repr)\n}\n\n/// Takes an Fr and returns a vector of exactly 32 bytes guaranteed to contain a valid Fr.\n#[cfg(feature = \"pairing\")]\npub fn fr_into_bytes(fr: &Fr) -> Fr32Vec {\n    let mut out = Vec::with_capacity(32);\n    fr.into_repr().write_le(&mut out).expect(\"write_le failure\");\n    out\n}\n\n#[cfg(feature = \"blst\")]\npub fn fr_into_bytes(fr: &Fr) -> Fr32Vec {\n    fr.to_bytes_le().to_vec()\n}\n\npub fn u64_into_fr(n: u64) -> Fr {\n    Fr::from_repr(FrRepr::from(n)).expect(\"failed to convert u64 into Fr (should never fail)\")\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    fn bytes_fr_test(bytes: Fr32Ary, expect_success: bool) {\n        let b = &bytes[..];\n        let fr_result = bytes_into_fr(&b);\n        if expect_success {\n            let f = fr_result.expect(\"Failed to convert bytes to `Fr`\");\n            let b2 = fr_into_bytes(&f);\n            assert_eq!(bytes.to_vec(), b2);\n        } else {\n            assert!(fr_result.is_err(), \"expected a decoding error\")\n        }\n    }\n\n    #[test]\n    fn test_bytes_into_fr_into_bytes() {\n        bytes_fr_test(\n            [\n                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,\n                23, 24, 25, 26, 27, 28, 29, 30, 31,\n            ],\n            true,\n        );\n        bytes_fr_test(\n            // 
Some bytes fail because they are not in the field.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 115,\n            ],\n            false,\n        );\n        bytes_fr_test(\n            // This is okay.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 114,\n            ],\n            true,\n        );\n        bytes_fr_test(\n            // So is this.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 236, 115,\n            ],\n            true,\n        );\n        bytes_fr_test(\n            // But not this.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 237, 115,\n            ],\n            false,\n        );\n    }\n}\n"
  },
  {
    "path": "fr32/src/lib.rs",
    "content": "mod convert;\nmod padding;\nmod reader;\n\npub use convert::*;\npub use padding::*;\npub use reader::*;\n"
  },
  {
    "path": "fr32/src/padding.rs",
    "content": "use std::cmp::{min, Ordering};\nuse std::io::{self, Error, ErrorKind, Write};\n\n/** PaddingMap represents a mapping between data and its padded equivalent.\n\nThe padding process takes a *byte-aligned stream* of unpadded *raw* data\nas input and returns another byte stream where padding is applied every\n`data_bits` to align them to the byte boundary (`element_bits`). The\n(inverse) *unpadding* process maps that output back to the raw input\nthat generated it.\n\n# Padded layout\n\nAt the *byte-level*, the padded layout is:\n\n```text\n      (full element)              (full)                 (incomplete)\n||  data_bits  pad_bits  ||  data_bits  pad_bits  ||  some_data  (no_padding)\n                         ^^                               ^^\n                  element boundary                (some_data < data_bits)\n                   (byte-aligned)\n```\n\nEach *element* is a byte-aligned stream comprised of a *full unit* of `data_bits`\nwith `pad_bits` at the end to byte-align it (where `pad_bits` is less than a byte,\nthis is a *sub-byte padding* scheme). After the last element boundary there may be\nan incomplete unit of data (`some_data`) with a length smaller than `data_bits`\nthat hasn't been padded. The padding rules are:\n  1. Padding is always applied to a full unit of `data_bits`.\n  2. A full data unit cannot exist without its corresponding padding.\n  3. A unit of padding is complete by definition: padding can only be\n     applied fully to each element.\n  4. If there is padding present then there has to be an already formed\n     element there (an element is full if and only if its data unit is full).\n\n# Last byte\n\nWhen returning the byte-aligned output generated from the padded *bitstream*\n(since the padding is done at the bit-level) the conversion results in the\nlast byte having (potentially) more bits than desired. At the *bit-level*\nthe layout of the last byte can either be a complete element (bits of raw\ndata followed by the corresponding padding bits) or an incomplete unit of\ndata: some number of *valid* data (D) bits followed by any number of *extra*\nbits (X) necessary to complete the byte-aligned stream:\n\n```text\n |   D   D   D   D   X   X   X   X   |\n         (data)         (extra)      ^ byte boundary (end of output)\n```\n\n(This diagram is just for illustrative purposes, we actually return the output\n in little-endian order, see `BitVecLEu8`).\n\nIt's important to distinguish these extra bits (generated as a side\neffect of the conversion to a byte-aligned stream) from the padding bits\nthemselves introduced in the padding process: even though both will be\nleft with a zero value, these extra bits are a place-holder for the actual\nraw data bits needed to complete the current unit of data (and hence also\nthe element, with the corresponding padding bits added after it). Since\nextra bits are only a product of an incomplete unit of data there can't\nbe extra bits after padding bits.\n\nThere's no metadata signaling the number of extra bits present in the\nlast byte in any given padded layout, this is deduced from the fact\nthat there's only a single number of valid data bits in the last byte,\nand hence a number of data bits in total, that maps to a byte-aligned\n(multiple of 8) raw data stream that could have been used as input.\n\n# Example: `FR32_PADDING_MAP`\n\nIn this case the `PaddingMap` is defined with a data unit of 254 bits that\nare byte aligned to a 256-bit (32-byte) element. 
If the user writes as input,\nsay, 40 bytes (320 bits) of raw input data to the padding process, the resulting\nlayout would be, at the element (byte) level:\n\n```text\n      (full element: 32 bytes)         (incomplete: 9 bytes)\n||  data_bits: 254  pad_bits: 2  ||   some_data: 66 bits (+ extra bits)\n                                 ^^\n                          element boundary\n```\n\nThat is, of the original 320 bits (40 bytes) of raw input data, 254 are\npadded in the first element and the remaining 66 bits form the incomplete\ndata unit after it, which is aligned to 9 bytes. At the bit level, that\nlast incomplete byte will have 2 valid bits and 6 extra bits.\n\n# Alignment of raw data bytes in the padded output\n\nThis section is not necessary to use this structure but it does help to\nreason about it. By the previous definition, the raw data bits *embedded*\nin the padded layout are not necessarily grouped in the same byte units\nas in the original raw data input (due to the inclusion of the padding\nbits interleaved in that bit stream, which keep shifting the data bits\nafter them).\n\nThis can also be stated as: the offsets of the bits (relative to the byte\nthey belong to, i.e., *bit-offset*) in the raw data input won't necessarily\nmatch the bit-offsets of the raw data bits embedded in the padded layout.\nThe consequence is that each raw byte written to the padded layout won't\nresult in a byte-aligned bit stream output, i.e., it may cause the appearance\nof extra bits (to convert the output to a byte-aligned stream).\n\nThere are portions of the padded layout, however, where this alignment does\nhappen. Particularly, when the padded layout accumulates enough padding bits\nthat they altogether add up to a byte, the following raw data byte written\nwill result in a byte-aligned output, and the same is true for all the other\nraw data bytes that follow it up until the element end, where new padding bits\nshift away this alignment. (The other obvious case is the first element, which,\nwith no padded bits in front of it, has by definition all its embedded raw data\nbytes aligned, independently of the `data_bits`/`pad_bits` configuration used.)\n\nIn the previous example, that happens after the fourth element, where 4 units\nof `pad_bits` add up to one byte and all of the raw data bytes in the fifth\nelement will keep their original alignment from the byte input stream (and the\nsame will happen after every further multiple of 4 elements). When that fourth\nelement is completed we have then 127 bytes of raw data and 1 byte of padding\n(totalling 32 * 4 = 128 bytes of padded output), so the interval of raw data\nbytes `[127..159]` (indexed like this in the input raw data stream) will keep\nits original alignment when embedded in the padded layout, i.e., every raw\ndata byte written will keep the output bit stream byte-aligned (without extra\nbits). (Technically, the last byte actually won't be a full byte since its last\nbits will be replaced by padding).\n\n# Key terms\n\nCollection of terms introduced in this documentation (with the format\n`*<new-term>*`). This section doesn't provide a self-contained definition\nof them (to avoid unnecessary repetition); it just provides (when appropriate)\nan additional summary of what was already discussed.\n\n * Raw data: unpadded user-supplied data (we don't use the *unpadded* term\n   to avoid excessive *padding* suffixes in the code). 
Padding (data) bits.\n * Element: byte-aligned stream consisting of a full unit of data plus the\n   padding bits.\n * Full unit of raw `data_bits` (always followed by padding). Incomplete unit,\n   not followed by padding, doesn't form an element.\n * Byte-aligned stream: always input and output of the (un)padding process,\n   either as raw data or padded (using the term \"byte-aligned\" and not \"byte\n   stream\" to stress the boundaries of the elements). Bit streams: used internally\n   when padding data (never returned as bits).\n * Valid data bits, only in the context of the last byte of a byte-aligned stream\n   generated from the padding process. Extra bits: what's left unused of the last\n   byte (in a way the extra bits are the padding at the byte-level, but we don't\n   use that term here to avoid confusions).\n * Sub-byte padding.\n * Bit-offset: offset of a bit within the byte it belongs to, ranging in `[0..8]`.\n * Embedded raw data: view of the input raw data when it has been decomposed in\n   bit streams and padded in the resulting output.\n\n**/\n#[derive(Debug)]\nstruct PaddingMap {\n    /// The number of bits of raw data in an element.\n    data_bits: usize,\n    /// Number of bits in an element: `data_bits` + `pad_bits()`. Its value\n    /// is fixed to the next byte-aligned size after `data_bits` (sub-byte padding).\n    element_bits: usize,\n}\n// TODO: Optimization: Evaluate saving the state of a (un)padding operation\n// inside (e.g., as a cursor like in `BitVec`), maybe not in this structure but\n// in a new `Padder` structure which would remember the positions (remaining\n// data bits in the element, etc.) to avoid recalculating them each time across\n// different (un)pad calls.\n\n// This is the padding map corresponding to Fr32.\n// Most of the code in this module is general-purpose and could move elsewhere.\n// The application-specific wrappers which implicitly use Fr32 embed the FR32_PADDING_MAP.\nconst FR32_PADDING_MAP: PaddingMap = PaddingMap {\n    data_bits: 254,\n    element_bits: 256,\n};\n\npub fn to_unpadded_bytes(padded_bytes: u64) -> u64 {\n    FR32_PADDING_MAP.transform_byte_offset(padded_bytes as usize, false) as u64\n}\n\npub fn to_padded_bytes(unpadded_bytes: usize) -> usize {\n    FR32_PADDING_MAP.transform_byte_offset(unpadded_bytes, true)\n}\n\n////////////////////////////////////////////////////////////////////////////////////////////////////\n// BitByte represents a size expressed in bytes extended\n// with bit precision, that is, not rounded.\n// Invariant: it is an error for bits to be > 7.\n#[derive(Debug)]\nstruct BitByte {\n    bytes: usize,\n    bits: usize,\n}\n\nimpl BitByte {\n    // Create a BitByte from number of bits. 
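For example,\n    // 13 bits becomes { bytes: 1, bits: 5 }.\n    // 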
Guaranteed to return a well-formed value (bits < 8)\n    fn from_bits(bits: usize) -> BitByte {\n        BitByte {\n            bytes: bits / 8,\n            bits: bits % 8,\n        }\n    }\n\n    // How many bits in the BitByte (inverse of from_bits).\n    fn total_bits(&self) -> usize {\n        self.bytes * 8 + self.bits\n    }\n\n    // How many distinct bytes are needed to represent data of this size?\n    fn bytes_needed(&self) -> usize {\n        self.bytes + if self.bits == 0 { 0 } else { 1 }\n    }\n}\n\nimpl PaddingMap {\n    fn pad_bits(&self) -> usize {\n        self.element_bits - self.data_bits\n    }\n\n    // Transform an offset (either a position or a size) *expressed in\n    // bits* in a raw byte-aligned data stream to its equivalent in a\n    // generated padded bit stream, that is, not byte aligned (so we\n    // don't count the extra bits here). If `padding` is `false`, calculate\n    // the inverse transformation.\n    fn transform_bit_offset(&self, pos: usize, padding: bool) -> usize {\n        // Set the sizes we're converting to and from.\n        let (from_size, to_size) = if padding {\n            (self.data_bits, self.element_bits)\n        } else {\n            (self.element_bits, self.data_bits)\n        };\n\n        // For both the padding and unpadding cases the operation is the same.\n        // The quotient is the number of full units: either elements (in the\n        // padded layout) or groups of `data_bits` (in the raw data input) that\n        // will be converted to full elements.\n        // The remainder (in both cases) is the last *incomplete* part of either of\n        // the two. Even in the padded layout, if there is an incomplete element it\n        // has to consist *only* of data (see `PaddingMap#padded-layout`). That amount\n        // of spare raw data doesn't need conversion, it can just be added to the new\n        // position.\n        let (full_elements, incomplete_data) = div_rem(pos, from_size);\n        (full_elements * to_size) + incomplete_data\n    }\n\n    // Similar to `transform_bit_offset`, this function transforms an offset\n    // expressed in bytes, that is, we are taking into account the extra\n    // bits here.\n    // TODO: Evaluate the relationship between this function and `transform_bit_offset`,\n    // it seems the two could be merged, or at least restructured to better expose\n    // their differences.\n    fn transform_byte_offset(&self, pos: usize, padding: bool) -> usize {\n        let transformed_bit_pos = self.transform_bit_offset(pos * 8, padding);\n\n        let transformed_byte_pos = transformed_bit_pos as f64 / 8.;\n        // TODO: Optimization: It might end up being cheaper to avoid this\n        // float conversion and use / and %.\n\n        // When padding, the final bits in the bit stream will grow into the\n        // last (potentially incomplete) byte of the byte stream, so round the\n        // number up (`ceil`). 
When unpadding, there's no way to know a priori\n        // how many valid bits are in the last byte, so we have to choose the number\n        // that fits in a byte-aligned raw data stream and round the number down\n        // to that (`floor`).\n        (if padding {\n            transformed_byte_pos.ceil()\n        } else {\n            transformed_byte_pos.floor()\n        }) as usize\n    }\n\n    // From the `position` specified, it returns:\n    // - the absolute position of the start of the next element,\n    //   in bytes (since elements -with padding- are byte aligned).\n    // - the number of bits left to read (write) from (to) the current\n    //   data unit (assuming it's full).\n    fn next_boundary(&self, position: &BitByte) -> (usize, usize) {\n        let position_bits = position.total_bits();\n\n        let (_, bits_after_last_boundary) = div_rem(position_bits, self.element_bits);\n\n        let remaining_data_unit_bits = self.data_bits - bits_after_last_boundary;\n\n        let next_element_position_bits = position_bits + remaining_data_unit_bits + self.pad_bits();\n\n        (next_element_position_bits / 8, remaining_data_unit_bits)\n    }\n}\n\n#[inline]\nfn div_rem(a: usize, b: usize) -> (usize, usize) {\n    let div = a / b;\n    let rem = a % b;\n    (div, rem)\n}\n\n// TODO: The following extraction functions could be moved to a different file.\n\n/** Shift an `amount` of bits from the `input` in the direction indicated by `is_left`.\n\nThis function tries to imitate the behavior of `shl` and `shr` of a\n`BitVec<LittleEndian, u8>`, where the inner vector is traversed one byte\nat a time (`u8`), and inside each byte, bits are traversed (`LittleEndian`)\nfrom LSB (\"right\") to MSB (\"left\"). For example, the bits in this two-byte\nslice will be traversed according to their numbering:\n\n```text\nADDR     |  7  6  5  4  3  2  1  0  |\n\nADDR +1  |  F  E  D  C  B  A  9  8  |\n```\n\n`BitVec` uses the opposite naming convention from this function: shifting left\nhere is equivalent to `shr` there, and shifting right to `shl`.\n\nIf shifting in the left direction, the `input` is expanded by one extra byte to\naccommodate the overflow (instead of just discarding it, which is what's done\nin the right direction).\n\nThe maximum `amount` to shift is 7 (and the minimum is 1), that is, we always\nshift less than a byte. This precondition is only checked during testing (with\n`debug_assert!`) for performance reasons; it is up to the caller to enforce it.\n\n# Examples\n\nShift the `input` (taken from the diagram above) left by an `amount` of 3 bits,\ngrowing the output slice:\n\n```text\nADDR     |  4  3  2  1  0  _  _  _  |  Filled with zeros.\n\nADDR +1  |  C  B  A  9  8  7  6  5  |\n\nADDR +2  |  _  _  _  _  _  F  E  D  |  The overflow of the last input byte\n                                               is moved to this (new) byte.\n```\n\nSame, but shift right:\n\n```text\nADDR     |  A  9  8  7  6  5  4  3  |  The overflow `[2,1,0]` is just discarded,\n                                                         the slice doesn't grow.\nADDR +1  |  _  _  _  F  E  D  C  B  |\n```\n\n(Note: `0`, `1`, `2`, etc. 
are bits identified by their original position,\n`_` means a bit left at zero after shifting, to avoid confusion with\nthe unique bit `0`, which just *started* at that position but doesn't\nnecessarily carry that value.)\n\n**/\nfn shift_bits(input: &[u8], amount: usize, is_left: bool) -> Vec<u8> {\n    debug_assert!(amount >= 1);\n    debug_assert!(amount <= 7);\n\n    // Create the `output` vector from the original input values, extending\n    // its size by one if shifting left.\n    let mut output = Vec::with_capacity(input.len() + if is_left { 1 } else { 0 });\n    output.extend_from_slice(input);\n    if is_left {\n        output.push(0);\n    }\n    // TODO: Is there a cleaner way to do this? Is the extra byte worth the initial\n    // `with_capacity` call?\n\n    // Split the shift into two parts. First, do a simple bit shift (losing the\n    // overflow) for each byte, then, in a second pass, recover the lost overflow\n    // from the `input`. The advantage of splitting it like this is that the place-holder\n    // spaces are already being cleared with zeros to just join the overflow part with a\n    // single `OR` operation (instead of assembling both parts together at the same time,\n    // which requires an extra clear operation with a mask of zeros).\n    for output_byte in output.iter_mut().take(input.len()) {\n        if is_left {\n            *output_byte <<= amount;\n        } else {\n            *output_byte >>= amount;\n        }\n    }\n\n    if is_left {\n        // At this point the `output` looks like this (following the original\n        // example):\n        //\n        // ADDR     |  4  3  2  1  0  _  _  _  |\n        //\n        // ADDR +1  |  C  B  A  9  8  _  _  _  |\n        //\n        // ADDR +2  |  _  _  _  _  _  _  _  _  |  Extra byte allocated to extend the `input`,\n        //                                            hasn't been modified in the first pass.\n        //\n        // We need to recover the overflow of each shift (e.g., `[7,6,5]` from\n        // the first byte and `[F,E,D]` from the second) and move it to the next\n        // byte, shifting it to place it at the \"start\" (in the current ordering\n        // that means aligning it to the LSB). 
For example, the overflow of (also)\n        // `amount` bits from the first byte is:\n        //\n        // ADDR     |  7  6  5  4  3  2  1  0  |\n        //             +-----+\n        //           overflow lost\n        //\n        // and it's \"recovered\" with a shift in the opposite direction, which both\n        // positions it in the correct place *and* leaves cleared the rest of the\n        // bits to be able to `OR` (join) it with the next byte of `output` (shifted\n        // in the first pass):\n        //\n        // (`output` so far)\n        // ADDR +1  |  C  B  A  9  8  _  _  _  |    +\n        //                                          |\n        // (shifted overflow                        |  join both (`|=`)\n        //      from `input`)                       |\n        // ADDR     |  _  _  _  _  _  7  6  5  |    V\n        //             +------------->\n        //\n        for i in 0..input.len() {\n            let overflow = input[i] >> (8 - amount);\n            output[i + 1] |= overflow;\n        }\n    } else {\n        // The overflow handling in the right shift follows the same logic as the left\n        // one with just two differences: (1) the overflow goes to the *previous* byte\n        // in memory and (2) the overflow of the first byte is discarded (hence the `for`\n        // loop iterates just `input.len` *minus one* positions).\n        for i in 1..input.len() {\n            let overflow = input[i] << (8 - amount);\n            output[i - 1] |= overflow;\n        }\n    }\n\n    // TODO: Optimization: Join both passes in one `for` loop for cache\n    // efficiency (do everything we need to do in the same address once).\n    // (This is low priority since we normally shift small arrays -32 byte\n    // elements- per call.)\n\n    output\n}\n\n/** Extract bits and relocate them.\n\nExtract `num_bits` from the `input` starting at absolute `pos` (expressed in\nbits). Format the extracted bit stream as a byte stream `output` (in a `Vec<u8>`)\nwhere the extracted bits start at `new_offset` bits in the first byte (i.e.,\n`new_offset` can't be bigger than 7) allowing them to be relocated from their\noriginal bit-offset (encoded in `pos`). The rest of the bits (below `new_offset`\nand after the extracted `num_bits`) are left at zero (to prepare them to be\njoined with another extracted `output`). This function follows the ordering in\n`BitVec<LittleEndian, u8>` (see `shift_bits` for more details).\n\nThe length of the input must be big enough to perform the extraction\nof `num_bits`. 
This precondition is only checked during testing (with\n`debug_assert!`) for performance reasons; it is up to the caller to enforce it.\n\n# Example\n\nTaking as `input` the original two-byte layout from `shift_bits`, extracting 4\n`num_bits` from `pos` 12 and relocating them at `new_offset` 2 would result in\nan `output` of a single byte like:\n\n```text\nADDR     |  _  _  F  E  D  C  _  _  |\n```\n\n(The second byte in `ADDR +1` has been dropped after the extraction\nas it's no longer needed.)\n\n**/\n//\n// TODO: Replace the byte terminology with a generic term that can mean\n// anything that implements the `bitvec::Bits` trait (`u8`, `u32`, etc.).\n// `BitVec` calls it \"element\" but that's already used here (this function\n// may need to be moved elsewhere, which would allow reusing that term).\n// This will also imply removing the hardcoded `8`s (size of byte).\n#[inline]\nfn extract_bits_and_shift(input: &[u8], pos: usize, num_bits: usize, new_offset: usize) -> Vec<u8> {\n    debug_assert!(input.len() * 8 >= pos + num_bits);\n    debug_assert!(new_offset <= 7);\n\n    // 1. Trim the whole bytes (before and after) we don't need for the\n    //    extraction (we don't want to waste shift operations on them).\n    // 2. Shift from the original `pos` to the `new_offset`.\n    // 3. Trim the bits in the first and last byte we also don't need.\n    //\n    // TODO: Does (3) need to happen *after* the shift in (2)? It feels\n    // more natural, but can't we just trim everything in (1)?\n\n    // Determine from `pos` the number of full bytes that can be completely skipped\n    // (`skip_bytes`), and the number of bits within the first byte of interest that\n    // we'll start extracting from (`extraction_offset`).\n    let (skip_bytes, extraction_offset) = div_rem(pos, 8);\n\n    // (1).\n    let input = &input[skip_bytes..];\n    let input = &input[..BitByte::from_bits(extraction_offset + num_bits).bytes_needed()];\n\n    // (2).\n    let mut output = match new_offset.cmp(&extraction_offset) {\n        Ordering::Less => {\n            // Shift right.\n            shift_bits(input, extraction_offset - new_offset, false)\n        }\n        Ordering::Greater => {\n            // Shift left.\n            shift_bits(input, new_offset - extraction_offset, true)\n        }\n        Ordering::Equal => {\n            // No shift needed, take the `input` as is.\n            input.to_vec()\n        }\n    };\n\n    // After the shift we may not need the last byte of the `output` (either\n    // because the left shift extended it by one byte or because the right shift\n    // moved the extraction span below that threshold).\n    if output.len() > BitByte::from_bits(new_offset + num_bits).bytes_needed() {\n        output.pop();\n    }\n    // TODO: Optimization: A more specialized shift would have just dropped\n    // that byte (we would need to pass it the `num_bits` we want).\n\n    // (3).\n    if new_offset != 0 {\n        clear_right_bits(output.first_mut().expect(\"output is empty\"), new_offset);\n    }\n    let end_offset = (new_offset + num_bits) % 8;\n    if end_offset != 0 {\n        clear_left_bits(output.last_mut().expect(\"output is empty\"), end_offset);\n    }\n\n    output\n}\n\n// Set to zero all the bits to the \"left\" of the `offset` including\n// it, that is, [MSB; `offset`].\n#[inline]\nfn clear_left_bits(byte: &mut u8, offset: usize) {\n    *(byte) &= (1 << offset) - 1\n}\n\n// Set to zero all the bits to the \"right\" of the `offset` excluding\n// it, that is, (`offset`; 
LSB].\n#[inline]\nfn clear_right_bits(byte: &mut u8, offset: usize) {\n    *(byte) &= !((1 << offset) - 1)\n}\n\n/** Padding process.\n\nRead a `source` of raw byte-aligned data, pad it into a bit stream and\nwrite a byte-aligned version of it to the `target`. The `target` needs\nto implement (besides `Write`) the `Read` and `Seek` traits since the\nlast byte written may be incomplete and will need to be rewritten.\n\nThe reader will always be byte-aligned; the writer will operate with\nbit precision since we may have (when calling this function multiple\ntimes) a written `target` with extra bits (that need to be overwritten)\nand also incomplete data units.\nThe ideal alignment scenario is for the writer to be positioned at the\nbyte-aligned element boundary and just write whole chunks of `data_bits`\n(full data units) followed by their corresponding padding. To get there, we\nneed to handle the potential bit-level misalignments:\n  1. Extra bits: the last byte is only partially valid, so we\n     need to get some bits from the `source` to overwrite them.\n  2. Incomplete data unit: we need to fill the rest of it and add the padding\n     to form an element that would position the writer at the desired boundary.\n**/\n\n// offset and num_bytes are based on the unpadded data, so\n// if [0, 1, ..., 255] was the original unpadded data, offset 3 and len 4 would return\n// [3, 4, 5, 6].\npub fn write_unpadded<W: ?Sized>(\n    source: &[u8],\n    target: &mut W,\n    offset: usize,\n    len: usize,\n) -> io::Result<usize>\nwhere\n    W: Write,\n{\n    // Check that there are actually `len` raw data bytes encoded inside\n    // `source` starting at `offset`.\n    let read_pos = BitByte::from_bits(FR32_PADDING_MAP.transform_bit_offset(offset * 8, true));\n    let raw_data_size = BitByte::from_bits(\n        FR32_PADDING_MAP.transform_bit_offset(source.len() * 8 - read_pos.total_bits(), false),\n    )\n    .bytes_needed();\n    if raw_data_size < len {\n        return Err(Error::new(\n            ErrorKind::Other,\n            format!(\n                \"requested extraction of {} raw data bytes when there's at most {} in the source\",\n                len, raw_data_size\n            ),\n        ));\n    }\n\n    // In order to optimize alignment in the common case of writing from an aligned start,\n    // we should make the chunk a multiple of 128 (4 full elements in the padded layout).\n    // n was hand-tuned to do reasonably well in the benchmarks.\n    let n = 1000;\n    let chunk_size = 128 * n;\n\n    let mut written = 0;\n\n    let mut offset = offset;\n    let mut len = len;\n\n    for chunk in source.chunks(chunk_size) {\n        let write_len = min(len, chunk.len());\n\n        written += write_unpadded_aux(&FR32_PADDING_MAP, source, target, offset, write_len)?;\n        offset += write_len;\n        len -= write_len;\n    }\n\n    Ok(written)\n}\n\n/** Unpadding process.\n\nRead a `source` of padded data and recover from it the byte-aligned\nraw data, writing it to `target`, where `write_pos` specifies from which\nbyte of the raw data stream to start recovering, up to `max_write_size`\nbytes.\n\nThere are 3 limits that tell us how much padded data to process in\neach iteration (`bits_to_extract`):\n1. Element boundary: we can process only one element at a time (to be\n   able to skip the padding bits).\n2. End of `source`: no more data to read.\n3. 
No more space to write the recovered raw data: we shouldn't write\n   into the `target` beyond `max_write_size`.\n\nThe reader will generally operate with bit precision: even if the padded\nlayout is byte-aligned (no extra bits), the data inside it isn't (since\nwe pad at the bit level).\n**/\nfn write_unpadded_aux<W: ?Sized>(\n    padding_map: &PaddingMap,\n    source: &[u8],\n    target: &mut W,\n    write_pos: usize,\n    max_write_size: usize,\n) -> io::Result<usize>\nwhere\n    W: Write,\n{\n    // Position of the reader in the padded bit stream layout, deduced from\n    // the position of the writer (`write_pos`) in the raw data layout.\n    let mut read_pos = BitByte::from_bits(padding_map.transform_bit_offset(write_pos * 8, true));\n\n    // Specify the maximum data to recover (write) in bits, since the data unit\n    // in the element (in contrast with the original raw data that generated it)\n    // is not byte aligned.\n    let max_write_size_bits = max_write_size * 8;\n\n    // Estimate how many bytes we'll need for the `raw_data` to allocate\n    // them all at once. We need to take into account both how much we\n    // have left to read *and* write, and even then, since we may start\n    // in the middle of an element (`write_pos`), there's some variability\n    // as to how many padding bits will be encountered.\n    // So allow an *over*-estimation error of 1 byte: `transform_bit_offset`\n    // has the implicit assumption that the data provided is starting at the\n    // beginning of an element, i.e., the padding bits are as far away as possible,\n    // which maximizes the chances of not getting an extra `pad_bits` in the\n    // `source` (which are unpadded away and not carried to the `target`). That\n    // is, in this context `transform_bit_offset` is optimistic about the number\n    // of raw data bits we'll be able to recover from a fixed number of `source`\n    // bits.\n    let mut raw_data_size = BitByte::from_bits(\n        padding_map.transform_bit_offset(source.len() * 8 - read_pos.total_bits(), false),\n    )\n    .bytes_needed();\n    raw_data_size = min(raw_data_size, max_write_size);\n\n    // Recovered raw data unpadded from the `source` which will\n    // be written to the `target`.\n    let mut raw_data: Vec<u8> = Vec::with_capacity(raw_data_size);\n\n    // Total number of raw data bits we have written (unpadded from the `source`).\n    let mut written_bits = 0;\n    // Bit offset within the last byte at which the next write needs to happen\n    // (derived from `written_bits`); we keep track of this since we write in chunks\n    // that may not be byte-aligned.\n    let mut write_bit_offset = 0;\n\n    // If there is no more data to read or no more space to write, stop.\n    while read_pos.bytes < source.len() && written_bits < max_write_size_bits {\n        // (1): Find the element boundary and, assuming that there is a full\n        //      unit of data (which actually may be incomplete), how many bits\n        //      are left to read from `read_pos`.\n        let (next_element_position, mut bits_to_extract) = padding_map.next_boundary(&read_pos);\n\n        // (2): As the element may be incomplete, check how much data is\n        //      actually available so as not to access the `source` past\n        //      its limit.\n        bits_to_extract = min(bits_to_extract, source.len() * 8 - read_pos.total_bits());\n\n        // (3): Don't read more than `max_write_size`.\n        let bits_left_to_write = max_write_size_bits - written_bits;\n        
bits_to_extract = min(bits_to_extract, bits_left_to_write);\n\n        // Extract the next data unit from the element (or whatever space we\n        // have left to write) and reposition it in the `write_bit_offset`.\n        // N.B., the bit offset of the data in the original raw data byte\n        // stream and the same data in the padded layout are not necessarily\n        // the same (since the added padding bits shift it).\n        let mut recovered = extract_bits_and_shift(\n            &source,\n            read_pos.total_bits(),\n            bits_to_extract,\n            write_bit_offset,\n        );\n\n        if write_bit_offset != 0 {\n            // Since the two data units we are joining are not byte-aligned we can't\n            // just append the whole bytes to `raw_data`, we need to join the last\n            // byte of the already written `raw_data` with the first one of data unit\n            // `recovered` in this iteration. Since `extract_bits_and_shift` already\n            // takes care of setting to zero the bits beyond the extraction limit we\n            // can just `OR` the two.\n            *(raw_data.last_mut().expect(\"raw_data is empty\")) |=\n                *(recovered.first().expect(\"recovered is empty\"));\n            raw_data.append(&mut recovered[1..].to_vec());\n        } else {\n            raw_data.append(&mut recovered);\n        }\n\n        written_bits += bits_to_extract;\n        write_bit_offset = written_bits % 8;\n\n        // Position the reader in the next element boundary, this will be ignored\n        // if we already hit limits (2) or (3) (in that case this was the last iteration).\n        read_pos = BitByte {\n            bytes: next_element_position,\n            bits: 0,\n        };\n    }\n\n    // TODO: Don't write the whole output into a huge BitVec.\n    // Instead, write it incrementally –\n    // but ONLY when the bits waiting in bits_out are byte-aligned. i.e. 
a multiple of 8\n\n    // Check that our estimated size was correct, allow it to be overestimated\n    // (not *under*) by 1 byte.\n    debug_assert!(raw_data_size - raw_data.len() <= 1);\n    debug_assert!(raw_data_size >= raw_data.len());\n\n    target.write_all(&raw_data)?;\n\n    Ok(raw_data.len())\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::io::{Cursor, Read};\n\n    use bitvec::{order::Lsb0 as LittleEndian, vec::BitVec};\n    use itertools::Itertools;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    use crate::Fr32Reader;\n\n    const TEST_SEED: [u8; 16] = [\n        0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc,\n        0xe5,\n    ];\n\n    #[test]\n    fn test_position() {\n        let mut bits = 0;\n        for i in 0..10 {\n            for j in 0..8 {\n                let position = BitByte { bytes: i, bits: j };\n                assert_eq!(position.total_bits(), bits);\n                bits += 1;\n            }\n        }\n    }\n\n    // Test the `extract_bits_and_shift` function against the `BitVec` functionality\n    // (assumed to be correct).\n    #[test]\n    fn test_random_bit_extraction() {\n        // Length of the data vector we'll be extracting from.\n        let len = 20;\n\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n        let data: Vec<u8> = (0..len).map(|_| rng.gen()).collect();\n\n        // TODO: Evaluate designing a scattered pattern of `pos` and `num_bits`\n        // instead of repeating too many iterations with arbitrary numbers.\n        for _ in 0..100 {\n            let pos = rng.gen_range(0, data.len() / 2);\n            let num_bits = rng.gen_range(1, data.len() * 8 - pos);\n            let new_offset = rng.gen_range(0, 8);\n\n            let mut bv = BitVec::<LittleEndian, u8>::new();\n            bv.extend(\n                BitVec::<LittleEndian, u8>::from(&data[..])\n                    .into_iter()\n                    .skip(pos)\n                    .take(num_bits),\n            );\n            let shifted_bv: BitVec<LittleEndian, u8> = bv >> new_offset;\n\n            assert_eq!(\n                shifted_bv.as_slice(),\n                &extract_bits_and_shift(&data, pos, num_bits, new_offset)[..],\n            );\n        }\n    }\n\n    // Test the `shift_bits` function against the `BitVec<LittleEndian, u8>`\n    // implementation of `shr_assign` and `shl_assign`.\n    #[test]\n    fn test_bit_shifts() {\n        let len = 5;\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        for amount in 1..8 {\n            for left in [true, false].iter() {\n                let data: Vec<u8> = (0..len).map(|_| rng.gen()).collect();\n\n                let shifted_bits = shift_bits(&data, amount, *left);\n\n                let mut bv: BitVec<LittleEndian, u8> = data.into();\n                if *left {\n                    bv >>= amount;\n                } else {\n                    bv <<= amount;\n                }\n                // We use the opposite shift notation (see `shift_bits`).\n\n                assert_eq!(bv.as_slice(), shifted_bits.as_slice());\n            }\n        }\n    }\n\n    // Simple (and slow) padder implementation using `BitVec`.\n    // It is technically not quite right to use `BitVec` to test\n    // `write_padded` since at the moment that function still uses\n    // it for some corner cases, but since this implementation has\n    // largely been replaced, it seems reasonable.\n    fn bit_vec_padding(raw_data: 
Vec<u8>) -> Box<[u8]> {\n        let mut padded_data: BitVec<LittleEndian, u8> = BitVec::new();\n        let raw_data: BitVec<LittleEndian, u8> = BitVec::from(raw_data);\n\n        for data_unit in raw_data\n            .into_iter()\n            .chunks(FR32_PADDING_MAP.data_bits)\n            .into_iter()\n        {\n            padded_data.extend(data_unit);\n\n            // To avoid reconverting the iterator, we deduce if we need the padding\n            // by the length of `padded_data`: a full data unit would not leave the\n            // padded layout aligned (it would leave it unaligned by just `pad_bits()`).\n            if padded_data.len() % 8 != 0 {\n                for _ in 0..FR32_PADDING_MAP.pad_bits() {\n                    padded_data.push(false);\n                }\n            }\n        }\n\n        padded_data.into_boxed_slice()\n    }\n\n    // `write_padded` and `write_unpadded` for 1016 bytes of 1s, check the\n    // recovered raw data.\n    #[test]\n    fn test_read_write_padded() {\n        let len = 1016; // Use a multiple of 254.\n        let data = vec![255u8; len];\n        let mut padded = Vec::new();\n        let mut reader = Fr32Reader::new(Cursor::new(&data));\n        reader\n            .read_to_end(&mut padded)\n            .expect(\"in-memory read failed\");\n\n        assert_eq!(\n            padded.len(),\n            FR32_PADDING_MAP.transform_byte_offset(len, true)\n        );\n\n        let mut unpadded = Vec::new();\n        let unpadded_written =\n            write_unpadded(&padded, &mut unpadded, 0, len).expect(\"un-padded write failed\");\n        assert_eq!(unpadded_written, len);\n        assert_eq!(data, unpadded);\n        assert_eq!(padded.into_boxed_slice(), bit_vec_padding(data));\n    }\n\n    // `write_padded` and `write_unpadded` for 1016 bytes of random data, recover\n    // different lengths of raw data at different offset, check integrity.\n    #[test]\n    fn test_read_write_padded_offset() {\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        let len = 1016;\n        let data: Vec<u8> = (0..len).map(|_| rng.gen()).collect();\n\n        let mut padded = Vec::new();\n        let mut reader = Fr32Reader::new(Cursor::new(&data));\n        reader\n            .read_to_end(&mut padded)\n            .expect(\"in-memory read failed\");\n\n        {\n            let mut unpadded = Vec::new();\n            write_unpadded(&padded, &mut unpadded, 0, 1016).expect(\"un-padded write failed: 1016\");\n            let expected = &data[0..1016];\n\n            assert_eq!(expected.len(), unpadded.len());\n            assert_eq!(expected, &unpadded[..]);\n        }\n\n        {\n            let mut unpadded = Vec::new();\n            write_unpadded(&padded, &mut unpadded, 0, 44).expect(\"un-padded write failed: 44\");\n            let expected = &data[0..44];\n\n            assert_eq!(expected.len(), unpadded.len());\n            assert_eq!(expected, &unpadded[..]);\n        }\n\n        let excessive_len = 35;\n        for start in (1016 - excessive_len + 2)..1016 {\n            assert!(write_unpadded(&padded, &mut Vec::new(), start, excessive_len).is_err());\n        }\n    }\n\n    // TODO: Add a test that drops the last part of an element and tries to recover\n    // the rest of the data (may already be present in some form in the above tests).\n}\n"
  },
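The element arithmetic documented above is easy to sanity-check by hand. Below is a minimal, self-contained sketch of the size transformation in the padding direction (the `padded_size_bytes` helper is hypothetical, written only for illustration; the file itself exposes this logic as `to_padded_bytes` via `PaddingMap::transform_byte_offset`). It reproduces the two worked examples from the documentation: 40 raw bytes pad out to 41 bytes (one 32-byte element plus a 9-byte incomplete unit), and 127 raw bytes pad out to exactly 4 elements (128 bytes).

```rust
// Hypothetical stand-in for `PaddingMap::transform_byte_offset(pos, true)`
// with the Fr32 map (`data_bits = 254`, `element_bits = 256`).
fn padded_size_bytes(unpadded_bytes: usize) -> usize {
    let bits = unpadded_bytes * 8;
    // Full 254-bit data units grow into 256-bit elements; a trailing
    // incomplete unit is carried over unchanged.
    let (full_units, incomplete_bits) = (bits / 254, bits % 254);
    let padded_bits = full_units * 256 + incomplete_bits;
    // Round up to the next whole byte (the `ceil` case described above).
    (padded_bits + 7) / 8
}

fn main() {
    assert_eq!(padded_size_bytes(40), 41); // 254 + 66 bits -> 32 + 9 bytes
    assert_eq!(padded_size_bytes(127), 128); // 4 full elements, no remainder
}
```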
  {
    "path": "fr32/src/reader.rs",
    "content": "use std::cmp::min;\nuse std::io::{self, Read};\nuse std::mem::size_of;\n\n#[cfg(not(target_arch = \"aarch64\"))]\nuse byte_slice_cast::AsSliceOf;\n\nuse byte_slice_cast::AsByteSlice;\n\n/// The number of Frs per Block.\nconst NUM_FRS_PER_BLOCK: usize = 4;\n/// The amount of bits in an Fr when not padded.\nconst IN_BITS_FR: usize = 254;\n/// The amount of bits in an Fr when padded.\nconst OUT_BITS_FR: usize = 256;\n\nconst NUM_BYTES_IN_BLOCK: usize = NUM_FRS_PER_BLOCK * IN_BITS_FR / 8;\nconst NUM_BYTES_OUT_BLOCK: usize = NUM_FRS_PER_BLOCK * OUT_BITS_FR / 8;\n\nconst NUM_U128S_PER_BLOCK: usize = NUM_BYTES_OUT_BLOCK / size_of::<u128>();\n\nconst MASK_SKIP_HIGH_2: u128 = 0b0011_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111_1111;\n\n#[repr(align(16))]\nstruct AlignedBuffer([u8; NUM_BYTES_IN_BLOCK + 1]);\n\n/// An `io::Reader` that converts unpadded input into valid `Fr32` padded output.\npub struct Fr32Reader<R> {\n    /// The source being padded.\n    source: R,\n    /// Currently read block.\n    /// This is padded to 128 bytes to allow reading all values as `u128`s, but only the first\n    /// 127 bytes are ever valid.\n    in_buffer: AlignedBuffer,\n    /// Currently writing out block.\n    out_buffer: [u128; NUM_U128S_PER_BLOCK],\n    /// The current offset into the `out_buffer` in bytes.\n    out_offset: usize,\n    /// How many `Fr32`s are available in the `out_buffer`.\n    available_frs: usize,\n    /// Are we done reading?\n    done: bool,\n}\n\nmacro_rules! process_fr {\n    (\n        $in_buffer:expr,\n        $out0:expr,\n        $out1:expr,\n        $bit_offset:expr\n    ) => {{\n        $out0 = $in_buffer[0] >> 128 - $bit_offset;\n        $out0 |= $in_buffer[1] << $bit_offset;\n        $out1 = $in_buffer[1] >> 128 - $bit_offset;\n        $out1 |= $in_buffer[2] << $bit_offset;\n        $out1 &= MASK_SKIP_HIGH_2; // zero high 2 bits\n    }};\n}\n\nimpl<R: Read> Fr32Reader<R> {\n    pub fn new(source: R) -> Self {\n        Fr32Reader {\n            source,\n            in_buffer: AlignedBuffer([0; NUM_BYTES_IN_BLOCK + 1]),\n            out_buffer: [0; NUM_U128S_PER_BLOCK],\n            out_offset: 0,\n            available_frs: 0,\n            done: false,\n        }\n    }\n\n    /// Processes a single block in in_buffer, writing the result to out_buffer.\n    fn process_block(&mut self) {\n        let in_buffer: &[u128] = {\n            #[cfg(target_arch = \"aarch64\")]\n            // Safety: This is safe because the struct/data is aligned on\n            // a 16 byte boundary and can therefore be casted from u128\n            // to u8 without alignment safety issues.\n            unsafe {\n                &mut (*(&self.in_buffer.0 as *const [u8] as *mut [u128]))\n            }\n            #[cfg(not(target_arch = \"aarch64\"))]\n            self.in_buffer.0.as_slice_of::<u128>().unwrap()\n        };\n        let out = &mut self.out_buffer;\n\n        // 0..254\n        {\n            out[0] = in_buffer[0];\n            out[1] = in_buffer[1] & MASK_SKIP_HIGH_2;\n        }\n        // 254..508\n        process_fr!(&in_buffer[1..], out[2], out[3], 2);\n        // 508..762\n        process_fr!(&in_buffer[3..], out[4], out[5], 4);\n        // 762..1016\n        process_fr!(&in_buffer[5..], out[6], out[7], 6);\n\n        // Reset buffer offset.\n        self.out_offset = 0;\n    }\n\n    fn fill_in_buffer(&mut self) -> io::Result<usize> {\n        let mut bytes_read 
= 0;\n        let mut buf = &mut self.in_buffer.0[..NUM_BYTES_IN_BLOCK];\n\n        while !buf.is_empty() {\n            match self.source.read(buf) {\n                Ok(0) => {\n                    break;\n                }\n                Ok(n) => {\n                    buf = &mut buf[n..];\n                    bytes_read += n;\n                }\n                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}\n                Err(e) => return Err(e),\n            }\n        }\n\n        // Clear unfilled memory.\n        for val in &mut self.in_buffer.0[bytes_read..NUM_BYTES_IN_BLOCK] {\n            *val = 0;\n        }\n\n        Ok(bytes_read)\n    }\n}\n\n/// Division of x by y, rounding up.\n/// x must be > 0\n#[inline]\nconst fn div_ceil(x: usize, y: usize) -> usize {\n    1 + ((x - 1) / y)\n}\n\nimpl<R: Read> Read for Fr32Reader<R> {\n    fn read(&mut self, target: &mut [u8]) -> io::Result<usize> {\n        if self.done || target.is_empty() {\n            return Ok(0);\n        }\n\n        // The number of bytes already read and written into `target`.\n        let mut bytes_read = 0;\n        // The number of bytes to read.\n        let bytes_to_read = target.len();\n\n        while bytes_read < bytes_to_read {\n            // Load and process the next block, if no Frs are available anymore.\n            if self.available_frs == 0 {\n                let bytes_read = self.fill_in_buffer()?;\n\n                // All data was read from the source, no new data in the buffer.\n                if bytes_read == 0 {\n                    self.done = true;\n                    break;\n                }\n\n                self.process_block();\n\n                // Update state of how many new Frs are now available.\n                self.available_frs = div_ceil(bytes_read * 8, IN_BITS_FR);\n            }\n\n            // Write out as many Frs as available and requested\n            {\n                let available_bytes = self.available_frs * (OUT_BITS_FR / 8);\n\n                let target_start = bytes_read;\n                let target_end = min(target_start + available_bytes, bytes_to_read);\n                let len = target_end - target_start;\n\n                let out_start = self.out_offset;\n                let out_end = out_start + len;\n\n                target[target_start..target_end]\n                    .copy_from_slice(&self.out_buffer.as_byte_slice()[out_start..out_end]);\n                bytes_read += len;\n                self.out_offset += len;\n                self.available_frs -= div_ceil(len * 8, OUT_BITS_FR);\n            }\n        }\n\n        Ok(bytes_read)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::io::Cursor;\n\n    use bitvec::{order::Lsb0 as LittleEndian, vec::BitVec};\n    use itertools::Itertools;\n    use pretty_assertions::assert_eq;\n    use rand::random;\n\n    use crate::bytes_into_fr;\n\n    const DATA_BITS: u64 = 254;\n    const TARGET_BITS: u64 = 256;\n\n    #[test]\n    fn test_simple_short() {\n        // Source is shorter than 1 padding cycle.\n        let data = vec![3u8; 30];\n        let mut reader = Fr32Reader::new(Cursor::new(&data));\n        let mut padded = Vec::new();\n        reader\n            .read_to_end(&mut padded)\n            .expect(\"in-memory read failed\");\n        assert_eq!(padded.len(), 32);\n        assert_eq!(&data[..], &padded[..30]);\n\n        assert_eq!(padded.into_boxed_slice(), bit_vec_padding(data));\n    }\n\n    #[test]\n    fn test_simple_single() {\n        let 
data = vec![255u8; 32];\n        let mut padded = Vec::new();\n        let mut reader = Fr32Reader::new(Cursor::new(&data));\n        reader\n            .read_to_end(&mut padded)\n            .expect(\"in-memory read failed\");\n\n        assert_eq!(&padded[0..31], &data[0..31]);\n        assert_eq!(padded[31], 0b0011_1111);\n        assert_eq!(padded[32], 0b0000_0011);\n        assert_eq!(padded.len(), 64);\n        let bv = bit_vec_padding(data);\n        assert_eq!(bv.len(), 64);\n        assert_eq!(padded.into_boxed_slice(), bv);\n    }\n\n    #[test]\n    fn test_simple_127() {\n        let data = vec![255u8; 127];\n        let mut padded = Vec::new();\n        let mut reader = Fr32Reader::new(Cursor::new(&data));\n        reader\n            .read_to_end(&mut padded)\n            .expect(\"in-memory read failed\");\n\n        assert_eq!(&padded[0..31], &data[0..31]);\n        assert_eq!(padded[31], 0b0011_1111);\n        assert_eq!(padded[32], 0b1111_1111);\n\n        assert_eq!(padded.len(), 128);\n\n        assert_eq!(padded.into_boxed_slice(), bit_vec_padding(data));\n    }\n\n    #[test]\n    fn test_chained_byte_source() {\n        let random_bytes: Vec<u8> = (0..127).map(|_| random::<u8>()).collect();\n\n        // read 127 bytes from a non-chained source\n        let output_x = {\n            let input_x = Cursor::new(random_bytes.clone());\n\n            let mut reader = Fr32Reader::new(input_x);\n            let mut buf_x = Vec::new();\n            reader.read_to_end(&mut buf_x).expect(\"could not seek\");\n            buf_x\n        };\n\n        for n in 1..127 {\n            let random_bytes = random_bytes.clone();\n\n            // read 127 bytes from a n-byte buffer and then the rest\n            let output_y = {\n                let input_y =\n                    Cursor::new(random_bytes.iter().take(n).cloned().collect::<Vec<u8>>()).chain(\n                        Cursor::new(random_bytes.iter().skip(n).cloned().collect::<Vec<u8>>()),\n                    );\n\n                let mut reader = Fr32Reader::new(input_y);\n                let mut buf_y = Vec::new();\n                reader.read_to_end(&mut buf_y).expect(\"could not seek\");\n\n                buf_y\n            };\n\n            assert_eq!(&output_x, &output_y, \"should have written same bytes\");\n            assert_eq!(\n                output_x.clone().into_boxed_slice(),\n                bit_vec_padding(random_bytes)\n            );\n        }\n    }\n\n    #[test]\n    fn test_full() {\n        let data = vec![255u8; 127];\n\n        let mut buf = Vec::new();\n        let mut reader = Fr32Reader::new(Cursor::new(&data));\n        reader.read_to_end(&mut buf).expect(\"in-memory read failed\");\n\n        assert_eq!(buf.clone().into_boxed_slice(), bit_vec_padding(data));\n        validate_fr32(&buf);\n    }\n\n    #[test]\n    #[ignore]\n    fn test_long() {\n        use rand::{thread_rng, RngCore};\n\n        let mut rng = thread_rng();\n        for i in 1..100 {\n            for j in 1..50 {\n                let mut data = vec![0u8; i * j];\n                rng.fill_bytes(&mut data);\n\n                let mut buf = Vec::new();\n                let mut reader = Fr32Reader::new(Cursor::new(&data));\n                reader.read_to_end(&mut buf).expect(\"in-memory read failed\");\n\n                assert_eq!(\n                    buf.into_boxed_slice(),\n                    bit_vec_padding(data),\n                    \"{} - {}\",\n                    i,\n                    j\n                );\n     
       }\n        }\n    }\n\n    fn bit_vec_padding(raw_data: Vec<u8>) -> Box<[u8]> {\n        let mut padded_data: BitVec<LittleEndian, u8> = BitVec::new();\n        let raw_data: BitVec<LittleEndian, u8> = BitVec::from(raw_data);\n\n        for data_unit in raw_data.into_iter().chunks(DATA_BITS as usize).into_iter() {\n            padded_data.extend(data_unit);\n\n            // To avoid reconverting the iterator, we deduce if we need the padding\n            // by the length of `padded_data`: a full data unit would not leave the\n            // padded layout aligned (it would leave it unaligned by just `pad_bits()`).\n            if padded_data.len() % 8 != 0 {\n                for _ in 0..(TARGET_BITS - DATA_BITS) {\n                    padded_data.push(false);\n                }\n            }\n        }\n\n        while padded_data.len() % (TARGET_BITS as usize) != 0 {\n            padded_data.push(false);\n        }\n\n        padded_data.into_boxed_slice()\n    }\n\n    fn validate_fr32(bytes: &[u8]) {\n        let chunks = (bytes.len() as f64 / 32_f64).ceil() as usize;\n        for (i, chunk) in bytes.chunks(32).enumerate() {\n            let _ = bytes_into_fr(chunk).unwrap_or_else(|_| {\n                panic!(\n                    \"chunk {}/{} cannot be converted to valid Fr: {:?}\",\n                    i + 1,\n                    chunks,\n                    chunk\n                )\n            });\n        }\n    }\n\n    // raw data stream of increasing values and specific\n    // outliers (0xFF, 9), check the content of the raw data encoded (with\n    // different alignments) in the padded layouts.\n    #[test]\n    fn test_exotic() {\n        let mut source = vec![\n            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,\n            25, 26, 27, 28, 29, 30, 31, 0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,\n            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0xff, 9, 9,\n        ];\n        source.extend(vec![9, 0xff]);\n\n        let mut buf = Vec::new();\n        let mut reader = Fr32Reader::new(Cursor::new(&source));\n        reader.read_to_end(&mut buf).expect(\"in-memory read failed\");\n\n        for (i, &byte) in buf.iter().enumerate().take(31) {\n            assert_eq!(byte, i as u8 + 1);\n        }\n        assert_eq!(buf[31], 63); // Six least significant bits of 0xff\n        assert_eq!(buf[32], (1 << 2) | 0b11); // 7\n        for (i, &byte) in buf.iter().enumerate().skip(33).take(30) {\n            assert_eq!(byte, (i as u8 - 31) << 2);\n        }\n        assert_eq!(buf[63], (0x0f << 2)); // 4-bits of ones, half of 0xff, shifted by two, followed by two bits of 0-padding.\n        assert_eq!(buf[64], 0x0f | 9 << 4); // The last half of 0xff, 'followed' by 9.\n        assert_eq!(buf[65], 9 << 4); // A shifted 9.\n        assert_eq!(buf[66], 9 << 4); // Another.\n        assert_eq!(buf[67], 0xf0); // The final 0xff is split into two bytes. Here is the first half.\n        assert_eq!(buf[68], 0x0f); // And here is the second.\n\n        assert_eq!(buf.into_boxed_slice(), bit_vec_padding(source));\n    }\n}\n"
  },
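The hot path of `process_block` above is the `process_fr!` macro: each successive Fr starts 2 bits closer to the end of its starting `u128` word (every preceding Fr contributed 2 padding bits), so it is reassembled from two or three consecutive little-endian words and its two high bits are masked off. Below is a minimal standalone sketch of that shuffle under the same word layout; `shuffle_fr` is a hypothetical helper written for illustration, not the crate's API.

```rust
/// Two high bits cleared: the same value as the module's `MASK_SKIP_HIGH_2`.
const MASK_SKIP_HIGH_2: u128 = !0u128 >> 2;

// Hypothetical helper mirroring one `process_fr!` expansion: rebuild a
// padded Fr from three consecutive little-endian words, where the Fr's
// first `bit_offset` bits are the most significant bits of `words[0]`.
// `bit_offset` must be in 1..=127 (the macro uses 2, 4 and 6).
fn shuffle_fr(words: &[u128; 3], bit_offset: u32) -> [u128; 2] {
    let lo = (words[0] >> (128 - bit_offset)) | (words[1] << bit_offset);
    let hi = ((words[1] >> (128 - bit_offset)) | (words[2] << bit_offset)) & MASK_SKIP_HIGH_2;
    [lo, hi]
}

fn main() {
    // With an all-ones input block, the recovered Fr is all ones except
    // for its two (zeroed) most significant bits.
    let out = shuffle_fr(&[!0u128, !0u128, !0u128], 2);
    assert_eq!(out[0], !0u128);
    assert_eq!(out[1], MASK_SKIP_HIGH_2);
}
```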
  {
    "path": "graphql/graphiql.go",
    "content": "// The MIT License (MIT)\n//\n// Copyright (c) 2016 Muhammed Thanish\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in all\n// copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n// SOFTWARE.\n\npackage graphql\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net/http\"\n)\n\n// GraphiQL is an in-browser IDE for exploring GraphiQL APIs.\n// This handler returns GraphiQL when requested.\n//\n// For more information, see https://github.com/graphql/graphiql.\ntype GraphiQL struct{}\n\nfunc respond(w http.ResponseWriter, body []byte, code int) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.WriteHeader(code)\n\t_, _ = w.Write(body)\n}\n\nfunc errorJSON(msg string) []byte {\n\tbuf := bytes.Buffer{}\n\tfmt.Fprintf(&buf, `{\"error\": \"%s\"}`, msg)\n\treturn buf.Bytes()\n}\n\nfunc (h GraphiQL) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\trespond(w, errorJSON(\"only GET requests are supported\"), http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\tw.Write(graphiql)\n}\n\nvar graphiql = []byte(`\n<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<link\n                rel=\"icon\"\n                type=\"image/png\"\n                
href=\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAAAXNSR0IArs4c6QAAAAlwSFlzAAALEwAACxMBAJqcGAAAActpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDUuNC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+QWRvYmUgSW1hZ2VSZWFkeTwveG1wOkNyZWF0b3JUb29sPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KKS7NPQAAB5FJREFUWAm1FmtsnEdxdr/vfC8/mpgEfHYa6gaUJqAihfhVO7UprSokHsn5jKgKiKLGIIEEbSlpJdQLIJw+UFFQUSuBWir1z9nnpEmgCUnkcxPSmDRCgkKpGoJpfXdxHSc4ftzr2x1m9rvPPQdDDSgrfd/O7szOe2YX4H8cGEtY3tFK2Nu7pjMCChbgzVfD11h4XLKAibahL6dbBv+SaRl6LUsw78XBxTG80mEsWSkxu1oM9qmJlkR7UPhPWSDJCzSISw5zXZGxvpMezUp5GmtWQszunpiAKiPPZ20KyCqY1/ncgs4v+IUPwLJvYhzTVIbmvXgvqwAxkImKJHt1yzM+AQLXvdKXy3QevB4R+3O6wIYHSUCIlABEtTO9bf86pmFa7B6xPeHMi3l668p5SQjInbRGQQw0E3FMH4FHaFPoP8USVaveEo9aaH3LsdRh2vsYKqwhMhRBKw82vGbNQbcC9ePL1+PDmwf7iix0N+xmPoafq4TgDDaRYxmLCrBwD5HpSK4vKRVeP9b3ZyaaaE18UaL4KYE5x5afsWxoBgefFfX+jX6pMH9RvSnX2v1YxPP4D3UAHG2hgm80vRp7ns9nWxOb8kIt3HD6C+O8rpRVoYCxHDOtQwOg4QHS1kIb9oHGVQJlN0h8qPF07FFmkG4byouAjEdSO/bwOntr8kGt8EeNJ3uN27O37fse5PT3lVIjUsrL6MB2IVCThMcbx3ofIt7sZeMFExeTubSR3Zq4tVoEdhHSJs30WqjbIS1Zk6/VqzzhmdbBpyn5p1g4W8LMGkajj9GUSfcM/4IVaji+/QdOa7hehKz69xEPsllLkFZY+HdlWhOdLNxrXm5iTK1xPSHEeo4KxTFPzEsFLHH8D914rG+GGWe2Dd9UJav6ZbW1k9ep7rgF3SnTEUXA3hko2fdkowc2M27dk3deomgfLBIPYlJytC4QLzKLZdAoy3QzNTVqksT2y6Oz+YVL1TK4Oo9FYAVIkRFzgH8F/bOiD0cjv4m+hEA9IdXn8HaC4Mjxzx7OdCZH8R14mra6eB9sfUKTj4SCQLUvCHMqN235rKMGV5ZpPCAoSzGOcs2JaFZYVuc8FF5XQl8uCHV75FT0ZT6Q6Ry+02fZ3b7agLF+MGbYmF/Mg+vE14NY1Xnhjv2fZkTkWO+R2VXqc1BrLczp/OtULV0fOLXjHS5LlvkuhzL05oZf+xnMbtv3BLXZIwyPQNx4iRLvrXRXci/vcV/guXJ4dZ/elnwqfctQlnFxoGyhkY2+eCbTlnyCYU8GwzzcHHBhmKl7261X1CEBaIT0QNxJdyQfpLRdHblt4wNMeuhsVpWPvDulqAXQKH5i9f0Ut7pMT/LhOEWc96hfkBEYYnhDU3DJ2SUKMAEPIagRoTSJObF9uF5oHAC/uF/ENxeRrPcai0vt/k1mE+6GeE9eVIlvQwF+yGfL/KiNuMpUnmF4WQUYwX3AEEzjXmqi5yOp6DO8hrM7TeIZ+Orf2X6DY1oU+FeY1D8xJLh8G2bcsgpQ3vqoAU1P3nWouQaDd8mQdS8Tj1B/Z0sZXm6QyxbvAFlj3Us95e7Jbx6/EYScpnP/kjfMwy3DMre6mXVGIVTqiqi1mtVk8blZR78UOdGbQqDLheLMjWc54Yt7KSAaUvRwTyrdMXREvFF6VtRZfgrALNOcm8ixZxe9uOgBLsMPnftUIdM+tBFKcLtwxCeJ7GbdHDJlJ6DHYetX8gHfSTTEB4P9WNBb5JRq0VrfwbxZRuVN61pMt56ICz3elWxAB18OS//Nep4MKeowTOU/zMwo8RaV5fVKhs4WN1DzCjkzJV1jBT9K1TB6oWN4bR89arDMz7iTa1ikepxsy+CXqmXol1fUfJ4qwUfeptsXL1JNTFNWXkfmO5ydi8KXBIMWvCYnmbOWmKXr5zpZhHotSbQGp9YO+qkb3h05E3vBk+nmwJopw5SSdVxRsOjiCGhEXSMCMFdTrAdbPikul35PvWAN1adPgqAGz8Kk1FLTX2hlCyF9pHSIQlwnp+x6/yb1t9zu8LgFszJHt5v0K+TakuPmbFnmog2cXBzfbFtyj1b6O4SQ4BP76Zr1k1Etwoe7Ir+N/dwcfo8f3QnbsYR7yAO/kxICdAH1En+km/WxhtPRXZ4sZrOoQBk2npjcmmwu2ipMz6s/MlG6JflVqrC9pN8VqLK+1nhix4u8/3Z7YjXPRHeJ52z3vm7Mq6eISa0UeF/DK7FB3r/w8eGP0Htg4f1noud5TXgy1g1lpQIGQelGyLjbQk3J7TZr8yT7uxzwSfu+oiwdIL//gTKc+4MUltxL/lpPFn+ebvqByFhswAjid+VgTLNnXcGcyHGuY7PmvWUHZ2hlqXgXDRNfbD/YSE+2MeeWYzjZMmw+p+MYpnuSJy/FjtZ5DCvPuI9SFv5/DI4buZxfwZBuH7pnpu0QprcOztM3N9v2K8x2DH+FcZktB/nSWeJZ3v93Y8VasRubmqBoGKF4g6oBwjIQoi/MMDrqHOMamnMFmv6ziw0T97diTb0zHB7OEe4ZlCjf5X2U8vGm09HnKrPbo78mMwu6mjFn9tV713TtvWpZSCX83wr9J1EKd8CrhC26AAAAAElFTkSuQmCC\"\n        />\n        <link\n                rel=\"stylesheet\"\n                href=\"https://cdnjs.cloudflare.com/ajax/libs/graphiql/0.13.0/graphiql.css\"\n                
integrity=\"sha384-Qua2xoKBxcHOg1ivsKWo98zSI5KD/UuBpzMIg8coBd4/jGYoxeozCYFI9fesatT0\"\n                crossorigin=\"anonymous\"\n        />\n        <script\n                src=\"https://cdnjs.cloudflare.com/ajax/libs/fetch/3.0.0/fetch.min.js\"\n                integrity=\"sha384-5B8/4F9AQqp/HCHReGLSOWbyAOwnJsPrvx6C0+VPUr44Olzi99zYT1xbVh+ZanQJ\"\n                crossorigin=\"anonymous\"\n        ></script>\n        <script\n                src=\"https://cdnjs.cloudflare.com/ajax/libs/react/16.8.5/umd/react.production.min.js\"\n                integrity=\"sha384-dOCiLz3nZfHiJj//EWxjwSKSC6Z1IJtyIEK/b/xlHVNdVLXDYSesoxiZb94bbuGE\"\n                crossorigin=\"anonymous\"\n        ></script>\n        <script\n                src=\"https://cdnjs.cloudflare.com/ajax/libs/react-dom/16.8.5/umd/react-dom.production.min.js\"\n                integrity=\"sha384-QI+ql5f+khgo3mMdCktQ3E7wUKbIpuQo8S5rA/3i1jg2rMsloCNyiZclI7sFQUGN\"\n                crossorigin=\"anonymous\"\n        ></script>\n        <script\n                src=\"https://cdnjs.cloudflare.com/ajax/libs/graphiql/0.13.0/graphiql.min.js\"\n                integrity=\"sha384-roSmzNmO4zJK9X4lwggDi4/oVy+9V4nlS1+MN8Taj7tftJy1GvMWyAhTNXdC/fFR\"\n                crossorigin=\"anonymous\"\n        ></script>\n\t</head>\n\t<body style=\"width: 100%; height: 100%; margin: 0; overflow: hidden;\">\n\t\t<div id=\"graphiql\" style=\"height: 100vh;\">Loading...</div>\n\t\t<script>\n\t\t\tfunction fetchGQL(params) {\n\t\t\t\treturn fetch(\"/graphql\", {\n\t\t\t\t\tmethod: \"post\",\n\t\t\t\t\tbody: JSON.stringify(params),\n\t\t\t\t\tcredentials: \"include\",\n\t\t\t\t}).then(function (resp) {\n\t\t\t\t\treturn resp.text();\n\t\t\t\t}).then(function (body) {\n\t\t\t\t\ttry {\n\t\t\t\t\t\treturn JSON.parse(body);\n\t\t\t\t\t} catch (error) {\n\t\t\t\t\t\treturn body;\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t}\n\t\t\tReactDOM.render(\n\t\t\t\tReact.createElement(GraphiQL, {fetcher: fetchGQL}),\n\t\t\t\tdocument.getElementById(\"graphiql\")\n\t\t\t)\n\t\t</script>\n\t</body>\n</html>\n`)\n"
  },
  {
    "path": "graphql/graphql.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package graphql provides a GraphQL interface to Ethereum node data.\npackage graphql\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/eth/filters\"\n\t\"github.com/ethereum/go-ethereum/internal/ethapi\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\nvar (\n\terrBlockInvariant = errors.New(\"block objects must be instantiated with at least one of num or hash\")\n)\n\ntype Long int64\n\n// ImplementsGraphQLType returns true if Long implements the provided GraphQL type.\nfunc (b Long) ImplementsGraphQLType(name string) bool { return name == \"Long\" }\n\n// UnmarshalGraphQL unmarshals the provided GraphQL query data.\nfunc (b *Long) UnmarshalGraphQL(input interface{}) error {\n\tvar err error\n\tswitch input := input.(type) {\n\tcase string:\n\t\t// uncomment to support hex values\n\t\t//if strings.HasPrefix(input, \"0x\") {\n\t\t//\t// apply leniency and support hex representations of longs.\n\t\t//\tvalue, err := hexutil.DecodeUint64(input)\n\t\t//\t*b = Long(value)\n\t\t//\treturn err\n\t\t//} else {\n\t\tvalue, err := strconv.ParseInt(input, 10, 64)\n\t\t*b = Long(value)\n\t\treturn err\n\t\t//}\n\tcase int32:\n\t\t*b = Long(input)\n\tcase int64:\n\t\t*b = Long(input)\n\tdefault:\n\t\terr = fmt.Errorf(\"unexpected type %T for Long\", input)\n\t}\n\treturn err\n}\n\n// Account represents an Ethereum account at a particular block.\ntype Account struct {\n\tbackend       ethapi.Backend\n\taddress       common.Address\n\tblockNrOrHash rpc.BlockNumberOrHash\n}\n\n// getState fetches the StateDB object for an account.\nfunc (a *Account) getState(ctx context.Context) (*state.StateDB, error) {\n\tstate, _, err := a.backend.StateAndHeaderByNumberOrHash(ctx, a.blockNrOrHash)\n\treturn state, err\n}\n\nfunc (a *Account) Address(ctx context.Context) (common.Address, error) {\n\treturn a.address, nil\n}\n\nfunc (a *Account) Balance(ctx context.Context) (hexutil.Big, error) {\n\tstate, err := a.getState(ctx)\n\tif err != nil {\n\t\treturn hexutil.Big{}, err\n\t}\n\treturn hexutil.Big(*state.GetBalance(a.address)), nil\n}\n\nfunc (a *Account) TransactionCount(ctx context.Context) (hexutil.Uint64, error) {\n\tstate, err := a.getState(ctx)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn hexutil.Uint64(state.GetNonce(a.address)), nil\n}\n\nfunc (a *Account) Code(ctx context.Context) 
(hexutil.Bytes, error) {\n\tstate, err := a.getState(ctx)\n\tif err != nil {\n\t\treturn hexutil.Bytes{}, err\n\t}\n\treturn state.GetCode(a.address), nil\n}\n\nfunc (a *Account) Storage(ctx context.Context, args struct{ Slot common.Hash }) (common.Hash, error) {\n\tstate, err := a.getState(ctx)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\treturn state.GetState(a.address, args.Slot), nil\n}\n\n// Log represents an individual log message. All arguments are mandatory.\ntype Log struct {\n\tbackend     ethapi.Backend\n\ttransaction *Transaction\n\tlog         *types.Log\n}\n\nfunc (l *Log) Transaction(ctx context.Context) *Transaction {\n\treturn l.transaction\n}\n\nfunc (l *Log) Account(ctx context.Context, args BlockNumberArgs) *Account {\n\treturn &Account{\n\t\tbackend:       l.backend,\n\t\taddress:       l.log.Address,\n\t\tblockNrOrHash: args.NumberOrLatest(),\n\t}\n}\n\nfunc (l *Log) Index(ctx context.Context) int32 {\n\treturn int32(l.log.Index)\n}\n\nfunc (l *Log) Topics(ctx context.Context) []common.Hash {\n\treturn l.log.Topics\n}\n\nfunc (l *Log) Data(ctx context.Context) hexutil.Bytes {\n\treturn l.log.Data\n}\n\n// Transaction represents an Ethereum transaction.\n// backend and hash are mandatory; all others will be fetched when required.\ntype Transaction struct {\n\tbackend ethapi.Backend\n\thash    common.Hash\n\ttx      *types.Transaction\n\tblock   *Block\n\tindex   uint64\n}\n\n// resolve returns the internal transaction object, fetching it if needed.\nfunc (t *Transaction) resolve(ctx context.Context) (*types.Transaction, error) {\n\tif t.tx == nil {\n\t\ttx, blockHash, _, index := rawdb.ReadTransaction(t.backend.ChainDb(), t.hash)\n\t\tif tx != nil {\n\t\t\tt.tx = tx\n\t\t\tblockNrOrHash := rpc.BlockNumberOrHashWithHash(blockHash, false)\n\t\t\tt.block = &Block{\n\t\t\t\tbackend:      t.backend,\n\t\t\t\tnumberOrHash: &blockNrOrHash,\n\t\t\t}\n\t\t\tt.index = index\n\t\t} else {\n\t\t\tt.tx = t.backend.GetPoolTransaction(t.hash)\n\t\t}\n\t}\n\treturn t.tx, nil\n}\n\nfunc (t *Transaction) Hash(ctx context.Context) common.Hash {\n\treturn t.hash\n}\n\nfunc (t *Transaction) InputData(ctx context.Context) (hexutil.Bytes, error) {\n\ttx, err := t.resolve(ctx)\n\tif err != nil || tx == nil {\n\t\treturn hexutil.Bytes{}, err\n\t}\n\treturn tx.Data(), nil\n}\n\nfunc (t *Transaction) Gas(ctx context.Context) (hexutil.Uint64, error) {\n\ttx, err := t.resolve(ctx)\n\tif err != nil || tx == nil {\n\t\treturn 0, err\n\t}\n\treturn hexutil.Uint64(tx.Gas()), nil\n}\n\nfunc (t *Transaction) GasPrice(ctx context.Context) (hexutil.Big, error) {\n\ttx, err := t.resolve(ctx)\n\tif err != nil || tx == nil {\n\t\treturn hexutil.Big{}, err\n\t}\n\treturn hexutil.Big(*tx.GasPrice()), nil\n}\n\nfunc (t *Transaction) Value(ctx context.Context) (hexutil.Big, error) {\n\ttx, err := t.resolve(ctx)\n\tif err != nil || tx == nil {\n\t\treturn hexutil.Big{}, err\n\t}\n\treturn hexutil.Big(*tx.Value()), nil\n}\n\nfunc (t *Transaction) Nonce(ctx context.Context) (hexutil.Uint64, error) {\n\ttx, err := t.resolve(ctx)\n\tif err != nil || tx == nil {\n\t\treturn 0, err\n\t}\n\treturn hexutil.Uint64(tx.Nonce()), nil\n}\n\nfunc (t *Transaction) To(ctx context.Context, args BlockNumberArgs) (*Account, error) {\n\ttx, err := t.resolve(ctx)\n\tif err != nil || tx == nil {\n\t\treturn nil, err\n\t}\n\tto := tx.To()\n\tif to == nil {\n\t\treturn nil, nil\n\t}\n\treturn &Account{\n\t\tbackend:       t.backend,\n\t\taddress:       *to,\n\t\tblockNrOrHash: args.NumberOrLatest(),\n\t}, nil\n}\n\nfunc (t 
*Transaction) From(ctx context.Context, args BlockNumberArgs) (*Account, error) {\n\ttx, err := t.resolve(ctx)\n\tif err != nil || tx == nil {\n\t\treturn nil, err\n\t}\n\tsigner := types.LatestSigner(t.backend.ChainConfig())\n\tfrom, _ := types.Sender(signer, tx)\n\treturn &Account{\n\t\tbackend:       t.backend,\n\t\taddress:       from,\n\t\tblockNrOrHash: args.NumberOrLatest(),\n\t}, nil\n}\n\nfunc (t *Transaction) Block(ctx context.Context) (*Block, error) {\n\tif _, err := t.resolve(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\treturn t.block, nil\n}\n\nfunc (t *Transaction) Index(ctx context.Context) (*int32, error) {\n\tif _, err := t.resolve(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\tif t.block == nil {\n\t\treturn nil, nil\n\t}\n\tindex := int32(t.index)\n\treturn &index, nil\n}\n\n// getReceipt returns the receipt associated with this transaction, if any.\nfunc (t *Transaction) getReceipt(ctx context.Context) (*types.Receipt, error) {\n\tif _, err := t.resolve(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\tif t.block == nil {\n\t\treturn nil, nil\n\t}\n\treceipts, err := t.block.resolveReceipts(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn receipts[t.index], nil\n}\n\nfunc (t *Transaction) Status(ctx context.Context) (*Long, error) {\n\treceipt, err := t.getReceipt(ctx)\n\tif err != nil || receipt == nil {\n\t\treturn nil, err\n\t}\n\tret := Long(receipt.Status)\n\treturn &ret, nil\n}\n\nfunc (t *Transaction) GasUsed(ctx context.Context) (*Long, error) {\n\treceipt, err := t.getReceipt(ctx)\n\tif err != nil || receipt == nil {\n\t\treturn nil, err\n\t}\n\tret := Long(receipt.GasUsed)\n\treturn &ret, nil\n}\n\nfunc (t *Transaction) CumulativeGasUsed(ctx context.Context) (*Long, error) {\n\treceipt, err := t.getReceipt(ctx)\n\tif err != nil || receipt == nil {\n\t\treturn nil, err\n\t}\n\tret := Long(receipt.CumulativeGasUsed)\n\treturn &ret, nil\n}\n\nfunc (t *Transaction) CreatedContract(ctx context.Context, args BlockNumberArgs) (*Account, error) {\n\treceipt, err := t.getReceipt(ctx)\n\tif err != nil || receipt == nil || receipt.ContractAddress == (common.Address{}) {\n\t\treturn nil, err\n\t}\n\treturn &Account{\n\t\tbackend:       t.backend,\n\t\taddress:       receipt.ContractAddress,\n\t\tblockNrOrHash: args.NumberOrLatest(),\n\t}, nil\n}\n\nfunc (t *Transaction) Logs(ctx context.Context) (*[]*Log, error) {\n\treceipt, err := t.getReceipt(ctx)\n\tif err != nil || receipt == nil {\n\t\treturn nil, err\n\t}\n\tret := make([]*Log, 0, len(receipt.Logs))\n\tfor _, log := range receipt.Logs {\n\t\tret = append(ret, &Log{\n\t\t\tbackend:     t.backend,\n\t\t\ttransaction: t,\n\t\t\tlog:         log,\n\t\t})\n\t}\n\treturn &ret, nil\n}\n\nfunc (t *Transaction) R(ctx context.Context) (hexutil.Big, error) {\n\ttx, err := t.resolve(ctx)\n\tif err != nil || tx == nil {\n\t\treturn hexutil.Big{}, err\n\t}\n\t_, r, _ := tx.RawSignatureValues()\n\treturn hexutil.Big(*r), nil\n}\n\nfunc (t *Transaction) S(ctx context.Context) (hexutil.Big, error) {\n\ttx, err := t.resolve(ctx)\n\tif err != nil || tx == nil {\n\t\treturn hexutil.Big{}, err\n\t}\n\t_, _, s := tx.RawSignatureValues()\n\treturn hexutil.Big(*s), nil\n}\n\nfunc (t *Transaction) V(ctx context.Context) (hexutil.Big, error) {\n\ttx, err := t.resolve(ctx)\n\tif err != nil || tx == nil {\n\t\treturn hexutil.Big{}, err\n\t}\n\tv, _, _ := tx.RawSignatureValues()\n\treturn hexutil.Big(*v), nil\n}\n\ntype BlockType int\n\n// Block represents an Ethereum block.\n// backend and numberOrHash are mandatory. 
All other fields are lazily fetched\n// when required.\ntype Block struct {\n\tbackend      ethapi.Backend\n\tnumberOrHash *rpc.BlockNumberOrHash\n\thash         common.Hash\n\theader       *types.Header\n\tblock        *types.Block\n\treceipts     []*types.Receipt\n}\n\n// resolve returns the internal Block object representing this block, fetching\n// it if necessary.\nfunc (b *Block) resolve(ctx context.Context) (*types.Block, error) {\n\tif b.block != nil {\n\t\treturn b.block, nil\n\t}\n\tif b.numberOrHash == nil {\n\t\tlatest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)\n\t\tb.numberOrHash = &latest\n\t}\n\tvar err error\n\tb.block, err = b.backend.BlockByNumberOrHash(ctx, *b.numberOrHash)\n\tif b.block != nil && b.header == nil {\n\t\tb.header = b.block.Header()\n\t\tif hash, ok := b.numberOrHash.Hash(); ok {\n\t\t\tb.hash = hash\n\t\t}\n\t}\n\treturn b.block, err\n}\n\n// resolveHeader returns the internal Header object for this block, fetching it\n// if necessary. Call this function instead of `resolve` unless you need the\n// additional data (transactions and uncles).\nfunc (b *Block) resolveHeader(ctx context.Context) (*types.Header, error) {\n\tif b.numberOrHash == nil && b.hash == (common.Hash{}) {\n\t\treturn nil, errBlockInvariant\n\t}\n\tvar err error\n\tif b.header == nil {\n\t\tif b.hash != (common.Hash{}) {\n\t\t\tb.header, err = b.backend.HeaderByHash(ctx, b.hash)\n\t\t} else {\n\t\t\tb.header, err = b.backend.HeaderByNumberOrHash(ctx, *b.numberOrHash)\n\t\t}\n\t}\n\treturn b.header, err\n}\n\n// resolveReceipts returns the list of receipts for this block, fetching them\n// if necessary.\nfunc (b *Block) resolveReceipts(ctx context.Context) ([]*types.Receipt, error) {\n\tif b.receipts == nil {\n\t\thash := b.hash\n\t\tif hash == (common.Hash{}) {\n\t\t\theader, err := b.resolveHeader(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\thash = header.Hash()\n\t\t}\n\t\treceipts, err := b.backend.GetReceipts(ctx, hash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb.receipts = receipts\n\t}\n\treturn b.receipts, nil\n}\n\nfunc (b *Block) Number(ctx context.Context) (Long, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn Long(header.Number.Uint64()), nil\n}\n\nfunc (b *Block) Hash(ctx context.Context) (common.Hash, error) {\n\tif b.hash == (common.Hash{}) {\n\t\theader, err := b.resolveHeader(ctx)\n\t\tif err != nil {\n\t\t\treturn common.Hash{}, err\n\t\t}\n\t\tb.hash = header.Hash()\n\t}\n\treturn b.hash, nil\n}\n\nfunc (b *Block) GasLimit(ctx context.Context) (Long, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn Long(header.GasLimit), nil\n}\n\nfunc (b *Block) GasUsed(ctx context.Context) (Long, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn Long(header.GasUsed), nil\n}\n\nfunc (b *Block) Parent(ctx context.Context) (*Block, error) {\n\t// If the block header hasn't been fetched, and we'll need it, fetch it.\n\tif b.numberOrHash == nil && b.header == nil {\n\t\tif _, err := b.resolveHeader(ctx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif b.header != nil && b.header.Number.Uint64() > 0 {\n\t\tnum := rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(b.header.Number.Uint64() - 1))\n\t\treturn &Block{\n\t\t\tbackend:      b.backend,\n\t\t\tnumberOrHash: &num,\n\t\t\thash:         b.header.ParentHash,\n\t\t}, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (b *Block) 
Difficulty(ctx context.Context) (hexutil.Big, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn hexutil.Big{}, err\n\t}\n\treturn hexutil.Big(*header.Difficulty), nil\n}\n\nfunc (b *Block) Timestamp(ctx context.Context) (hexutil.Uint64, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn hexutil.Uint64(header.Time), nil\n}\n\nfunc (b *Block) Nonce(ctx context.Context) (hexutil.Bytes, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn hexutil.Bytes{}, err\n\t}\n\treturn header.Nonce[:], nil\n}\n\nfunc (b *Block) MixHash(ctx context.Context) (common.Hash, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\treturn header.MixDigest, nil\n}\n\nfunc (b *Block) TransactionsRoot(ctx context.Context) (common.Hash, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\treturn header.TxHash, nil\n}\n\nfunc (b *Block) StateRoot(ctx context.Context) (common.Hash, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\treturn header.Root, nil\n}\n\nfunc (b *Block) ReceiptsRoot(ctx context.Context) (common.Hash, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\treturn header.ReceiptHash, nil\n}\n\nfunc (b *Block) OmmerHash(ctx context.Context) (common.Hash, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\treturn header.UncleHash, nil\n}\n\nfunc (b *Block) OmmerCount(ctx context.Context) (*int32, error) {\n\tblock, err := b.resolve(ctx)\n\tif err != nil || block == nil {\n\t\treturn nil, err\n\t}\n\tcount := int32(len(block.Uncles()))\n\treturn &count, err\n}\n\nfunc (b *Block) Ommers(ctx context.Context) (*[]*Block, error) {\n\tblock, err := b.resolve(ctx)\n\tif err != nil || block == nil {\n\t\treturn nil, err\n\t}\n\tret := make([]*Block, 0, len(block.Uncles()))\n\tfor _, uncle := range block.Uncles() {\n\t\tblockNumberOrHash := rpc.BlockNumberOrHashWithHash(uncle.Hash(), false)\n\t\tret = append(ret, &Block{\n\t\t\tbackend:      b.backend,\n\t\t\tnumberOrHash: &blockNumberOrHash,\n\t\t\theader:       uncle,\n\t\t})\n\t}\n\treturn &ret, nil\n}\n\nfunc (b *Block) ExtraData(ctx context.Context) (hexutil.Bytes, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn hexutil.Bytes{}, err\n\t}\n\treturn header.Extra, nil\n}\n\nfunc (b *Block) LogsBloom(ctx context.Context) (hexutil.Bytes, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn hexutil.Bytes{}, err\n\t}\n\treturn header.Bloom.Bytes(), nil\n}\n\nfunc (b *Block) TotalDifficulty(ctx context.Context) (hexutil.Big, error) {\n\th := b.hash\n\tif h == (common.Hash{}) {\n\t\theader, err := b.resolveHeader(ctx)\n\t\tif err != nil {\n\t\t\treturn hexutil.Big{}, err\n\t\t}\n\t\th = header.Hash()\n\t}\n\treturn hexutil.Big(*b.backend.GetTd(ctx, h)), nil\n}\n\n// BlockNumberArgs encapsulates arguments to accessors that specify a block number.\ntype BlockNumberArgs struct {\n\t// TODO: Ideally we could use input unions to allow the query to specify the\n\t// block parameter by hash, block number, or tag but input unions aren't part of the\n\t// standard GraphQL schema SDL yet, see: https://github.com/graphql/graphql-spec/issues/488\n\tBlock *hexutil.Uint64\n}\n\n// NumberOr returns the provided block number argument, or the \"current\" block 
number or hash if none\n// was provided.\nfunc (a BlockNumberArgs) NumberOr(current rpc.BlockNumberOrHash) rpc.BlockNumberOrHash {\n\tif a.Block != nil {\n\t\tblockNr := rpc.BlockNumber(*a.Block)\n\t\treturn rpc.BlockNumberOrHashWithNumber(blockNr)\n\t}\n\treturn current\n}\n\n// NumberOrLatest returns the provided block number argument, or the \"latest\" block number if none\n// was provided.\nfunc (a BlockNumberArgs) NumberOrLatest() rpc.BlockNumberOrHash {\n\treturn a.NumberOr(rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber))\n}\n\nfunc (b *Block) Miner(ctx context.Context, args BlockNumberArgs) (*Account, error) {\n\theader, err := b.resolveHeader(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Account{\n\t\tbackend:       b.backend,\n\t\taddress:       header.Coinbase,\n\t\tblockNrOrHash: args.NumberOrLatest(),\n\t}, nil\n}\n\nfunc (b *Block) TransactionCount(ctx context.Context) (*int32, error) {\n\tblock, err := b.resolve(ctx)\n\tif err != nil || block == nil {\n\t\treturn nil, err\n\t}\n\tcount := int32(len(block.Transactions()))\n\treturn &count, err\n}\n\nfunc (b *Block) Transactions(ctx context.Context) (*[]*Transaction, error) {\n\tblock, err := b.resolve(ctx)\n\tif err != nil || block == nil {\n\t\treturn nil, err\n\t}\n\tret := make([]*Transaction, 0, len(block.Transactions()))\n\tfor i, tx := range block.Transactions() {\n\t\tret = append(ret, &Transaction{\n\t\t\tbackend: b.backend,\n\t\t\thash:    tx.Hash(),\n\t\t\ttx:      tx,\n\t\t\tblock:   b,\n\t\t\tindex:   uint64(i),\n\t\t})\n\t}\n\treturn &ret, nil\n}\n\nfunc (b *Block) TransactionAt(ctx context.Context, args struct{ Index int32 }) (*Transaction, error) {\n\tblock, err := b.resolve(ctx)\n\tif err != nil || block == nil {\n\t\treturn nil, err\n\t}\n\ttxs := block.Transactions()\n\tif args.Index < 0 || int(args.Index) >= len(txs) {\n\t\treturn nil, nil\n\t}\n\ttx := txs[args.Index]\n\treturn &Transaction{\n\t\tbackend: b.backend,\n\t\thash:    tx.Hash(),\n\t\ttx:      tx,\n\t\tblock:   b,\n\t\tindex:   uint64(args.Index),\n\t}, nil\n}\n\nfunc (b *Block) OmmerAt(ctx context.Context, args struct{ Index int32 }) (*Block, error) {\n\tblock, err := b.resolve(ctx)\n\tif err != nil || block == nil {\n\t\treturn nil, err\n\t}\n\tuncles := block.Uncles()\n\tif args.Index < 0 || int(args.Index) >= len(uncles) {\n\t\treturn nil, nil\n\t}\n\tuncle := uncles[args.Index]\n\tblockNumberOrHash := rpc.BlockNumberOrHashWithHash(uncle.Hash(), false)\n\treturn &Block{\n\t\tbackend:      b.backend,\n\t\tnumberOrHash: &blockNumberOrHash,\n\t\theader:       uncle,\n\t}, nil\n}\n\n// BlockFilterCriteria encapsulates criteria passed to a `logs` accessor inside\n// a block.\ntype BlockFilterCriteria struct {\n\tAddresses *[]common.Address // restricts matches to events created by specific contracts\n\n\t// The Topic list restricts matches to particular event topics. Each event has a list\n\t// of topics. Topics matches a prefix of that list. An empty element slice matches any\n\t// topic. 
Non-empty elements represent an alternative that matches any of the\n\t// contained topics.\n\t//\n\t// Examples:\n\t// {} or nil          matches any topic list\n\t// {{A}}              matches topic A in first position\n\t// {{}, {B}}          matches any topic in first position, B in second position\n\t// {{A}, {B}}         matches topic A in first position, B in second position\n\t// {{A, B}, {C, D}}   matches topic (A OR B) in first position, (C OR D) in second position\n\tTopics *[][]common.Hash\n}\n\n// runFilter accepts a filter and executes it, returning all its results as\n// `Log` objects.\nfunc runFilter(ctx context.Context, be ethapi.Backend, filter *filters.Filter) ([]*Log, error) {\n\tlogs, err := filter.Logs(ctx)\n\tif err != nil || logs == nil {\n\t\treturn nil, err\n\t}\n\tret := make([]*Log, 0, len(logs))\n\tfor _, log := range logs {\n\t\tret = append(ret, &Log{\n\t\t\tbackend:     be,\n\t\t\ttransaction: &Transaction{backend: be, hash: log.TxHash},\n\t\t\tlog:         log,\n\t\t})\n\t}\n\treturn ret, nil\n}\n\nfunc (b *Block) Logs(ctx context.Context, args struct{ Filter BlockFilterCriteria }) ([]*Log, error) {\n\tvar addresses []common.Address\n\tif args.Filter.Addresses != nil {\n\t\taddresses = *args.Filter.Addresses\n\t}\n\tvar topics [][]common.Hash\n\tif args.Filter.Topics != nil {\n\t\ttopics = *args.Filter.Topics\n\t}\n\thash := b.hash\n\tif hash == (common.Hash{}) {\n\t\theader, err := b.resolveHeader(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thash = header.Hash()\n\t}\n\t// Construct the block filter\n\tfilter := filters.NewBlockFilter(b.backend, hash, addresses, topics)\n\n\t// Run the filter and return all the logs\n\treturn runFilter(ctx, b.backend, filter)\n}\n\nfunc (b *Block) Account(ctx context.Context, args struct {\n\tAddress common.Address\n}) (*Account, error) {\n\tif b.numberOrHash == nil {\n\t\t_, err := b.resolveHeader(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &Account{\n\t\tbackend:       b.backend,\n\t\taddress:       args.Address,\n\t\tblockNrOrHash: *b.numberOrHash,\n\t}, nil\n}\n\n// CallData encapsulates arguments to `call` or `estimateGas`.\n// All arguments are optional.\ntype CallData struct {\n\tFrom     *common.Address // The Ethereum address the call is from.\n\tTo       *common.Address // The Ethereum address the call is to.\n\tGas      *hexutil.Uint64 // The amount of gas provided for the call.\n\tGasPrice *hexutil.Big    // The price of each unit of gas, in wei.\n\tValue    *hexutil.Big    // The value sent along with the call.\n\tData     *hexutil.Bytes  // Any data sent with the call.\n}\n\n// CallResult encapsulates the result of an invocation of the `call` accessor.\ntype CallResult struct {\n\tdata    hexutil.Bytes // The return data from the call\n\tgasUsed Long          // The amount of gas used\n\tstatus  Long          // The return status of the call - 0 for failure or 1 for success.\n}\n\nfunc (c *CallResult) Data() hexutil.Bytes {\n\treturn c.data\n}\n\nfunc (c *CallResult) GasUsed() Long {\n\treturn c.gasUsed\n}\n\nfunc (c *CallResult) Status() Long {\n\treturn c.status\n}\n\nfunc (b *Block) Call(ctx context.Context, args struct {\n\tData ethapi.CallArgs\n}) (*CallResult, error) {\n\tif b.numberOrHash == nil {\n\t\t_, err := b.resolve(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tresult, err := ethapi.DoCall(ctx, b.backend, args.Data, *b.numberOrHash, nil, vm.Config{}, 5*time.Second, b.backend.RPCGasCap())\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tstatus := Long(1)\n\tif result.Failed() {\n\t\tstatus = 0\n\t}\n\n\treturn &CallResult{\n\t\tdata:    result.ReturnData,\n\t\tgasUsed: Long(result.UsedGas),\n\t\tstatus:  status,\n\t}, nil\n}\n\nfunc (b *Block) EstimateGas(ctx context.Context, args struct {\n\tData ethapi.CallArgs\n}) (Long, error) {\n\tif b.numberOrHash == nil {\n\t\t_, err := b.resolveHeader(ctx)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\tgas, err := ethapi.DoEstimateGas(ctx, b.backend, args.Data, *b.numberOrHash, b.backend.RPCGasCap())\n\treturn Long(gas), err\n}\n\ntype Pending struct {\n\tbackend ethapi.Backend\n}\n\nfunc (p *Pending) TransactionCount(ctx context.Context) (int32, error) {\n\ttxs, err := p.backend.GetPoolTransactions()\n\treturn int32(len(txs)), err\n}\n\nfunc (p *Pending) Transactions(ctx context.Context) (*[]*Transaction, error) {\n\ttxs, err := p.backend.GetPoolTransactions()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := make([]*Transaction, 0, len(txs))\n\tfor i, tx := range txs {\n\t\tret = append(ret, &Transaction{\n\t\t\tbackend: p.backend,\n\t\t\thash:    tx.Hash(),\n\t\t\ttx:      tx,\n\t\t\tindex:   uint64(i),\n\t\t})\n\t}\n\treturn &ret, nil\n}\n\nfunc (p *Pending) Account(ctx context.Context, args struct {\n\tAddress common.Address\n}) *Account {\n\tpendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)\n\treturn &Account{\n\t\tbackend:       p.backend,\n\t\taddress:       args.Address,\n\t\tblockNrOrHash: pendingBlockNr,\n\t}\n}\n\nfunc (p *Pending) Call(ctx context.Context, args struct {\n\tData ethapi.CallArgs\n}) (*CallResult, error) {\n\tpendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)\n\tresult, err := ethapi.DoCall(ctx, p.backend, args.Data, pendingBlockNr, nil, vm.Config{}, 5*time.Second, p.backend.RPCGasCap())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstatus := Long(1)\n\tif result.Failed() {\n\t\tstatus = 0\n\t}\n\n\treturn &CallResult{\n\t\tdata:    result.ReturnData,\n\t\tgasUsed: Long(result.UsedGas),\n\t\tstatus:  status,\n\t}, nil\n}\n\nfunc (p *Pending) EstimateGas(ctx context.Context, args struct {\n\tData ethapi.CallArgs\n}) (Long, error) {\n\tpendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber)\n\tgas, err := ethapi.DoEstimateGas(ctx, p.backend, args.Data, pendingBlockNr, p.backend.RPCGasCap())\n\treturn Long(gas), err\n}\n\n// Resolver is the top-level object in the GraphQL hierarchy.\ntype Resolver struct {\n\tbackend ethapi.Backend\n}\n\nfunc (r *Resolver) Block(ctx context.Context, args struct {\n\tNumber *Long\n\tHash   *common.Hash\n}) (*Block, error) {\n\tvar block *Block\n\tif args.Number != nil {\n\t\tif *args.Number < 0 {\n\t\t\treturn nil, nil\n\t\t}\n\t\tnumber := rpc.BlockNumber(*args.Number)\n\t\tnumberOrHash := rpc.BlockNumberOrHashWithNumber(number)\n\t\tblock = &Block{\n\t\t\tbackend:      r.backend,\n\t\t\tnumberOrHash: &numberOrHash,\n\t\t}\n\t} else if args.Hash != nil {\n\t\tnumberOrHash := rpc.BlockNumberOrHashWithHash(*args.Hash, false)\n\t\tblock = &Block{\n\t\t\tbackend:      r.backend,\n\t\t\tnumberOrHash: &numberOrHash,\n\t\t}\n\t} else {\n\t\tnumberOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)\n\t\tblock = &Block{\n\t\t\tbackend:      r.backend,\n\t\t\tnumberOrHash: &numberOrHash,\n\t\t}\n\t}\n\t// Resolve the header, return nil if it doesn't exist.\n\t// Note we don't resolve block directly here since it will require an\n\t// additional network request for light clients.\n\th, err := block.resolveHeader(ctx)\n\tif err != 
nil {\n\t\treturn nil, err\n\t} else if h == nil {\n\t\treturn nil, nil\n\t}\n\treturn block, nil\n}\n\nfunc (r *Resolver) Blocks(ctx context.Context, args struct {\n\tFrom *Long\n\tTo   *Long\n}) ([]*Block, error) {\n\t// Guard against a missing 'from' argument; dereferencing a nil pointer\n\t// here would panic the query handler.\n\tif args.From == nil {\n\t\treturn nil, errors.New(\"from block number must be specified\")\n\t}\n\tfrom := rpc.BlockNumber(*args.From)\n\n\tvar to rpc.BlockNumber\n\tif args.To != nil {\n\t\tto = rpc.BlockNumber(*args.To)\n\t} else {\n\t\tto = rpc.BlockNumber(r.backend.CurrentBlock().Number().Int64())\n\t}\n\tif to < from {\n\t\treturn []*Block{}, nil\n\t}\n\tret := make([]*Block, 0, to-from+1)\n\tfor i := from; i <= to; i++ {\n\t\tnumberOrHash := rpc.BlockNumberOrHashWithNumber(i)\n\t\tret = append(ret, &Block{\n\t\t\tbackend:      r.backend,\n\t\t\tnumberOrHash: &numberOrHash,\n\t\t})\n\t}\n\treturn ret, nil\n}\n\nfunc (r *Resolver) Pending(ctx context.Context) *Pending {\n\treturn &Pending{r.backend}\n}\n\nfunc (r *Resolver) Transaction(ctx context.Context, args struct{ Hash common.Hash }) (*Transaction, error) {\n\ttx := &Transaction{\n\t\tbackend: r.backend,\n\t\thash:    args.Hash,\n\t}\n\t// Resolve the transaction; if it doesn't exist, return nil.\n\tt, err := tx.resolve(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if t == nil {\n\t\treturn nil, nil\n\t}\n\treturn tx, nil\n}\n\nfunc (r *Resolver) SendRawTransaction(ctx context.Context, args struct{ Data hexutil.Bytes }) (common.Hash, error) {\n\ttx := new(types.Transaction)\n\tif err := tx.UnmarshalBinary(args.Data); err != nil {\n\t\treturn common.Hash{}, err\n\t}\n\thash, err := ethapi.SubmitTransaction(ctx, r.backend, tx)\n\treturn hash, err\n}\n\n// FilterCriteria encapsulates the arguments to `logs` on the root resolver object.\ntype FilterCriteria struct {\n\tFromBlock *hexutil.Uint64   // beginning of the queried range, nil means genesis block\n\tToBlock   *hexutil.Uint64   // end of the range, nil means latest block\n\tAddresses *[]common.Address // restricts matches to events created by specific contracts\n\n\t// The Topic list restricts matches to particular event topics. Each event has a list\n\t// of topics. Topics matches a prefix of that list. An empty element slice matches any\n\t// topic. 
Non-empty elements represent an alternative that matches any of the\n\t// contained topics.\n\t//\n\t// Examples:\n\t// {} or nil          matches any topic list\n\t// {{A}}              matches topic A in first position\n\t// {{}, {B}}          matches any topic in first position, B in second position\n\t// {{A}, {B}}         matches topic A in first position, B in second position\n\t// {{A, B}, {C, D}}   matches topic (A OR B) in first position, (C OR D) in second position\n\tTopics *[][]common.Hash\n}\n\nfunc (r *Resolver) Logs(ctx context.Context, args struct{ Filter FilterCriteria }) ([]*Log, error) {\n\t// Convert the RPC block numbers into internal representations\n\tbegin := rpc.LatestBlockNumber.Int64()\n\tif args.Filter.FromBlock != nil {\n\t\tbegin = int64(*args.Filter.FromBlock)\n\t}\n\tend := rpc.LatestBlockNumber.Int64()\n\tif args.Filter.ToBlock != nil {\n\t\tend = int64(*args.Filter.ToBlock)\n\t}\n\tvar addresses []common.Address\n\tif args.Filter.Addresses != nil {\n\t\taddresses = *args.Filter.Addresses\n\t}\n\tvar topics [][]common.Hash\n\tif args.Filter.Topics != nil {\n\t\ttopics = *args.Filter.Topics\n\t}\n\t// Construct the range filter\n\tfilter := filters.NewRangeFilter(filters.Backend(r.backend), begin, end, addresses, topics)\n\treturn runFilter(ctx, r.backend, filter)\n}\n\nfunc (r *Resolver) GasPrice(ctx context.Context) (hexutil.Big, error) {\n\tprice, err := r.backend.SuggestPrice(ctx)\n\treturn hexutil.Big(*price), err\n}\n\nfunc (r *Resolver) ChainID(ctx context.Context) (hexutil.Big, error) {\n\treturn hexutil.Big(*r.backend.ChainConfig().ChainID), nil\n}\n\n// SyncState represents the synchronisation status returned from the `syncing` accessor.\ntype SyncState struct {\n\tprogress ethereum.SyncProgress\n}\n\nfunc (s *SyncState) StartingBlock() hexutil.Uint64 {\n\treturn hexutil.Uint64(s.progress.StartingBlock)\n}\n\nfunc (s *SyncState) CurrentBlock() hexutil.Uint64 {\n\treturn hexutil.Uint64(s.progress.CurrentBlock)\n}\n\nfunc (s *SyncState) HighestBlock() hexutil.Uint64 {\n\treturn hexutil.Uint64(s.progress.HighestBlock)\n}\n\nfunc (s *SyncState) PulledStates() *hexutil.Uint64 {\n\tret := hexutil.Uint64(s.progress.PulledStates)\n\treturn &ret\n}\n\nfunc (s *SyncState) KnownStates() *hexutil.Uint64 {\n\tret := hexutil.Uint64(s.progress.KnownStates)\n\treturn &ret\n}\n\n// Syncing returns nil in case the node is currently not syncing with the network. It can be up to date or has not\n// yet received the latest block headers from its peers. In case it is synchronizing:\n// - startingBlock: block number this node started to synchronise from\n// - currentBlock:  block number this node is currently importing\n// - highestBlock:  block number of the highest block header this node has received from peers\n// - pulledStates:  number of state entries processed until now\n// - knownStates:   number of known state entries that still need to be pulled\nfunc (r *Resolver) Syncing() (*SyncState, error) {\n\tprogress := r.backend.Downloader().Progress()\n\n\t// Return not syncing if the synchronisation already completed\n\tif progress.CurrentBlock >= progress.HighestBlock {\n\t\treturn nil, nil\n\t}\n\t// Otherwise gather the block sync stats\n\treturn &SyncState{progress}, nil\n}\n"
  },
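  {
    "path": "graphql/examples/query_block.go",
    "content": "// NOTE: this file is an illustrative sketch added next to the GraphQL\n// resolvers and is not part of the original tree. It shows how a client can\n// talk to the /graphql endpoint served by graphql/service.go using only the\n// standard library; the URL below is an assumption, adjust it to wherever the\n// node's HTTP server actually listens.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net/http\"\n)\n\nfunc main() {\n\t// The handler decodes a JSON body of the shape\n\t// {\"query\": ..., \"operationName\": ..., \"variables\": ...},\n\t// so a plain JSON POST is all that is required.\n\tbody := bytes.NewBufferString(`{\"query\": \"{block{number hash}}\", \"variables\": null}`)\n\tresp, err := http.Post(\"http://127.0.0.1:8545/graphql\", \"application/json\", body)\n\tif err != nil {\n\t\tlog.Fatalf(\"could not post query: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatalf(\"could not read response: %v\", err)\n\t}\n\t// A successful query returns HTTP 200 with a {\"data\": ...} payload;\n\t// schema or execution errors come back as HTTP 400 with an \"errors\" list.\n\tfmt.Printf(\"%d: %s\\n\", resp.StatusCode, data)\n}\n"
  },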
  {
    "path": "graphql/graphql_test.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage graphql\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math/big\"\n\t\"net/http\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/eth\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/node\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestBuildSchema(t *testing.T) {\n\tddir, err := ioutil.TempDir(\"\", \"graphql-buildschema\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temporary datadir: %v\", err)\n\t}\n\t// Copy config\n\tconf := node.DefaultConfig\n\tconf.DataDir = ddir\n\tstack, err := node.New(&conf)\n\tif err != nil {\n\t\tt.Fatalf(\"could not create new node: %v\", err)\n\t}\n\t// Make sure the schema can be parsed and matched up to the object model.\n\tif err := newHandler(stack, nil, []string{}, []string{}); err != nil {\n\t\tt.Errorf(\"Could not construct GraphQL handler: %v\", err)\n\t}\n}\n\n// Tests that a graphQL request is successfully handled when graphql is enabled on the specified endpoint\nfunc TestGraphQLBlockSerialization(t *testing.T) {\n\tstack := createNode(t, true)\n\tdefer stack.Close()\n\t// start node\n\tif err := stack.Start(); err != nil {\n\t\tt.Fatalf(\"could not start node: %v\", err)\n\t}\n\n\tfor i, tt := range []struct {\n\t\tbody string\n\t\twant string\n\t\tcode int\n\t}{\n\t\t{ // Should return latest block\n\t\t\tbody: `{\"query\": \"{block{number}}\",\"variables\": null}`,\n\t\t\twant: `{\"data\":{\"block\":{\"number\":10}}}`,\n\t\t\tcode: 200,\n\t\t},\n\t\t{ // Should return info about latest block\n\t\t\tbody: `{\"query\": \"{block{number,gasUsed,gasLimit}}\",\"variables\": null}`,\n\t\t\twant: `{\"data\":{\"block\":{\"number\":10,\"gasUsed\":0,\"gasLimit\":11500000}}}`,\n\t\t\tcode: 200,\n\t\t},\n\t\t{\n\t\t\tbody: `{\"query\": \"{block(number:0){number,gasUsed,gasLimit}}\",\"variables\": null}`,\n\t\t\twant: `{\"data\":{\"block\":{\"number\":0,\"gasUsed\":0,\"gasLimit\":11500000}}}`,\n\t\t\tcode: 200,\n\t\t},\n\t\t{\n\t\t\tbody: `{\"query\": \"{block(number:-1){number,gasUsed,gasLimit}}\",\"variables\": null}`,\n\t\t\twant: `{\"data\":{\"block\":null}}`,\n\t\t\tcode: 200,\n\t\t},\n\t\t{\n\t\t\tbody: `{\"query\": \"{block(number:-500){number,gasUsed,gasLimit}}\",\"variables\": null}`,\n\t\t\twant: `{\"data\":{\"block\":null}}`,\n\t\t\tcode: 200,\n\t\t},\n\t\t{\n\t\t\tbody: `{\"query\": \"{block(number:\\\"0\\\"){number,gasUsed,gasLimit}}\",\"variables\": null}`,\n\t\t\twant: `{\"data\":{\"block\":{\"number\":0,\"gasUsed\":0,\"gasLimit\":11500000}}}`,\n\t\t\tcode: 200,\n\t\t},\n\t\t{\n\t\t\tbody: 
`{\"query\": \"{block(number:\\\"-33\\\"){number,gasUsed,gasLimit}}\",\"variables\": null}`,\n\t\t\twant: `{\"data\":{\"block\":null}}`,\n\t\t\tcode: 200,\n\t\t},\n\t\t{\n\t\t\tbody: `{\"query\": \"{block(number:\\\"1337\\\"){number,gasUsed,gasLimit}}\",\"variables\": null}`,\n\t\t\twant: `{\"data\":{\"block\":null}}`,\n\t\t\tcode: 200,\n\t\t},\n\t\t{\n\t\t\tbody: `{\"query\": \"{block(number:\\\"0xbad\\\"){number,gasUsed,gasLimit}}\",\"variables\": null}`,\n\t\t\twant: `{\"errors\":[{\"message\":\"strconv.ParseInt: parsing \\\"0xbad\\\": invalid syntax\"}],\"data\":{}}`,\n\t\t\tcode: 400,\n\t\t},\n\t\t{ // hex strings are currently not supported. If that's added to the spec, this test will need to change\n\t\t\tbody: `{\"query\": \"{block(number:\\\"0x0\\\"){number,gasUsed,gasLimit}}\",\"variables\": null}`,\n\t\t\twant: `{\"errors\":[{\"message\":\"strconv.ParseInt: parsing \\\"0x0\\\": invalid syntax\"}],\"data\":{}}`,\n\t\t\tcode: 400,\n\t\t},\n\t\t{\n\t\t\tbody: `{\"query\": \"{block(number:\\\"a\\\"){number,gasUsed,gasLimit}}\",\"variables\": null}`,\n\t\t\twant: `{\"errors\":[{\"message\":\"strconv.ParseInt: parsing \\\"a\\\": invalid syntax\"}],\"data\":{}}`,\n\t\t\tcode: 400,\n\t\t},\n\t\t{\n\t\t\tbody: `{\"query\": \"{bleh{number}}\",\"variables\": null}\"`,\n\t\t\twant: `{\"errors\":[{\"message\":\"Cannot query field \\\"bleh\\\" on type \\\"Query\\\".\",\"locations\":[{\"line\":1,\"column\":2}]}]}`,\n\t\t\tcode: 400,\n\t\t},\n\t\t// should return `estimateGas` as decimal\n\t\t{\n\t\t\tbody: `{\"query\": \"{block{ estimateGas(data:{}) }}\"}`,\n\t\t\twant: `{\"data\":{\"block\":{\"estimateGas\":53000}}}`,\n\t\t\tcode: 200,\n\t\t},\n\t\t// should return `status` as decimal\n\t\t{\n\t\t\tbody: `{\"query\": \"{block {number call (data : {from : \\\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\\\", to: \\\"0x6295ee1b4f6dd65047762f924ecd367c17eabf8f\\\", data :\\\"0x12a7b914\\\"}){data status}}}\"}`,\n\t\t\twant: `{\"data\":{\"block\":{\"number\":10,\"call\":{\"data\":\"0x\",\"status\":1}}}}`,\n\t\t\tcode: 200,\n\t\t},\n\t} {\n\t\tresp, err := http.Post(fmt.Sprintf(\"%s/graphql\", stack.HTTPEndpoint()), \"application/json\", strings.NewReader(tt.body))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"could not post: %v\", err)\n\t\t}\n\t\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"could not read from response body: %v\", err)\n\t\t}\n\t\tif have := string(bodyBytes); have != tt.want {\n\t\t\tt.Errorf(\"testcase %d %s,\\nhave:\\n%v\\nwant:\\n%v\", i, tt.body, have, tt.want)\n\t\t}\n\t\tif tt.code != resp.StatusCode {\n\t\t\tt.Errorf(\"testcase %d %s,\\nwrong statuscode, have: %v, want: %v\", i, tt.body, resp.StatusCode, tt.code)\n\t\t}\n\t}\n}\n\n// Tests that a graphQL request is not handled successfully when graphql is not enabled on the specified endpoint\nfunc TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) {\n\tstack := createNode(t, false)\n\tdefer stack.Close()\n\tif err := stack.Start(); err != nil {\n\t\tt.Fatalf(\"could not start node: %v\", err)\n\t}\n\tbody := strings.NewReader(`{\"query\": \"{block{number}}\",\"variables\": null}`)\n\tresp, err := http.Post(fmt.Sprintf(\"%s/graphql\", stack.HTTPEndpoint()), \"application/json\", body)\n\tif err != nil {\n\t\tt.Fatalf(\"could not post: %v\", err)\n\t}\n\t// make sure the request is not handled successfully\n\tassert.Equal(t, http.StatusNotFound, resp.StatusCode)\n}\n\nfunc createNode(t *testing.T, gqlEnabled bool) *node.Node {\n\tstack, err := node.New(&node.Config{\n\t\tHTTPHost: 
\"127.0.0.1\",\n\t\tHTTPPort: 0,\n\t\tWSHost:   \"127.0.0.1\",\n\t\tWSPort:   0,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"could not create node: %v\", err)\n\t}\n\tif !gqlEnabled {\n\t\treturn stack\n\t}\n\tcreateGQLService(t, stack)\n\treturn stack\n}\n\nfunc createGQLService(t *testing.T, stack *node.Node) {\n\t// create backend\n\tethConf := &ethconfig.Config{\n\t\tGenesis: &core.Genesis{\n\t\t\tConfig:     params.AllEthashProtocolChanges,\n\t\t\tGasLimit:   11500000,\n\t\t\tDifficulty: big.NewInt(1048576),\n\t\t},\n\t\tEthash: ethash.Config{\n\t\t\tPowMode: ethash.ModeFake,\n\t\t},\n\t\tNetworkId:               1337,\n\t\tTrieCleanCache:          5,\n\t\tTrieCleanCacheJournal:   \"triecache\",\n\t\tTrieCleanCacheRejournal: 60 * time.Minute,\n\t\tTrieDirtyCache:          5,\n\t\tTrieTimeout:             60 * time.Minute,\n\t\tSnapshotCache:           5,\n\t}\n\tethBackend, err := eth.New(stack, ethConf)\n\tif err != nil {\n\t\tt.Fatalf(\"could not create eth backend: %v\", err)\n\t}\n\t// Create some blocks and import them\n\tchain, _ := core.GenerateChain(params.AllEthashProtocolChanges, ethBackend.BlockChain().Genesis(),\n\t\tethash.NewFaker(), ethBackend.ChainDb(), 10, func(i int, gen *core.BlockGen) {})\n\t_, err = ethBackend.BlockChain().InsertChain(chain)\n\tif err != nil {\n\t\tt.Fatalf(\"could not create import blocks: %v\", err)\n\t}\n\t// create gql service\n\terr = New(stack, ethBackend.APIBackend, []string{}, []string{})\n\tif err != nil {\n\t\tt.Fatalf(\"could not create graphql service: %v\", err)\n\t}\n}\n"
  },
  {
    "path": "graphql/schema.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage graphql\n\nconst schema string = `\n    # Bytes32 is a 32 byte binary string, represented as 0x-prefixed hexadecimal.\n    scalar Bytes32\n    # Address is a 20 byte Ethereum address, represented as 0x-prefixed hexadecimal.\n    scalar Address\n    # Bytes is an arbitrary length binary string, represented as 0x-prefixed hexadecimal.\n    # An empty byte string is represented as '0x'. Byte strings must have an even number of hexadecimal nybbles.\n    scalar Bytes\n    # BigInt is a large integer. Input is accepted as either a JSON number or as a string.\n    # Strings may be either decimal or 0x-prefixed hexadecimal. Output values are all\n    # 0x-prefixed hexadecimal.\n    scalar BigInt\n    # Long is a 64 bit unsigned integer.\n    scalar Long\n\n    schema {\n        query: Query\n        mutation: Mutation\n    }\n\n    # Account is an Ethereum account at a particular block.\n    type Account {\n        # Address is the address owning the account.\n        address: Address!\n        # Balance is the balance of the account, in wei.\n        balance: BigInt!\n        # TransactionCount is the number of transactions sent from this account,\n        # or in the case of a contract, the number of contracts created. Otherwise\n        # known as the nonce.\n        transactionCount: Long!\n        # Code contains the smart contract code for this account, if the account\n        # is a (non-self-destructed) contract.\n        code: Bytes!\n        # Storage provides access to the storage of a contract account, indexed\n        # by its 32 byte slot identifier.\n        storage(slot: Bytes32!): Bytes32!\n    }\n\n    # Log is an Ethereum event log.\n    type Log {\n        # Index is the index of this log in the block.\n        index: Int!\n        # Account is the account which generated this log - this will always\n        # be a contract account.\n        account(block: Long): Account!\n        # Topics is a list of 0-4 indexed topics for the log.\n        topics: [Bytes32!]!\n        # Data is unindexed data for this log.\n        data: Bytes!\n        # Transaction is the transaction that generated this log entry.\n        transaction: Transaction!\n    }\n\n    # Transaction is an Ethereum transaction.\n    type Transaction {\n        # Hash is the hash of this transaction.\n        hash: Bytes32!\n        # Nonce is the nonce of the account this transaction was generated with.\n        nonce: Long!\n        # Index is the index of this transaction in the parent block. 
This will\n        # be null if the transaction has not yet been mined.\n        index: Int\n        # From is the account that sent this transaction - this will always be\n        # an externally owned account.\n        from(block: Long): Account!\n        # To is the account the transaction was sent to. This is null for\n        # contract-creating transactions.\n        to(block: Long): Account\n        # Value is the value, in wei, sent along with this transaction.\n        value: BigInt!\n        # GasPrice is the price offered to miners for gas, in wei per unit.\n        gasPrice: BigInt!\n        # Gas is the maximum amount of gas this transaction can consume.\n        gas: Long!\n        # InputData is the data supplied to the target of the transaction.\n        inputData: Bytes!\n        # Block is the block this transaction was mined in. This will be null if\n        # the transaction has not yet been mined.\n        block: Block\n\n        # Status is the return status of the transaction. This will be 1 if the\n        # transaction succeeded, or 0 if it failed (due to a revert, or due to\n        # running out of gas). If the transaction has not yet been mined, this\n        # field will be null.\n        status: Long\n        # GasUsed is the amount of gas that was used processing this transaction.\n        # If the transaction has not yet been mined, this field will be null.\n        gasUsed: Long\n        # CumulativeGasUsed is the total gas used in the block up to and including\n        # this transaction. If the transaction has not yet been mined, this field\n        # will be null.\n        cumulativeGasUsed: Long\n        # CreatedContract is the account that was created by a contract creation\n        # transaction. If the transaction was not a contract creation transaction,\n        # or it has not yet been mined, this field will be null.\n        createdContract(block: Long): Account\n        # Logs is a list of log entries emitted by this transaction. If the\n        # transaction has not yet been mined, this field will be null.\n        logs: [Log!]\n        r: BigInt!\n        s: BigInt!\n        v: BigInt!\n    }\n\n    # BlockFilterCriteria encapsulates log filter criteria for a filter applied\n    # to a single block.\n    input BlockFilterCriteria {\n        # Addresses is list of addresses that are of interest. If this list is\n        # empty, results will not be filtered by address.\n        addresses: [Address!]\n        # Topics list restricts matches to particular event topics. Each event has a list\n      # of topics. Topics matches a prefix of that list. An empty element array matches any\n      # topic. 
Non-empty elements represent an alternative that matches any of the\n      # contained topics.\n      #\n      # Examples:\n      #  - [] or nil          matches any topic list\n      #  - [[A]]              matches topic A in first position\n      #  - [[], [B]]          matches any topic in first position, B in second position\n      #  - [[A], [B]]         matches topic A in first position, B in second position\n      #  - [[A, B], [C, D]]   matches topic (A OR B) in first position, (C OR D) in second position\n        topics: [[Bytes32!]!]\n    }\n\n    # Block is an Ethereum block.\n    type Block {\n        # Number is the number of this block, starting at 0 for the genesis block.\n        number: Long!\n        # Hash is the block hash of this block.\n        hash: Bytes32!\n        # Parent is the parent block of this block.\n        parent: Block\n        # Nonce is the block nonce, an 8 byte sequence determined by the miner.\n        nonce: Bytes!\n        # TransactionsRoot is the keccak256 hash of the root of the trie of transactions in this block.\n        transactionsRoot: Bytes32!\n        # TransactionCount is the number of transactions in this block. If\n        # transactions are not available for this block, this field will be null.\n        transactionCount: Int\n        # StateRoot is the keccak256 hash of the state trie after this block was processed.\n        stateRoot: Bytes32!\n        # ReceiptsRoot is the keccak256 hash of the trie of transaction receipts in this block.\n        receiptsRoot: Bytes32!\n        # Miner is the account that mined this block.\n        miner(block: Long): Account!\n        # ExtraData is an arbitrary data field supplied by the miner.\n        extraData: Bytes!\n        # GasLimit is the maximum amount of gas that was available to transactions in this block.\n        gasLimit: Long!\n        # GasUsed is the amount of gas that was used executing transactions in this block.\n        gasUsed: Long!\n        # Timestamp is the unix timestamp at which this block was mined.\n        timestamp: Long!\n        # LogsBloom is a bloom filter that can be used to check if a block may\n        # contain log entries matching a filter.\n        logsBloom: Bytes!\n        # MixHash is the hash that was used as an input to the PoW process.\n        mixHash: Bytes32!\n        # Difficulty is a measure of the difficulty of mining this block.\n        difficulty: BigInt!\n        # TotalDifficulty is the sum of all difficulty values up to and including\n        # this block.\n        totalDifficulty: BigInt!\n        # OmmerCount is the number of ommers (AKA uncles) associated with this\n        # block. If ommers are unavailable, this field will be null.\n        ommerCount: Int\n        # Ommers is a list of ommer (AKA uncle) blocks associated with this block.\n        # If ommers are unavailable, this field will be null. Depending on your\n        # node, the transactions, transactionAt, transactionCount, ommers,\n        # ommerCount and ommerAt fields may not be available on any ommer blocks.\n        ommers: [Block]\n        # OmmerAt returns the ommer (AKA uncle) at the specified index. If ommers\n        # are unavailable, or the index is out of bounds, this field will be null.\n        ommerAt(index: Int!): Block\n        # OmmerHash is the keccak256 hash of all the ommers (AKA uncles)\n        # associated with this block.\n        ommerHash: Bytes32!\n        # Transactions is a list of transactions associated with this block. 
If\n        # transactions are unavailable for this block, this field will be null.\n        transactions: [Transaction!]\n        # TransactionAt returns the transaction at the specified index. If\n        # transactions are unavailable for this block, or if the index is out of\n        # bounds, this field will be null.\n        transactionAt(index: Int!): Transaction\n        # Logs returns a filtered set of logs from this block.\n        logs(filter: BlockFilterCriteria!): [Log!]!\n        # Account fetches an Ethereum account at the current block's state.\n        account(address: Address!): Account!\n        # Call executes a local call operation at the current block's state.\n        call(data: CallData!): CallResult\n        # EstimateGas estimates the amount of gas that will be required for\n        # successful execution of a transaction at the current block's state.\n        estimateGas(data: CallData!): Long!\n    }\n\n    # CallData represents the data associated with a local contract call.\n    # All fields are optional.\n    input CallData {\n        # From is the address making the call.\n        from: Address\n        # To is the address the call is sent to.\n        to: Address\n        # Gas is the amount of gas sent with the call.\n        gas: Long\n        # GasPrice is the price, in wei, offered for each unit of gas.\n        gasPrice: BigInt\n        # Value is the value, in wei, sent along with the call.\n        value: BigInt\n        # Data is the data sent to the callee.\n        data: Bytes\n    }\n\n    # CallResult is the result of a local call operation.\n    type CallResult {\n        # Data is the return data of the called contract.\n        data: Bytes!\n        # GasUsed is the amount of gas used by the call, after any refunds.\n        gasUsed: Long!\n        # Status is the result of the call - 1 for success or 0 for failure.\n        status: Long!\n    }\n\n    # FilterCriteria encapsulates log filter criteria for searching log entries.\n    input FilterCriteria {\n        # FromBlock is the block at which to start searching, inclusive. Defaults\n        # to the latest block if not supplied.\n        fromBlock: Long\n        # ToBlock is the block at which to stop searching, inclusive. Defaults\n        # to the latest block if not supplied.\n        toBlock: Long\n        # Addresses is a list of addresses that are of interest. If this list is\n        # empty, results will not be filtered by address.\n        addresses: [Address!]\n        # Topics list restricts matches to particular event topics. Each event has a list\n      # of topics. Topics matches a prefix of that list. An empty element array matches any\n      # topic. 
Non-empty elements represent an alternative that matches any of the\n      # contained topics.\n      #\n      # Examples:\n      #  - [] or nil          matches any topic list\n      #  - [[A]]              matches topic A in first position\n      #  - [[], [B]]          matches any topic in first position, B in second position\n      #  - [[A], [B]]         matches topic A in first position, B in second position\n      #  - [[A, B], [C, D]]   matches topic (A OR B) in first position, (C OR D) in second position\n        topics: [[Bytes32!]!]\n    }\n\n    # SyncState contains the current synchronisation state of the client.\n    type SyncState {\n        # StartingBlock is the block number at which synchronisation started.\n        startingBlock: Long!\n        # CurrentBlock is the point at which synchronisation has presently reached.\n        currentBlock: Long!\n        # HighestBlock is the latest known block number.\n        highestBlock: Long!\n        # PulledStates is the number of state entries fetched so far, or null\n        # if this is not known or not relevant.\n        pulledStates: Long\n        # KnownStates is the number of states the node knows of so far, or null\n        # if this is not known or not relevant.\n        knownStates: Long\n    }\n\n    # Pending represents the current pending state.\n    type Pending {\n      # TransactionCount is the number of transactions in the pending state.\n      transactionCount: Int!\n      # Transactions is a list of transactions in the current pending state.\n      transactions: [Transaction!]\n      # Account fetches an Ethereum account for the pending state.\n      account(address: Address!): Account!\n      # Call executes a local call operation for the pending state.\n      call(data: CallData!): CallResult\n      # EstimateGas estimates the amount of gas that will be required for\n      # successful execution of a transaction for the pending state.\n      estimateGas(data: CallData!): Long!\n    }\n\n    type Query {\n        # Block fetches an Ethereum block by number or by hash. If neither is\n        # supplied, the most recent known block is returned.\n        block(number: Long, hash: Bytes32): Block\n        # Blocks returns all the blocks between two numbers, inclusive. If\n        # to is not supplied, it defaults to the most recent known block.\n        blocks(from: Long, to: Long): [Block!]!\n        # Pending returns the current pending state.\n        pending: Pending!\n        # Transaction returns a transaction specified by its hash.\n        transaction(hash: Bytes32!): Transaction\n        # Logs returns log entries matching the provided filter.\n        logs(filter: FilterCriteria!): [Log!]!\n        # GasPrice returns the node's estimate of a gas price sufficient to\n        # ensure a transaction is mined in a timely fashion.\n        gasPrice: BigInt!\n        # Syncing returns information on the current synchronisation state.\n        syncing: SyncState\n        # ChainID returns the current chain ID for transaction replay protection.\n        chainID: BigInt!\n    }\n\n    type Mutation {\n        # SendRawTransaction sends an RLP-encoded transaction to the network.\n        sendRawTransaction(data: Bytes!): Bytes32!\n    }\n`\n"
  },
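  {
    "path": "graphql/EXAMPLES.md",
    "content": "### Example queries\n\nIllustrative queries only (this file is not part of the original tree); they exercise the schema defined in `graphql/schema.go` against a node's `/graphql` endpoint.\n\nFetch header fields of the latest block:\n\n    { block { number hash miner { address } gasUsed gasLimit } }\n\nFetch a block by number and inspect one of its transactions:\n\n    { block(number: 10) { transactionAt(index: 0) { hash from { address } value } } }\n\nSearch logs over a block range (topic positions follow the prefix-matching rules documented on `FilterCriteria`):\n\n    { logs(filter: { fromBlock: 0, toBlock: 10 }) { index data transaction { hash } } }\n\nBroadcast an RLP-encoded transaction (the payload is a placeholder):\n\n    mutation { sendRawTransaction(data: \"0x...\") }\n"
  },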
  {
    "path": "graphql/service.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage graphql\n\nimport (\n\t\"encoding/json\"\n\t\"net/http\"\n\n\t\"github.com/ethereum/go-ethereum/internal/ethapi\"\n\t\"github.com/ethereum/go-ethereum/node\"\n\t\"github.com/graph-gophers/graphql-go\"\n)\n\ntype handler struct {\n\tSchema *graphql.Schema\n}\n\nfunc (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tvar params struct {\n\t\tQuery         string                 `json:\"query\"`\n\t\tOperationName string                 `json:\"operationName\"`\n\t\tVariables     map[string]interface{} `json:\"variables\"`\n\t}\n\tif err := json.NewDecoder(r.Body).Decode(&params); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tresponse := h.Schema.Exec(r.Context(), params.Query, params.OperationName, params.Variables)\n\tresponseJSON, err := json.Marshal(response)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif len(response.Errors) > 0 {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(responseJSON)\n\n}\n\n// New constructs a new GraphQL service instance.\nfunc New(stack *node.Node, backend ethapi.Backend, cors, vhosts []string) error {\n\tif backend == nil {\n\t\tpanic(\"missing backend\")\n\t}\n\t// check if http server with given endpoint exists and enable graphQL on it\n\treturn newHandler(stack, backend, cors, vhosts)\n}\n\n// newHandler returns a new `http.Handler` that will answer GraphQL queries.\n// It additionally exports an interactive query browser on the / endpoint.\nfunc newHandler(stack *node.Node, backend ethapi.Backend, cors, vhosts []string) error {\n\tq := Resolver{backend}\n\n\ts, err := graphql.ParseSchema(schema, &q)\n\tif err != nil {\n\t\treturn err\n\t}\n\th := handler{Schema: s}\n\thandler := node.NewHTTPHandlerStack(h, cors, vhosts)\n\n\tstack.RegisterHandler(\"GraphQL UI\", \"/graphql/ui\", GraphiQL{})\n\tstack.RegisterHandler(\"GraphQL\", \"/graphql\", handler)\n\tstack.RegisterHandler(\"GraphQL\", \"/graphql/\", handler)\n\n\treturn nil\n}\n"
  },
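  {
    "path": "graphql/examples/enable_graphql.go",
    "content": "// NOTE: this file is an illustrative sketch and not part of the original\n// tree. It shows how an embedder can register the GraphQL service on a node,\n// mirroring createGQLService in graphql/graphql_test.go; the host, port and\n// config values are assumptions chosen for the example.\npackage main\n\nimport (\n\t\"log\"\n\n\t\"github.com/ethereum/go-ethereum/eth\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/graphql\"\n\t\"github.com/ethereum/go-ethereum/node\"\n)\n\nfunc main() {\n\tstack, err := node.New(&node.Config{HTTPHost: \"127.0.0.1\", HTTPPort: 8545})\n\tif err != nil {\n\t\tlog.Fatalf(\"could not create node: %v\", err)\n\t}\n\tdefer stack.Close()\n\tconf := ethconfig.Defaults\n\tethBackend, err := eth.New(stack, &conf)\n\tif err != nil {\n\t\tlog.Fatalf(\"could not create eth backend: %v\", err)\n\t}\n\t// Register the /graphql and /graphql/ui handlers on the node's HTTP\n\t// server; the empty cors and vhosts lists mirror the defaults used in\n\t// the tests.\n\tif err := graphql.New(stack, ethBackend.APIBackend, []string{}, []string{}); err != nil {\n\t\tlog.Fatalf(\"could not create graphql service: %v\", err)\n\t}\n\tif err := stack.Start(); err != nil {\n\t\tlog.Fatalf(\"could not start node: %v\", err)\n\t}\n\tselect {} // serve until the process is killed\n}\n"
  },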
  {
    "path": "issue_template.md",
    "content": "### Description\n\n### Acceptance criteria\n\n### Risks + pitfalls\n\n### Where to begin\n"
  },
  {
    "path": "les/api.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\tvfs \"github.com/ethereum/go-ethereum/les/vflux/server\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\nvar (\n\terrNoCheckpoint         = errors.New(\"no local checkpoint provided\")\n\terrNotActivated         = errors.New(\"checkpoint registrar is not activated\")\n\terrUnknownBenchmarkType = errors.New(\"unknown benchmark type\")\n\terrNoPriority           = errors.New(\"priority too low to raise capacity\")\n)\n\n// PrivateLightServerAPI provides an API to access the LES light server.\ntype PrivateLightServerAPI struct {\n\tserver                               *LesServer\n\tdefaultPosFactors, defaultNegFactors vfs.PriceFactors\n}\n\n// NewPrivateLightServerAPI creates a new LES light server API.\nfunc NewPrivateLightServerAPI(server *LesServer) *PrivateLightServerAPI {\n\treturn &PrivateLightServerAPI{\n\t\tserver:            server,\n\t\tdefaultPosFactors: server.clientPool.defaultPosFactors,\n\t\tdefaultNegFactors: server.clientPool.defaultNegFactors,\n\t}\n}\n\n// parseNode parses either an enode address a raw hex node id\nfunc parseNode(node string) (enode.ID, error) {\n\tif id, err := enode.ParseID(node); err == nil {\n\t\treturn id, nil\n\t}\n\tif node, err := enode.Parse(enode.ValidSchemes, node); err == nil {\n\t\treturn node.ID(), nil\n\t} else {\n\t\treturn enode.ID{}, err\n\t}\n}\n\n// ServerInfo returns global server parameters\nfunc (api *PrivateLightServerAPI) ServerInfo() map[string]interface{} {\n\tres := make(map[string]interface{})\n\tres[\"minimumCapacity\"] = api.server.minCapacity\n\tres[\"maximumCapacity\"] = api.server.maxCapacity\n\tres[\"totalCapacity\"], res[\"totalConnectedCapacity\"], res[\"priorityConnectedCapacity\"] = api.server.clientPool.capacityInfo()\n\treturn res\n}\n\n// ClientInfo returns information about clients listed in the ids list or matching the given tags\nfunc (api *PrivateLightServerAPI) ClientInfo(nodes []string) map[enode.ID]map[string]interface{} {\n\tvar ids []enode.ID\n\tfor _, node := range nodes {\n\t\tif id, err := parseNode(node); err == nil {\n\t\t\tids = append(ids, id)\n\t\t}\n\t}\n\n\tres := make(map[enode.ID]map[string]interface{})\n\tapi.server.clientPool.forClients(ids, func(client *clientInfo) {\n\t\tres[client.node.ID()] = api.clientInfo(client)\n\t})\n\treturn res\n}\n\n// PriorityClientInfo returns information about clients with a positive balance\n// in the given ID range (stop excluded). If stop is null then the iterator stops\n// only at the end of the ID space. 
MaxCount limits the number of results returned.\n// If maxCount limit is applied but there are more potential results then the ID\n// of the next potential result is included in the map with an empty structure\n// assigned to it.\nfunc (api *PrivateLightServerAPI) PriorityClientInfo(start, stop enode.ID, maxCount int) map[enode.ID]map[string]interface{} {\n\tres := make(map[enode.ID]map[string]interface{})\n\tids := api.server.clientPool.bt.GetPosBalanceIDs(start, stop, maxCount+1)\n\tif len(ids) > maxCount {\n\t\tres[ids[maxCount]] = make(map[string]interface{})\n\t\tids = ids[:maxCount]\n\t}\n\tif len(ids) != 0 {\n\t\tapi.server.clientPool.forClients(ids, func(client *clientInfo) {\n\t\t\tres[client.node.ID()] = api.clientInfo(client)\n\t\t})\n\t}\n\treturn res\n}\n\n// clientInfo creates a client info data structure\nfunc (api *PrivateLightServerAPI) clientInfo(c *clientInfo) map[string]interface{} {\n\tinfo := make(map[string]interface{})\n\tpb, nb := c.balance.GetBalance()\n\tinfo[\"isConnected\"] = c.connected\n\tinfo[\"pricing/balance\"] = pb\n\tinfo[\"priority\"] = pb != 0\n\t//\t\tcb := api.server.clientPool.ndb.getCurrencyBalance(id)\n\t//\t\tinfo[\"pricing/currency\"] = cb.amount\n\tif c.connected {\n\t\tinfo[\"connectionTime\"] = float64(mclock.Now()-c.connectedAt) / float64(time.Second)\n\t\tinfo[\"capacity\"], _ = api.server.clientPool.ns.GetField(c.node, priorityPoolSetup.CapacityField).(uint64)\n\t\tinfo[\"pricing/negBalance\"] = nb\n\t}\n\treturn info\n}\n\n// setParams either sets the given parameters for a single connected client (if specified)\n// or the default parameters applicable to clients connected in the future\nfunc (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientInfo, posFactors, negFactors *vfs.PriceFactors) (updateFactors bool, err error) {\n\tdefParams := client == nil\n\tfor name, value := range params {\n\t\terrValue := func() error {\n\t\t\treturn fmt.Errorf(\"invalid value for parameter '%s'\", name)\n\t\t}\n\t\tsetFactor := func(v *float64) {\n\t\t\tif val, ok := value.(float64); ok && val >= 0 {\n\t\t\t\t*v = val / float64(time.Second)\n\t\t\t\tupdateFactors = true\n\t\t\t} else {\n\t\t\t\terr = errValue()\n\t\t\t}\n\t\t}\n\n\t\tswitch {\n\t\tcase name == \"pricing/timeFactor\":\n\t\t\tsetFactor(&posFactors.TimeFactor)\n\t\tcase name == \"pricing/capacityFactor\":\n\t\t\tsetFactor(&posFactors.CapacityFactor)\n\t\tcase name == \"pricing/requestCostFactor\":\n\t\t\tsetFactor(&posFactors.RequestFactor)\n\t\tcase name == \"pricing/negative/timeFactor\":\n\t\t\tsetFactor(&negFactors.TimeFactor)\n\t\tcase name == \"pricing/negative/capacityFactor\":\n\t\t\tsetFactor(&negFactors.CapacityFactor)\n\t\tcase name == \"pricing/negative/requestCostFactor\":\n\t\t\tsetFactor(&negFactors.RequestFactor)\n\t\tcase !defParams && name == \"capacity\":\n\t\t\tif capacity, ok := value.(float64); ok && uint64(capacity) >= api.server.minCapacity {\n\t\t\t\t_, err = api.server.clientPool.setCapacity(client.node, client.address, uint64(capacity), 0, true)\n\t\t\t\t// Don't have to call factor update explicitly. 
It's already done\n\t\t\t\t// in the setCapacity function.\n\t\t\t} else {\n\t\t\t\terr = errValue()\n\t\t\t}\n\t\tdefault:\n\t\t\tif defParams {\n\t\t\t\terr = fmt.Errorf(\"invalid default parameter '%s'\", name)\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"invalid client parameter '%s'\", name)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n// SetClientParams sets client parameters for all clients listed in the ids list\n// or all connected clients if the list is empty\nfunc (api *PrivateLightServerAPI) SetClientParams(nodes []string, params map[string]interface{}) error {\n\tvar (\n\t\tids []enode.ID\n\t\terr error\n\t)\n\tfor _, node := range nodes {\n\t\tif id, err := parseNode(node); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tids = append(ids, id)\n\t\t}\n\t}\n\tapi.server.clientPool.forClients(ids, func(client *clientInfo) {\n\t\tif client.connected {\n\t\t\tposFactors, negFactors := client.balance.GetPriceFactors()\n\t\t\tupdate, e := api.setParams(params, client, &posFactors, &negFactors)\n\t\t\tif update {\n\t\t\t\tclient.balance.SetPriceFactors(posFactors, negFactors)\n\t\t\t}\n\t\t\tif e != nil {\n\t\t\t\terr = e\n\t\t\t}\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"client %064x is not connected\", client.node.ID())\n\t\t}\n\t})\n\treturn err\n}\n\n// SetDefaultParams sets the default parameters applicable to clients connected in the future\nfunc (api *PrivateLightServerAPI) SetDefaultParams(params map[string]interface{}) error {\n\tupdate, err := api.setParams(params, nil, &api.defaultPosFactors, &api.defaultNegFactors)\n\tif update {\n\t\tapi.server.clientPool.setDefaultFactors(api.defaultPosFactors, api.defaultNegFactors)\n\t}\n\treturn err\n}\n\n// SetConnectedBias sets the connection bias, which is applied to already connected clients\n// so that they won't be kicked out too soon, ensuring all connected clients have\n// enough time to request or sync some data.\n// If the input parameter `bias` is negative (illegal), an error is returned.\nfunc (api *PrivateLightServerAPI) SetConnectedBias(bias time.Duration) error {\n\tif bias < time.Duration(0) {\n\t\treturn fmt.Errorf(\"bias illegal: %v less than 0\", bias)\n\t}\n\tapi.server.clientPool.setConnectedBias(bias)\n\treturn nil\n}\n\n// AddBalance adds the given amount to the balance of a client if possible and returns\n// the balance before and after the operation\nfunc (api *PrivateLightServerAPI) AddBalance(node string, amount int64) (balance [2]uint64, err error) {\n\tvar id enode.ID\n\tif id, err = parseNode(node); err != nil {\n\t\treturn\n\t}\n\tapi.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo) {\n\t\tbalance[0], balance[1], err = c.balance.AddBalance(amount)\n\t})\n\treturn\n}\n\n// Benchmark runs a request performance benchmark with a given set of measurement setups\n// in multiple passes specified by passCount. 
The measurement time for each setup in each\n// pass is specified in milliseconds by length.\n//\n// Note: measurement time is adjusted for each pass depending on the previous ones.\n// Therefore a controlled total measurement time is achievable in multiple passes.\nfunc (api *PrivateLightServerAPI) Benchmark(setups []map[string]interface{}, passCount, length int) ([]map[string]interface{}, error) {\n\tbenchmarks := make([]requestBenchmark, len(setups))\n\tfor i, setup := range setups {\n\t\tif t, ok := setup[\"type\"].(string); ok {\n\t\t\tgetInt := func(field string, def int) int {\n\t\t\t\tif value, ok := setup[field].(float64); ok {\n\t\t\t\t\treturn int(value)\n\t\t\t\t}\n\t\t\t\treturn def\n\t\t\t}\n\t\t\tgetBool := func(field string, def bool) bool {\n\t\t\t\tif value, ok := setup[field].(bool); ok {\n\t\t\t\t\treturn value\n\t\t\t\t}\n\t\t\t\treturn def\n\t\t\t}\n\t\t\tswitch t {\n\t\t\tcase \"header\":\n\t\t\t\tbenchmarks[i] = &benchmarkBlockHeaders{\n\t\t\t\t\tamount:  getInt(\"amount\", 1),\n\t\t\t\t\tskip:    getInt(\"skip\", 1),\n\t\t\t\t\tbyHash:  getBool(\"byHash\", false),\n\t\t\t\t\treverse: getBool(\"reverse\", false),\n\t\t\t\t}\n\t\t\tcase \"body\":\n\t\t\t\tbenchmarks[i] = &benchmarkBodiesOrReceipts{receipts: false}\n\t\t\tcase \"receipts\":\n\t\t\t\tbenchmarks[i] = &benchmarkBodiesOrReceipts{receipts: true}\n\t\t\tcase \"proof\":\n\t\t\t\tbenchmarks[i] = &benchmarkProofsOrCode{code: false}\n\t\t\tcase \"code\":\n\t\t\t\tbenchmarks[i] = &benchmarkProofsOrCode{code: true}\n\t\t\tcase \"cht\":\n\t\t\t\tbenchmarks[i] = &benchmarkHelperTrie{\n\t\t\t\t\tbloom:    false,\n\t\t\t\t\treqCount: getInt(\"amount\", 1),\n\t\t\t\t}\n\t\t\tcase \"bloom\":\n\t\t\t\tbenchmarks[i] = &benchmarkHelperTrie{\n\t\t\t\t\tbloom:    true,\n\t\t\t\t\treqCount: getInt(\"amount\", 1),\n\t\t\t\t}\n\t\t\tcase \"txSend\":\n\t\t\t\tbenchmarks[i] = &benchmarkTxSend{}\n\t\t\tcase \"txStatus\":\n\t\t\t\tbenchmarks[i] = &benchmarkTxStatus{}\n\t\t\tdefault:\n\t\t\t\treturn nil, errUnknownBenchmarkType\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, errUnknownBenchmarkType\n\t\t}\n\t}\n\trs := api.server.handler.runBenchmark(benchmarks, passCount, time.Millisecond*time.Duration(length))\n\tresult := make([]map[string]interface{}, len(setups))\n\tfor i, r := range rs {\n\t\tres := make(map[string]interface{})\n\t\tif r.err == nil {\n\t\t\tres[\"totalCount\"] = r.totalCount\n\t\t\tres[\"avgTime\"] = r.avgTime\n\t\t\tres[\"maxInSize\"] = r.maxInSize\n\t\t\tres[\"maxOutSize\"] = r.maxOutSize\n\t\t} else {\n\t\t\tres[\"error\"] = r.err.Error()\n\t\t}\n\t\tresult[i] = res\n\t}\n\treturn result, nil\n}\n\n// PrivateDebugAPI provides an API to debug LES light server functionality.\ntype PrivateDebugAPI struct {\n\tserver *LesServer\n}\n\n// NewPrivateDebugAPI creates a new LES light server debug API.\nfunc NewPrivateDebugAPI(server *LesServer) *PrivateDebugAPI {\n\treturn &PrivateDebugAPI{\n\t\tserver: server,\n\t}\n}\n\n// FreezeClient forces a temporary client freeze which normally happens when the server is overloaded\nfunc (api *PrivateDebugAPI) FreezeClient(node string) error {\n\tvar (\n\t\tid  enode.ID\n\t\terr error\n\t)\n\tif id, err = parseNode(node); err != nil {\n\t\treturn err\n\t}\n\tapi.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo) {\n\t\tif c.connected {\n\t\t\tc.peer.freeze()\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"client %064x is not connected\", id[:])\n\t\t}\n\t})\n\treturn err\n}\n\n// PrivateLightAPI provides an API to access the LES light server or light client.\ntype 
PrivateLightAPI struct {\n\tbackend *lesCommons\n}\n\n// NewPrivateLightAPI creates a new LES service API.\nfunc NewPrivateLightAPI(backend *lesCommons) *PrivateLightAPI {\n\treturn &PrivateLightAPI{backend: backend}\n}\n\n// LatestCheckpoint returns the latest local checkpoint package.\n//\n// The checkpoint package consists of 4 strings:\n//   result[0], hex encoded latest section index\n//   result[1], 32 bytes hex encoded latest section head hash\n//   result[2], 32 bytes hex encoded latest section canonical hash trie root hash\n//   result[3], 32 bytes hex encoded latest section bloom trie root hash\nfunc (api *PrivateLightAPI) LatestCheckpoint() ([4]string, error) {\n\tvar res [4]string\n\tcp := api.backend.latestLocalCheckpoint()\n\tif cp.Empty() {\n\t\treturn res, errNoCheckpoint\n\t}\n\tres[0] = hexutil.EncodeUint64(cp.SectionIndex)\n\tres[1], res[2], res[3] = cp.SectionHead.Hex(), cp.CHTRoot.Hex(), cp.BloomRoot.Hex()\n\treturn res, nil\n}\n\n// GetCheckpoint returns the specific local checkpoint package.\n//\n// The checkpoint package consists of 3 strings:\n//   result[0], 32 bytes hex encoded latest section head hash\n//   result[1], 32 bytes hex encoded latest section canonical hash trie root hash\n//   result[2], 32 bytes hex encoded latest section bloom trie root hash\nfunc (api *PrivateLightAPI) GetCheckpoint(index uint64) ([3]string, error) {\n\tvar res [3]string\n\tcp := api.backend.localCheckpoint(index)\n\tif cp.Empty() {\n\t\treturn res, errNoCheckpoint\n\t}\n\tres[0], res[1], res[2] = cp.SectionHead.Hex(), cp.CHTRoot.Hex(), cp.BloomRoot.Hex()\n\treturn res, nil\n}\n\n// GetCheckpointContractAddress returns the checkpoint contract address in hex format.\nfunc (api *PrivateLightAPI) GetCheckpointContractAddress() (string, error) {\n\tif api.backend.oracle == nil {\n\t\treturn \"\", errNotActivated\n\t}\n\treturn api.backend.oracle.Contract().ContractAddr().Hex(), nil\n}\n"
  },
  {
    "path": "les/api_backend.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"math/big\"\n\n\t\"github.com/ethereum/go-ethereum/accounts\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/bloombits\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/eth/gasprice\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\ntype LesApiBackend struct {\n\textRPCEnabled       bool\n\tallowUnprotectedTxs bool\n\teth                 *LightEthereum\n\tgpo                 *gasprice.Oracle\n}\n\nfunc (b *LesApiBackend) ChainConfig() *params.ChainConfig {\n\treturn b.eth.chainConfig\n}\n\nfunc (b *LesApiBackend) CurrentBlock() *types.Block {\n\treturn types.NewBlockWithHeader(b.eth.BlockChain().CurrentHeader())\n}\n\nfunc (b *LesApiBackend) SetHead(number uint64) {\n\tb.eth.handler.downloader.Cancel()\n\tb.eth.blockchain.SetHead(number)\n}\n\nfunc (b *LesApiBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) {\n\tif number == rpc.LatestBlockNumber || number == rpc.PendingBlockNumber {\n\t\treturn b.eth.blockchain.CurrentHeader(), nil\n\t}\n\treturn b.eth.blockchain.GetHeaderByNumberOdr(ctx, uint64(number))\n}\n\nfunc (b *LesApiBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) {\n\tif blockNr, ok := blockNrOrHash.Number(); ok {\n\t\treturn b.HeaderByNumber(ctx, blockNr)\n\t}\n\tif hash, ok := blockNrOrHash.Hash(); ok {\n\t\theader, err := b.HeaderByHash(ctx, hash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif header == nil {\n\t\t\treturn nil, errors.New(\"header for hash not found\")\n\t\t}\n\t\tif blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash {\n\t\t\treturn nil, errors.New(\"hash is not currently canonical\")\n\t\t}\n\t\treturn header, nil\n\t}\n\treturn nil, errors.New(\"invalid arguments; neither block nor hash specified\")\n}\n\nfunc (b *LesApiBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {\n\treturn b.eth.blockchain.GetHeaderByHash(hash), nil\n}\n\nfunc (b *LesApiBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) {\n\theader, err := 
b.HeaderByNumber(ctx, number)\n\tif header == nil || err != nil {\n\t\treturn nil, err\n\t}\n\treturn b.BlockByHash(ctx, header.Hash())\n}\n\nfunc (b *LesApiBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {\n\treturn b.eth.blockchain.GetBlockByHash(ctx, hash)\n}\n\nfunc (b *LesApiBackend) BlockByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Block, error) {\n\tif blockNr, ok := blockNrOrHash.Number(); ok {\n\t\treturn b.BlockByNumber(ctx, blockNr)\n\t}\n\tif hash, ok := blockNrOrHash.Hash(); ok {\n\t\tblock, err := b.BlockByHash(ctx, hash)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif block == nil {\n\t\t\treturn nil, errors.New(\"header found, but block body is missing\")\n\t\t}\n\t\tif blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(block.NumberU64()) != hash {\n\t\t\treturn nil, errors.New(\"hash is not currently canonical\")\n\t\t}\n\t\treturn block, nil\n\t}\n\treturn nil, errors.New(\"invalid arguments; neither block nor hash specified\")\n}\n\nfunc (b *LesApiBackend) StateAndHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*state.StateDB, *types.Header, error) {\n\theader, err := b.HeaderByNumber(ctx, number)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tif header == nil {\n\t\treturn nil, nil, errors.New(\"header not found\")\n\t}\n\treturn light.NewState(ctx, header, b.eth.odr), header, nil\n}\n\nfunc (b *LesApiBackend) StateAndHeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*state.StateDB, *types.Header, error) {\n\tif blockNr, ok := blockNrOrHash.Number(); ok {\n\t\treturn b.StateAndHeaderByNumber(ctx, blockNr)\n\t}\n\tif hash, ok := blockNrOrHash.Hash(); ok {\n\t\theader := b.eth.blockchain.GetHeaderByHash(hash)\n\t\tif header == nil {\n\t\t\treturn nil, nil, errors.New(\"header for hash not found\")\n\t\t}\n\t\tif blockNrOrHash.RequireCanonical && b.eth.blockchain.GetCanonicalHash(header.Number.Uint64()) != hash {\n\t\t\treturn nil, nil, errors.New(\"hash is not currently canonical\")\n\t\t}\n\t\treturn light.NewState(ctx, header, b.eth.odr), header, nil\n\t}\n\treturn nil, nil, errors.New(\"invalid arguments; neither block nor hash specified\")\n}\n\nfunc (b *LesApiBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {\n\tif number := rawdb.ReadHeaderNumber(b.eth.chainDb, hash); number != nil {\n\t\treturn light.GetBlockReceipts(ctx, b.eth.odr, hash, *number)\n\t}\n\treturn nil, nil\n}\n\nfunc (b *LesApiBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {\n\tif number := rawdb.ReadHeaderNumber(b.eth.chainDb, hash); number != nil {\n\t\treturn light.GetBlockLogs(ctx, b.eth.odr, hash, *number)\n\t}\n\treturn nil, nil\n}\n\nfunc (b *LesApiBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int {\n\tif number := rawdb.ReadHeaderNumber(b.eth.chainDb, hash); number != nil {\n\t\treturn b.eth.blockchain.GetTdOdr(ctx, hash, *number)\n\t}\n\treturn nil\n}\n\nfunc (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) {\n\ttxContext := core.NewEVMTxContext(msg)\n\tcontext := core.NewEVMBlockContext(header, b.eth.blockchain, nil)\n\treturn vm.NewEVM(context, txContext, state, b.eth.chainConfig, vm.Config{}), state.Error, nil\n}\n\nfunc (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error {\n\treturn b.eth.txPool.Add(ctx, signedTx)\n}\n\nfunc (b *LesApiBackend) 
RemoveTx(txHash common.Hash) {\n\tb.eth.txPool.RemoveTx(txHash)\n}\n\nfunc (b *LesApiBackend) GetPoolTransactions() (types.Transactions, error) {\n\treturn b.eth.txPool.GetTransactions()\n}\n\nfunc (b *LesApiBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction {\n\treturn b.eth.txPool.GetTransaction(txHash)\n}\n\nfunc (b *LesApiBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) {\n\treturn light.GetTransaction(ctx, b.eth.odr, txHash)\n}\n\nfunc (b *LesApiBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) {\n\treturn b.eth.txPool.GetNonce(ctx, addr)\n}\n\nfunc (b *LesApiBackend) Stats() (pending int, queued int) {\n\treturn b.eth.txPool.Stats(), 0\n}\n\nfunc (b *LesApiBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) {\n\treturn b.eth.txPool.Content()\n}\n\nfunc (b *LesApiBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {\n\treturn b.eth.txPool.SubscribeNewTxsEvent(ch)\n}\n\nfunc (b *LesApiBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {\n\treturn b.eth.blockchain.SubscribeChainEvent(ch)\n}\n\nfunc (b *LesApiBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription {\n\treturn b.eth.blockchain.SubscribeChainHeadEvent(ch)\n}\n\nfunc (b *LesApiBackend) SubscribeChainSideEvent(ch chan<- core.ChainSideEvent) event.Subscription {\n\treturn b.eth.blockchain.SubscribeChainSideEvent(ch)\n}\n\nfunc (b *LesApiBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {\n\treturn b.eth.blockchain.SubscribeLogsEvent(ch)\n}\n\nfunc (b *LesApiBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {\n\treturn event.NewSubscription(func(quit <-chan struct{}) error {\n\t\t<-quit\n\t\treturn nil\n\t})\n}\n\nfunc (b *LesApiBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {\n\treturn b.eth.blockchain.SubscribeRemovedLogsEvent(ch)\n}\n\nfunc (b *LesApiBackend) Downloader() *downloader.Downloader {\n\treturn b.eth.Downloader()\n}\n\nfunc (b *LesApiBackend) ProtocolVersion() int {\n\treturn b.eth.LesVersion() + 10000\n}\n\nfunc (b *LesApiBackend) SuggestPrice(ctx context.Context) (*big.Int, error) {\n\treturn b.gpo.SuggestPrice(ctx)\n}\n\nfunc (b *LesApiBackend) ChainDb() ethdb.Database {\n\treturn b.eth.chainDb\n}\n\nfunc (b *LesApiBackend) AccountManager() *accounts.Manager {\n\treturn b.eth.accountManager\n}\n\nfunc (b *LesApiBackend) ExtRPCEnabled() bool {\n\treturn b.extRPCEnabled\n}\n\nfunc (b *LesApiBackend) UnprotectedAllowed() bool {\n\treturn b.allowUnprotectedTxs\n}\n\nfunc (b *LesApiBackend) RPCGasCap() uint64 {\n\treturn b.eth.config.RPCGasCap\n}\n\nfunc (b *LesApiBackend) RPCTxFeeCap() float64 {\n\treturn b.eth.config.RPCTxFeeCap\n}\n\nfunc (b *LesApiBackend) BloomStatus() (uint64, uint64) {\n\tif b.eth.bloomIndexer == nil {\n\t\treturn 0, 0\n\t}\n\tsections, _, _ := b.eth.bloomIndexer.Sections()\n\treturn params.BloomBitsBlocksClient, sections\n}\n\nfunc (b *LesApiBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) {\n\tfor i := 0; i < bloomFilterThreads; i++ {\n\t\tgo session.Multiplex(bloomRetrievalBatch, bloomRetrievalWait, b.eth.bloomRequests)\n\t}\n}\n\nfunc (b *LesApiBackend) Engine() consensus.Engine {\n\treturn b.eth.engine\n}\n\nfunc (b *LesApiBackend) CurrentHeader() *types.Header {\n\treturn b.eth.blockchain.CurrentHeader()\n}\n\nfunc (b 
*LesApiBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) {\n\treturn b.eth.stateAtBlock(ctx, block, reexec)\n}\n\nfunc (b *LesApiBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) {\n\treturn b.eth.statesInRange(ctx, fromBlock, toBlock, reexec)\n}\n\nfunc (b *LesApiBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) {\n\treturn b.eth.stateAtTransaction(ctx, block, txIndex, reexec)\n}\n"
  },
  {
    "path": "les/api_test.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"flag\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"os\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/eth\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/les/flowcontrol\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/node\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/simulations\"\n\t\"github.com/ethereum/go-ethereum/p2p/simulations/adapters\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n\t\"github.com/mattn/go-colorable\"\n)\n\n// Additional command line flags for the test binary.\nvar (\n\tloglevel   = flag.Int(\"loglevel\", 0, \"verbosity of logs\")\n\tsimAdapter = flag.String(\"adapter\", \"exec\", \"type of simulation: sim|socket|exec|docker\")\n)\n\nfunc TestMain(m *testing.M) {\n\tflag.Parse()\n\tlog.PrintOrigins(true)\n\tlog.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true))))\n\t// register the Delivery service which will run as a devp2p\n\t// protocol when using the exec adapter\n\tadapters.RegisterLifecycles(services)\n\tos.Exit(m.Run())\n}\n\n// This test is not meant to be a part of the automatic testing process because it\n// runs for a long time and also requires a large database in order to do a meaningful\n// request performance test. When testServerDataDir is empty, the test is skipped.\n\nconst (\n\ttestServerDataDir  = \"\" // should always be empty on the master branch\n\ttestServerCapacity = 200\n\ttestMaxClients     = 10\n\ttestTolerance      = 0.1\n\tminRelCap          = 0.2\n)\n\nfunc TestCapacityAPI3(t *testing.T) {\n\ttestCapacityAPI(t, 3)\n}\n\nfunc TestCapacityAPI6(t *testing.T) {\n\ttestCapacityAPI(t, 6)\n}\n\nfunc TestCapacityAPI10(t *testing.T) {\n\ttestCapacityAPI(t, 10)\n}\n\n// testCapacityAPI runs an end-to-end simulation test connecting one server with\n// a given number of clients. It sets different priority capacities to all clients\n// except a randomly selected one which runs in free client mode. 
All clients send\n// similar requests at the maximum allowed rate and the test verifies whether the\n// ratio of processed requests is close enough to the ratio of assigned capacities.\n// Running multiple rounds with different settings ensures that changing capacity\n// while connected and going back and forth between free and priority mode with\n// the supplied API calls is also thoroughly tested.\nfunc testCapacityAPI(t *testing.T, clientCount int) {\n\t// Skip test if no data dir specified\n\tif testServerDataDir == \"\" {\n\t\treturn\n\t}\n\tfor !testSim(t, 1, clientCount, []string{testServerDataDir}, nil, func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool {\n\t\tif len(servers) != 1 {\n\t\t\tt.Fatalf(\"Invalid number of servers: %d\", len(servers))\n\t\t}\n\t\tserver := servers[0]\n\n\t\tserverRpcClient, err := server.Client()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to obtain rpc client: %v\", err)\n\t\t}\n\t\theadNum, headHash := getHead(ctx, t, serverRpcClient)\n\t\tminCap, totalCap := getCapacityInfo(ctx, t, serverRpcClient)\n\t\ttestCap := totalCap * 3 / 4\n\t\tt.Logf(\"Server testCap: %d  minCap: %d  head number: %d  head hash: %064x\\n\", testCap, minCap, headNum, headHash)\n\t\treqMinCap := uint64(float64(testCap) * minRelCap / (minRelCap + float64(len(clients)-1)))\n\t\tif minCap > reqMinCap {\n\t\t\tt.Fatalf(\"Minimum client capacity (%d) bigger than required minimum for this test (%d)\", minCap, reqMinCap)\n\t\t}\n\t\tfreeIdx := rand.Intn(len(clients))\n\n\t\tclientRpcClients := make([]*rpc.Client, len(clients))\n\t\tfor i, client := range clients {\n\t\t\tvar err error\n\t\t\tclientRpcClients[i], err = client.Client()\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"Failed to obtain rpc client: %v\", err)\n\t\t\t}\n\t\t\tt.Log(\"connecting client\", i)\n\t\t\tif i != freeIdx {\n\t\t\t\tsetCapacity(ctx, t, serverRpcClient, client.ID(), testCap/uint64(len(clients)))\n\t\t\t}\n\t\t\tnet.Connect(client.ID(), server.ID())\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\tt.Fatalf(\"Timeout\")\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t\tnum, hash := getHead(ctx, t, clientRpcClients[i])\n\t\t\t\tif num == headNum && hash == headHash {\n\t\t\t\t\tt.Log(\"client\", i, \"synced\")\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Millisecond * 200)\n\t\t\t}\n\t\t}\n\n\t\tvar wg sync.WaitGroup\n\t\tstop := make(chan struct{})\n\n\t\treqCount := make([]uint64, len(clientRpcClients))\n\n\t\t// Send light requests like crazy.\n\t\tfor i, c := range clientRpcClients {\n\t\t\twg.Add(1)\n\t\t\ti, c := i, c\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tqueue := make(chan struct{}, 100)\n\t\t\t\treqCount[i] = 0\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase queue <- struct{}{}:\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-stop:\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\twg.Add(1)\n\t\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\t\tok := testRequest(ctx, t, c)\n\t\t\t\t\t\t\t\twg.Done()\n\t\t\t\t\t\t\t\t<-queue\n\t\t\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\t\t\tcount := atomic.AddUint64(&reqCount[i], 1)\n\t\t\t\t\t\t\t\t\tif count%10000 == 0 {\n\t\t\t\t\t\t\t\t\t\tfreezeClient(ctx, t, serverRpcClient, clients[i].ID())\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}()\n\t\t\t\t\t\t}\n\t\t\t\t\tcase <-stop:\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\n\t\tprocessedSince := 
func(start []uint64) []uint64 {\n\t\t\tres := make([]uint64, len(reqCount))\n\t\t\tfor i := range reqCount {\n\t\t\t\tres[i] = atomic.LoadUint64(&reqCount[i])\n\t\t\t\tif start != nil {\n\t\t\t\t\tres[i] -= start[i]\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn res\n\t\t}\n\n\t\tweights := make([]float64, len(clients))\n\t\tfor c := 0; c < 5; c++ {\n\t\t\tsetCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), minCap)\n\t\t\tfreeIdx = rand.Intn(len(clients))\n\t\t\tvar sum float64\n\t\t\tfor i := range clients {\n\t\t\t\tif i == freeIdx {\n\t\t\t\t\tweights[i] = 0\n\t\t\t\t} else {\n\t\t\t\t\tweights[i] = rand.Float64()*(1-minRelCap) + minRelCap\n\t\t\t\t}\n\t\t\t\tsum += weights[i]\n\t\t\t}\n\t\t\tfor i, client := range clients {\n\t\t\t\tweights[i] *= float64(testCap-minCap-100) / sum\n\t\t\t\tcapacity := uint64(weights[i])\n\t\t\t\tif i != freeIdx && capacity < getCapacity(ctx, t, serverRpcClient, client.ID()) {\n\t\t\t\t\tsetCapacity(ctx, t, serverRpcClient, client.ID(), capacity)\n\t\t\t\t}\n\t\t\t}\n\t\t\tsetCapacity(ctx, t, serverRpcClient, clients[freeIdx].ID(), 0)\n\t\t\tfor i, client := range clients {\n\t\t\t\tcapacity := uint64(weights[i])\n\t\t\t\tif i != freeIdx && capacity > getCapacity(ctx, t, serverRpcClient, client.ID()) {\n\t\t\t\t\tsetCapacity(ctx, t, serverRpcClient, client.ID(), capacity)\n\t\t\t\t}\n\t\t\t}\n\t\t\tweights[freeIdx] = float64(minCap)\n\t\t\tfor i := range clients {\n\t\t\t\tweights[i] /= float64(testCap)\n\t\t\t}\n\n\t\t\ttime.Sleep(flowcontrol.DecParamDelay)\n\t\t\tt.Log(\"Starting measurement\")\n\t\t\tt.Logf(\"Relative weights:\")\n\t\t\tfor i := range clients {\n\t\t\t\tt.Logf(\"  %f\", weights[i])\n\t\t\t}\n\t\t\tt.Log()\n\t\t\tstart := processedSince(nil)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\tt.Fatalf(\"Timeout\")\n\t\t\t\tdefault:\n\t\t\t\t}\n\n\t\t\t\t_, totalCap = getCapacityInfo(ctx, t, serverRpcClient)\n\t\t\t\tif totalCap < testCap {\n\t\t\t\t\tt.Log(\"Total capacity underrun\")\n\t\t\t\t\tclose(stop)\n\t\t\t\t\twg.Wait()\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\tprocessed := processedSince(start)\n\t\t\t\tvar avg uint64\n\t\t\t\tt.Logf(\"Processed\")\n\t\t\t\tfor i, p := range processed {\n\t\t\t\t\tt.Logf(\" %d\", p)\n\t\t\t\t\tprocessed[i] = uint64(float64(p) / weights[i])\n\t\t\t\t\tavg += processed[i]\n\t\t\t\t}\n\t\t\t\tavg /= uint64(len(processed))\n\n\t\t\t\tif avg >= 10000 {\n\t\t\t\t\tvar maxDev float64\n\t\t\t\t\tfor _, p := range processed {\n\t\t\t\t\t\tdev := float64(int64(p-avg)) / float64(avg)\n\t\t\t\t\t\tt.Logf(\" %7.4f\", dev)\n\t\t\t\t\t\tif dev < 0 {\n\t\t\t\t\t\t\tdev = -dev\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif dev > maxDev {\n\t\t\t\t\t\t\tmaxDev = dev\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tt.Logf(\"  max deviation: %f  totalCap: %d\\n\", maxDev, totalCap)\n\t\t\t\t\tif maxDev <= testTolerance {\n\t\t\t\t\t\tt.Log(\"success\")\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tt.Log()\n\t\t\t\t}\n\t\t\t\ttime.Sleep(time.Millisecond * 200)\n\t\t\t}\n\t\t}\n\n\t\tclose(stop)\n\t\twg.Wait()\n\n\t\tfor i, count := range reqCount {\n\t\t\tt.Log(\"client\", i, \"processed\", count)\n\t\t}\n\t\treturn true\n\t}) {\n\t\tt.Log(\"restarting test\")\n\t}\n}\n\nfunc getHead(ctx context.Context, t *testing.T, client *rpc.Client) (uint64, common.Hash) {\n\tres := make(map[string]interface{})\n\tif err := client.CallContext(ctx, &res, \"eth_getBlockByNumber\", \"latest\", false); err != nil {\n\t\tt.Fatalf(\"Failed to obtain head block: %v\", err)\n\t}\n\tnumStr, ok := res[\"number\"].(string)\n\tif 
!ok {\n\t\tt.Fatalf(\"RPC block number field invalid\")\n\t}\n\tnum, err := hexutil.DecodeUint64(numStr)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to decode RPC block number: %v\", err)\n\t}\n\thashStr, ok := res[\"hash\"].(string)\n\tif !ok {\n\t\tt.Fatalf(\"RPC block hash field invalid\")\n\t}\n\thash := common.HexToHash(hashStr)\n\treturn num, hash\n}\n\nfunc testRequest(ctx context.Context, t *testing.T, client *rpc.Client) bool {\n\tvar res string\n\tvar addr common.Address\n\trand.Read(addr[:])\n\tc, cancel := context.WithTimeout(ctx, time.Second*12)\n\tdefer cancel()\n\terr := client.CallContext(c, &res, \"eth_getBalance\", addr, \"latest\")\n\tif err != nil {\n\t\tt.Log(\"request error:\", err)\n\t}\n\treturn err == nil\n}\n\nfunc freezeClient(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) {\n\tif err := server.CallContext(ctx, nil, \"debug_freezeClient\", clientID); err != nil {\n\t\tt.Fatalf(\"Failed to freeze client: %v\", err)\n\t}\n}\n\nfunc setCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID, cap uint64) {\n\tparams := make(map[string]interface{})\n\tparams[\"capacity\"] = cap\n\tif err := server.CallContext(ctx, nil, \"les_setClientParams\", []enode.ID{clientID}, []string{}, params); err != nil {\n\t\tt.Fatalf(\"Failed to set client capacity: %v\", err)\n\t}\n}\n\nfunc getCapacity(ctx context.Context, t *testing.T, server *rpc.Client, clientID enode.ID) uint64 {\n\tvar res map[enode.ID]map[string]interface{}\n\tif err := server.CallContext(ctx, &res, \"les_clientInfo\", []enode.ID{clientID}, []string{}); err != nil {\n\t\tt.Fatalf(\"Failed to get client info: %v\", err)\n\t}\n\tinfo, ok := res[clientID]\n\tif !ok {\n\t\tt.Fatalf(\"Missing client info\")\n\t}\n\tv, ok := info[\"capacity\"]\n\tif !ok {\n\t\tt.Fatalf(\"Missing field in client info: capacity\")\n\t}\n\tvv, ok := v.(float64)\n\tif !ok {\n\t\tt.Fatalf(\"Failed to decode capacity field\")\n\t}\n\treturn uint64(vv)\n}\n\nfunc getCapacityInfo(ctx context.Context, t *testing.T, server *rpc.Client) (minCap, totalCap uint64) {\n\tvar res map[string]interface{}\n\tif err := server.CallContext(ctx, &res, \"les_serverInfo\"); err != nil {\n\t\tt.Fatalf(\"Failed to query server info: %v\", err)\n\t}\n\tdecode := func(s string) uint64 {\n\t\tv, ok := res[s]\n\t\tif !ok {\n\t\t\tt.Fatalf(\"Missing field in server info: %s\", s)\n\t\t}\n\t\tvv, ok := v.(float64)\n\t\tif !ok {\n\t\t\tt.Fatalf(\"Failed to decode server info field: %s\", s)\n\t\t}\n\t\treturn uint64(vv)\n\t}\n\tminCap = decode(\"minimumCapacity\")\n\ttotalCap = decode(\"totalCapacity\")\n\treturn\n}\n\nvar services = adapters.LifecycleConstructors{\n\t\"lesclient\": newLesClientService,\n\t\"lesserver\": newLesServerService,\n}\n\nfunc NewNetwork() (*simulations.Network, func(), error) {\n\tadapter, adapterTeardown, err := NewAdapter(*simAdapter, services)\n\tif err != nil {\n\t\treturn nil, adapterTeardown, err\n\t}\n\tdefaultService := \"streamer\"\n\tnet := simulations.NewNetwork(adapter, &simulations.NetworkConfig{\n\t\tID:             \"0\",\n\t\tDefaultService: defaultService,\n\t})\n\tteardown := func() {\n\t\tadapterTeardown()\n\t\tnet.Shutdown()\n\t}\n\treturn net, teardown, nil\n}\n\nfunc NewAdapter(adapterType string, services adapters.LifecycleConstructors) (adapter adapters.NodeAdapter, teardown func(), err error) {\n\tteardown = func() {}\n\tswitch adapterType {\n\tcase \"sim\":\n\t\tadapter = adapters.NewSimAdapter(services)\n\t\t//\tcase \"socket\":\n\t\t//\t\tadapter = 
adapters.NewSocketAdapter(services)\n\tcase \"exec\":\n\t\tbaseDir, err0 := ioutil.TempDir(\"\", \"les-test\")\n\t\tif err0 != nil {\n\t\t\treturn nil, teardown, err0\n\t\t}\n\t\tteardown = func() { os.RemoveAll(baseDir) }\n\t\tadapter = adapters.NewExecAdapter(baseDir)\n\t/*case \"docker\":\n\tadapter, err = adapters.NewDockerAdapter()\n\tif err != nil {\n\t\treturn nil, teardown, err\n\t}*/\n\tdefault:\n\t\treturn nil, teardown, errors.New(\"adapter needs to be one of sim, socket, exec, docker\")\n\t}\n\treturn adapter, teardown, nil\n}\n\nfunc testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir []string, test func(ctx context.Context, net *simulations.Network, servers []*simulations.Node, clients []*simulations.Node) bool) bool {\n\tnet, teardown, err := NewNetwork()\n\tdefer teardown()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create network: %v\", err)\n\t}\n\ttimeout := 1800 * time.Second\n\tctx, cancel := context.WithTimeout(context.Background(), timeout)\n\tdefer cancel()\n\n\tservers := make([]*simulations.Node, serverCount)\n\tclients := make([]*simulations.Node, clientCount)\n\n\tfor i := range clients {\n\t\tclientconf := adapters.RandomNodeConfig()\n\t\tclientconf.Lifecycles = []string{\"lesclient\"}\n\t\tif len(clientDir) == clientCount {\n\t\t\tclientconf.DataDir = clientDir[i]\n\t\t}\n\t\tclient, err := net.NewNodeWithConfig(clientconf)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create client: %v\", err)\n\t\t}\n\t\tclients[i] = client\n\t}\n\n\tfor i := range servers {\n\t\tserverconf := adapters.RandomNodeConfig()\n\t\tserverconf.Lifecycles = []string{\"lesserver\"}\n\t\tif len(serverDir) == serverCount {\n\t\t\tserverconf.DataDir = serverDir[i]\n\t\t}\n\t\tserver, err := net.NewNodeWithConfig(serverconf)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create server: %v\", err)\n\t\t}\n\t\tservers[i] = server\n\t}\n\n\tfor _, client := range clients {\n\t\tif err := net.Start(client.ID()); err != nil {\n\t\t\tt.Fatalf(\"Failed to start client node: %v\", err)\n\t\t}\n\t}\n\tfor _, server := range servers {\n\t\tif err := net.Start(server.ID()); err != nil {\n\t\t\tt.Fatalf(\"Failed to start server node: %v\", err)\n\t\t}\n\t}\n\n\treturn test(ctx, net, servers, clients)\n}\n\nfunc newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {\n\tconfig := ethconfig.Defaults\n\tconfig.SyncMode = downloader.LightSync\n\tconfig.Ethash.PowMode = ethash.ModeFake\n\treturn New(stack, &config)\n}\n\nfunc newLesServerService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) {\n\tconfig := ethconfig.Defaults\n\tconfig.SyncMode = downloader.FullSync\n\tconfig.LightServ = testServerCapacity\n\tconfig.LightPeers = testMaxClients\n\tethereum, err := eth.New(stack, &config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = NewLesServer(stack, ethereum, &config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ethereum, nil\n}\n"
  },
  {
    "path": "les/bloombits.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/bitutil\"\n\t\"github.com/ethereum/go-ethereum/light\"\n)\n\nconst (\n\t// bloomServiceThreads is the number of goroutines used globally by an Ethereum\n\t// instance to service bloombits lookups for all running filters.\n\tbloomServiceThreads = 16\n\n\t// bloomFilterThreads is the number of goroutines used locally per filter to\n\t// multiplex requests onto the global servicing goroutines.\n\tbloomFilterThreads = 3\n\n\t// bloomRetrievalBatch is the maximum number of bloom bit retrievals to service\n\t// in a single batch.\n\tbloomRetrievalBatch = 16\n\n\t// bloomRetrievalWait is the maximum time to wait for enough bloom bit requests\n\t// to accumulate request an entire batch (avoiding hysteresis).\n\tbloomRetrievalWait = time.Microsecond * 100\n)\n\n// startBloomHandlers starts a batch of goroutines to accept bloom bit database\n// retrievals from possibly a range of filters and serving the data to satisfy.\nfunc (eth *LightEthereum) startBloomHandlers(sectionSize uint64) {\n\tfor i := 0; i < bloomServiceThreads; i++ {\n\t\tgo func() {\n\t\t\tdefer eth.wg.Done()\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-eth.closeCh:\n\t\t\t\t\treturn\n\n\t\t\t\tcase request := <-eth.bloomRequests:\n\t\t\t\t\ttask := <-request\n\t\t\t\t\ttask.Bitsets = make([][]byte, len(task.Sections))\n\t\t\t\t\tcompVectors, err := light.GetBloomBits(task.Context, eth.odr, task.Bit, task.Sections)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tfor i := range task.Sections {\n\t\t\t\t\t\t\tif blob, err := bitutil.DecompressBytes(compVectors[i], int(sectionSize/8)); err == nil {\n\t\t\t\t\t\t\t\ttask.Bitsets[i] = blob\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\ttask.Error = err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttask.Error = err\n\t\t\t\t\t}\n\t\t\t\t\trequest <- task\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n"
  },
  {
    "path": "les/checkpointoracle/oracle.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package checkpointoracle is a wrapper of checkpoint oracle contract with\n// additional rules defined. This package can be used both in LES client or\n// server side for offering oracle related APIs.\npackage checkpointoracle\n\nimport (\n\t\"encoding/binary\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/accounts/abi/bind\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/contracts/checkpointoracle\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\n// CheckpointOracle is responsible for offering the latest stable checkpoint\n// generated and announced by the contract admins on-chain. The checkpoint can\n// be verified by clients locally during the checkpoint syncing.\ntype CheckpointOracle struct {\n\tconfig   *params.CheckpointOracleConfig\n\tcontract *checkpointoracle.CheckpointOracle\n\n\trunning  int32                                 // Flag whether the contract backend is set or not\n\tgetLocal func(uint64) params.TrustedCheckpoint // Function used to retrieve local checkpoint\n\n\tcheckMu              sync.Mutex                // Mutex to sync access to the fields below\n\tlastCheckTime        time.Time                 // Time we last checked the checkpoint\n\tlastCheckPoint       *params.TrustedCheckpoint // The last stable checkpoint\n\tlastCheckPointHeight uint64                    // The height of last stable checkpoint\n}\n\n// New creates a checkpoint oracle handler with given configs and callback.\nfunc New(config *params.CheckpointOracleConfig, getLocal func(uint64) params.TrustedCheckpoint) *CheckpointOracle {\n\treturn &CheckpointOracle{\n\t\tconfig:   config,\n\t\tgetLocal: getLocal,\n\t}\n}\n\n// Start binds the contract backend, initializes the oracle instance\n// and marks the status as available.\nfunc (oracle *CheckpointOracle) Start(backend bind.ContractBackend) {\n\tcontract, err := checkpointoracle.NewCheckpointOracle(oracle.config.Address, backend)\n\tif err != nil {\n\t\tlog.Error(\"Oracle contract binding failed\", \"err\", err)\n\t\treturn\n\t}\n\tif !atomic.CompareAndSwapInt32(&oracle.running, 0, 1) {\n\t\tlog.Error(\"Already bound and listening to registrar\")\n\t\treturn\n\t}\n\toracle.contract = contract\n}\n\n// IsRunning returns an indicator whether the oracle is running.\nfunc (oracle *CheckpointOracle) IsRunning() bool {\n\treturn atomic.LoadInt32(&oracle.running) == 1\n}\n\n// Contract returns the underlying raw checkpoint oracle contract.\nfunc (oracle *CheckpointOracle) Contract() *checkpointoracle.CheckpointOracle {\n\treturn oracle.contract\n}\n\n// StableCheckpoint returns 
the stable checkpoint which was generated by local\n// indexers and announced by trusted signers.\nfunc (oracle *CheckpointOracle) StableCheckpoint() (*params.TrustedCheckpoint, uint64) {\n\toracle.checkMu.Lock()\n\tdefer oracle.checkMu.Unlock()\n\tif time.Since(oracle.lastCheckTime) < 1*time.Minute {\n\t\treturn oracle.lastCheckPoint, oracle.lastCheckPointHeight\n\t}\n\t// Look it up properly\n\t// Retrieve the latest checkpoint from the contract, abort if empty\n\tlatest, hash, height, err := oracle.contract.Contract().GetLatestCheckpoint(nil)\n\toracle.lastCheckTime = time.Now()\n\tif err != nil || (latest == 0 && hash == [32]byte{}) {\n\t\toracle.lastCheckPointHeight = 0\n\t\toracle.lastCheckPoint = nil\n\t\treturn oracle.lastCheckPoint, oracle.lastCheckPointHeight\n\t}\n\tlocal := oracle.getLocal(latest)\n\n\t// The following scenarios may occur:\n\t//\n\t// * local node is out of sync so that it doesn't have the\n\t//   checkpoint which is registered in the contract.\n\t// * local checkpoint doesn't match the registered one.\n\t//\n\t// In both cases, no stable checkpoint will be returned.\n\tif local.HashEqual(hash) {\n\t\toracle.lastCheckPointHeight = height.Uint64()\n\t\toracle.lastCheckPoint = &local\n\t\treturn oracle.lastCheckPoint, oracle.lastCheckPointHeight\n\t}\n\treturn nil, 0\n}\n\n// VerifySigners recovers the signer addresses according to the signature and\n// checks whether there are enough approvals to finalize the checkpoint.\nfunc (oracle *CheckpointOracle) VerifySigners(index uint64, hash [32]byte, signatures [][]byte) (bool, []common.Address) {\n\t// Short circuit if the given signatures don't reach the threshold.\n\tif len(signatures) < int(oracle.config.Threshold) {\n\t\treturn false, nil\n\t}\n\tvar (\n\t\tsigners []common.Address\n\t\tchecked = make(map[common.Address]struct{})\n\t)\n\tfor i := 0; i < len(signatures); i++ {\n\t\tif len(signatures[i]) != 65 {\n\t\t\tcontinue\n\t\t}\n\t\t// EIP 191 style signatures\n\t\t//\n\t\t// Arguments when calculating hash to validate\n\t\t// 1: byte(0x19) - the initial 0x19 byte\n\t\t// 2: byte(0) - the version byte (data with intended validator)\n\t\t// 3: this - the validator address\n\t\t// --  Application specific data\n\t\t// 4 : checkpoint section_index (uint64)\n\t\t// 5 : checkpoint hash (bytes32)\n\t\t//     hash = keccak256(checkpoint_index, section_head, cht_root, bloom_root)\n\t\tbuf := make([]byte, 8)\n\t\tbinary.BigEndian.PutUint64(buf, index)\n\t\tdata := append([]byte{0x19, 0x00}, append(oracle.config.Address.Bytes(), append(buf, hash[:]...)...)...)\n\t\tsignatures[i][64] -= 27 // Transform V from 27/28 to 0/1 according to the yellow paper for verification.\n\t\tpubkey, err := crypto.Ecrecover(crypto.Keccak256(data), signatures[i])\n\t\tif err != nil {\n\t\t\treturn false, nil\n\t\t}\n\t\tvar signer common.Address\n\t\tcopy(signer[:], crypto.Keccak256(pubkey[1:])[12:])\n\t\tif _, exist := checked[signer]; exist {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, s := range oracle.config.Signers {\n\t\t\tif s == signer {\n\t\t\t\tsigners = append(signers, signer)\n\t\t\t\tchecked[signer] = struct{}{}\n\t\t\t}\n\t\t}\n\t}\n\tthreshold := oracle.config.Threshold\n\tif uint64(len(signers)) < threshold {\n\t\tlog.Warn(\"Not enough signers to approve checkpoint\", \"signers\", len(signers), \"threshold\", threshold)\n\t\treturn false, nil\n\t}\n\treturn true, signers\n}\n"
  },
  {
    "path": "les/client.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package les implements the Light Ethereum Subprotocol.\npackage les\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/accounts\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/hexutil\"\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/consensus\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/bloombits\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/eth/filters\"\n\t\"github.com/ethereum/go-ethereum/eth/gasprice\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/internal/ethapi\"\n\t\"github.com/ethereum/go-ethereum/les/vflux\"\n\tvfc \"github.com/ethereum/go-ethereum/les/vflux/client\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/node\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\ntype LightEthereum struct {\n\tlesCommons\n\n\tpeers              *serverPeerSet\n\treqDist            *requestDistributor\n\tretriever          *retrieveManager\n\todr                *LesOdr\n\trelay              *lesTxRelay\n\thandler            *clientHandler\n\ttxPool             *light.TxPool\n\tblockchain         *light.LightChain\n\tserverPool         *vfc.ServerPool\n\tserverPoolIterator enode.Iterator\n\tpruner             *pruner\n\n\tbloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests\n\tbloomIndexer  *core.ChainIndexer             // Bloom indexer operating during block imports\n\n\tApiBackend     *LesApiBackend\n\teventMux       *event.TypeMux\n\tengine         consensus.Engine\n\taccountManager *accounts.Manager\n\tnetRPCService  *ethapi.PublicNetAPI\n\n\tp2pServer  *p2p.Server\n\tp2pConfig  *p2p.Config\n\tudpEnabled bool\n}\n\n// New creates an instance of the light client.\nfunc New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) {\n\tchainDb, err := stack.OpenDatabase(\"lightchaindata\", config.DatabaseCache, config.DatabaseHandles, \"eth/db/chaindata/\", false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlesDb, err := stack.OpenDatabase(\"les.client\", 0, 0, \"eth/db/lesclient/\", false)\n\tif err != nil {\n\t\treturn nil, 
err\n\t}\n\tchainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideBerlin)\n\tif _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {\n\t\treturn nil, genesisErr\n\t}\n\tlog.Info(\"Initialised chain configuration\", \"config\", chainConfig)\n\n\tpeers := newServerPeerSet()\n\tleth := &LightEthereum{\n\t\tlesCommons: lesCommons{\n\t\t\tgenesis:     genesisHash,\n\t\t\tconfig:      config,\n\t\t\tchainConfig: chainConfig,\n\t\t\tiConfig:     light.DefaultClientIndexerConfig,\n\t\t\tchainDb:     chainDb,\n\t\t\tlesDb:       lesDb,\n\t\t\tcloseCh:     make(chan struct{}),\n\t\t},\n\t\tpeers:          peers,\n\t\teventMux:       stack.EventMux(),\n\t\treqDist:        newRequestDistributor(peers, &mclock.System{}),\n\t\taccountManager: stack.AccountManager(),\n\t\tengine:         ethconfig.CreateConsensusEngine(stack, chainConfig, &config.Ethash, nil, false, chainDb),\n\t\tbloomRequests:  make(chan chan *bloombits.Retrieval),\n\t\tbloomIndexer:   core.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations),\n\t\tp2pServer:      stack.Server(),\n\t\tp2pConfig:      &stack.Config().P2P,\n\t\tudpEnabled:     stack.Config().P2P.DiscoveryV5,\n\t}\n\n\tvar prenegQuery vfc.QueryFunc\n\tif leth.udpEnabled {\n\t\tprenegQuery = leth.prenegQuery\n\t}\n\tleth.serverPool, leth.serverPoolIterator = vfc.NewServerPool(lesDb, []byte(\"serverpool:\"), time.Second, prenegQuery, &mclock.System{}, config.UltraLightServers, requestList)\n\tleth.serverPool.AddMetrics(suggestedTimeoutGauge, totalValueGauge, serverSelectableGauge, serverConnectedGauge, sessionValueMeter, serverDialedMeter)\n\n\tleth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool.GetTimeout)\n\tleth.relay = newLesTxRelay(peers, leth.retriever)\n\n\tleth.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, leth.peers, leth.retriever)\n\tleth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequency, params.HelperTrieConfirmations, config.LightNoPrune)\n\tleth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, leth.odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency, config.LightNoPrune)\n\tleth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer)\n\n\tcheckpoint := config.Checkpoint\n\tif checkpoint == nil {\n\t\tcheckpoint = params.TrustedCheckpoints[genesisHash]\n\t}\n\t// Note: NewLightChain adds the trusted checkpoint so it needs an ODR with\n\t// indexers already set but not started yet\n\tif leth.blockchain, err = light.NewLightChain(leth.odr, leth.chainConfig, leth.engine, checkpoint); err != nil {\n\t\treturn nil, err\n\t}\n\tleth.chainReader = leth.blockchain\n\tleth.txPool = light.NewTxPool(leth.chainConfig, leth.blockchain, leth.relay)\n\n\t// Set up checkpoint oracle.\n\tleth.oracle = leth.setupOracle(stack, genesisHash, config)\n\n\t// Note: AddChildIndexer starts the update process for the child\n\tleth.bloomIndexer.AddChildIndexer(leth.bloomTrieIndexer)\n\tleth.chtIndexer.Start(leth.blockchain)\n\tleth.bloomIndexer.Start(leth.blockchain)\n\n\t// Start a light chain pruner to delete useless historical data.\n\tleth.pruner = newPruner(chainDb, leth.chtIndexer, leth.bloomTrieIndexer)\n\n\t// Rewind the chain in case of an incompatible config upgrade.\n\tif compat, ok := genesisErr.(*params.ConfigCompatError); ok {\n\t\tlog.Warn(\"Rewinding chain to upgrade configuration\", \"err\", 
compat)\n\t\tleth.blockchain.SetHead(compat.RewindTo)\n\t\trawdb.WriteChainConfig(chainDb, genesisHash, chainConfig)\n\t}\n\n\tleth.ApiBackend = &LesApiBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, leth, nil}\n\tgpoParams := config.GPO\n\tif gpoParams.Default == nil {\n\t\tgpoParams.Default = config.Miner.GasPrice\n\t}\n\tleth.ApiBackend.gpo = gasprice.NewOracle(leth.ApiBackend, gpoParams)\n\n\tleth.handler = newClientHandler(config.UltraLightServers, config.UltraLightFraction, checkpoint, leth)\n\tif leth.handler.ulc != nil {\n\t\tlog.Warn(\"Ultra light client is enabled\", \"trustedNodes\", len(leth.handler.ulc.keys), \"minTrustedFraction\", leth.handler.ulc.fraction)\n\t\tleth.blockchain.DisableCheckFreq()\n\t}\n\n\tleth.netRPCService = ethapi.NewPublicNetAPI(leth.p2pServer, leth.config.NetworkId)\n\n\t// Register the backend on the node\n\tstack.RegisterAPIs(leth.APIs())\n\tstack.RegisterProtocols(leth.Protocols())\n\tstack.RegisterLifecycle(leth)\n\n\t// Check for unclean shutdown\n\tif uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(chainDb); err != nil {\n\t\tlog.Error(\"Could not update unclean-shutdown-marker list\", \"error\", err)\n\t} else {\n\t\tif discards > 0 {\n\t\t\tlog.Warn(\"Old unclean shutdowns found\", \"count\", discards)\n\t\t}\n\t\tfor _, tstamp := range uncleanShutdowns {\n\t\t\tt := time.Unix(int64(tstamp), 0)\n\t\t\tlog.Warn(\"Unclean shutdown detected\", \"booted\", t,\n\t\t\t\t\"age\", common.PrettyAge(t))\n\t\t}\n\t}\n\treturn leth, nil\n}\n\n// VfluxRequest sends a batch of requests to the given node through discv5 UDP TalkRequest and returns the responses\nfunc (s *LightEthereum) VfluxRequest(n *enode.Node, reqs vflux.Requests) vflux.Replies {\n\tif !s.udpEnabled {\n\t\treturn nil\n\t}\n\treqsEnc, _ := rlp.EncodeToBytes(&reqs)\n\trepliesEnc, _ := s.p2pServer.DiscV5.TalkRequest(s.serverPool.DialNode(n), \"vfx\", reqsEnc)\n\tvar replies vflux.Replies\n\tif len(repliesEnc) == 0 || rlp.DecodeBytes(repliesEnc, &replies) != nil {\n\t\treturn nil\n\t}\n\treturn replies\n}\n\n// vfxVersion returns the version number of the \"les\" service subdomain of the vflux UDP\n// service, as advertised in the ENR record\nfunc (s *LightEthereum) vfxVersion(n *enode.Node) uint {\n\tif n.Seq() == 0 {\n\t\tvar err error\n\t\tif !s.udpEnabled {\n\t\t\treturn 0\n\t\t}\n\t\tif n, err = s.p2pServer.DiscV5.RequestENR(n); n != nil && err == nil && n.Seq() != 0 {\n\t\t\ts.serverPool.Persist(n)\n\t\t} else {\n\t\t\treturn 0\n\t\t}\n\t}\n\n\tvar les []rlp.RawValue\n\tif err := n.Load(enr.WithEntry(\"les\", &les)); err != nil || len(les) < 1 {\n\t\treturn 0\n\t}\n\tvar version uint\n\trlp.DecodeBytes(les[0], &version) // Ignore additional fields (for forward compatibility).\n\treturn version\n}\n\n// prenegQuery sends a capacity query to the given server node to determine whether\n// a connection slot is immediately available\nfunc (s *LightEthereum) prenegQuery(n *enode.Node) int {\n\tif s.vfxVersion(n) < 1 {\n\t\t// UDP query not supported, always try TCP connection\n\t\treturn 1\n\t}\n\n\tvar requests vflux.Requests\n\trequests.Add(\"les\", vflux.CapacityQueryName, vflux.CapacityQueryReq{\n\t\tBias:      180,\n\t\tAddTokens: []vflux.IntOrInf{{}},\n\t})\n\treplies := s.VfluxRequest(n, requests)\n\tvar cqr vflux.CapacityQueryReply\n\tif replies.Get(0, &cqr) != nil || len(cqr) != 1 { // Note: Get returns an error if replies is nil\n\t\treturn -1\n\t}\n\tif cqr[0] > 0 {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\ntype LightDummyAPI struct{}\n\n// 
Etherbase is the address that mining rewards will be sent to\nfunc (s *LightDummyAPI) Etherbase() (common.Address, error) {\n\treturn common.Address{}, fmt.Errorf(\"mining is not supported in light mode\")\n}\n\n// Coinbase is the address that mining rewards will be sent to (alias for Etherbase)\nfunc (s *LightDummyAPI) Coinbase() (common.Address, error) {\n\treturn common.Address{}, fmt.Errorf(\"mining is not supported in light mode\")\n}\n\n// Hashrate returns the POW hashrate\nfunc (s *LightDummyAPI) Hashrate() hexutil.Uint {\n\treturn 0\n}\n\n// Mining returns an indication of whether this node is currently mining.\nfunc (s *LightDummyAPI) Mining() bool {\n\treturn false\n}\n\n// APIs returns the collection of RPC services the ethereum package offers.\n// NOTE: some of these services probably need to be moved somewhere else.\nfunc (s *LightEthereum) APIs() []rpc.API {\n\tapis := ethapi.GetAPIs(s.ApiBackend)\n\tapis = append(apis, s.engine.APIs(s.BlockChain().HeaderChain())...)\n\treturn append(apis, []rpc.API{\n\t\t{\n\t\t\tNamespace: \"eth\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   &LightDummyAPI{},\n\t\t\tPublic:    true,\n\t\t}, {\n\t\t\tNamespace: \"eth\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   downloader.NewPublicDownloaderAPI(s.handler.downloader, s.eventMux),\n\t\t\tPublic:    true,\n\t\t}, {\n\t\t\tNamespace: \"eth\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   filters.NewPublicFilterAPI(s.ApiBackend, true, 5*time.Minute),\n\t\t\tPublic:    true,\n\t\t}, {\n\t\t\tNamespace: \"net\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   s.netRPCService,\n\t\t\tPublic:    true,\n\t\t}, {\n\t\t\tNamespace: \"les\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   NewPrivateLightAPI(&s.lesCommons),\n\t\t\tPublic:    false,\n\t\t}, {\n\t\t\tNamespace: \"vflux\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   s.serverPool.API(),\n\t\t\tPublic:    false,\n\t\t},\n\t}...)\n}\n\nfunc (s *LightEthereum) ResetWithGenesisBlock(gb *types.Block) {\n\ts.blockchain.ResetWithGenesisBlock(gb)\n}\n\nfunc (s *LightEthereum) BlockChain() *light.LightChain      { return s.blockchain }\nfunc (s *LightEthereum) TxPool() *light.TxPool              { return s.txPool }\nfunc (s *LightEthereum) Engine() consensus.Engine           { return s.engine }\nfunc (s *LightEthereum) LesVersion() int                    { return int(ClientProtocolVersions[0]) }\nfunc (s *LightEthereum) Downloader() *downloader.Downloader { return s.handler.downloader }\nfunc (s *LightEthereum) EventMux() *event.TypeMux           { return s.eventMux }\n\n// Protocols returns all the currently configured network protocols to start.\nfunc (s *LightEthereum) Protocols() []p2p.Protocol {\n\treturn s.makeProtocols(ClientProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {\n\t\tif p := s.peers.peer(id.String()); p != nil {\n\t\t\treturn p.Info()\n\t\t}\n\t\treturn nil\n\t}, s.serverPoolIterator)\n}\n\n// Start implements node.Lifecycle, starting all internal goroutines needed by the\n// light ethereum protocol implementation.\nfunc (s *LightEthereum) Start() error {\n\tlog.Warn(\"Light client mode is an experimental feature\")\n\n\tif s.udpEnabled && s.p2pServer.DiscV5 == nil {\n\t\ts.udpEnabled = false\n\t\tlog.Error(\"Discovery v5 is not initialized\")\n\t}\n\tdiscovery, err := s.setupDiscovery()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.serverPool.AddSource(discovery)\n\ts.serverPool.Start()\n\t// Start bloom request 
workers.\n\ts.wg.Add(bloomServiceThreads)\n\ts.startBloomHandlers(params.BloomBitsBlocksClient)\n\ts.handler.start()\n\n\treturn nil\n}\n\n// Stop implements node.Lifecycle, terminating all internal goroutines used by the\n// Ethereum protocol.\nfunc (s *LightEthereum) Stop() error {\n\tclose(s.closeCh)\n\ts.serverPool.Stop()\n\ts.peers.close()\n\ts.reqDist.close()\n\ts.odr.Stop()\n\ts.relay.Stop()\n\ts.bloomIndexer.Close()\n\ts.chtIndexer.Close()\n\ts.blockchain.Stop()\n\ts.handler.stop()\n\ts.txPool.Stop()\n\ts.engine.Close()\n\ts.pruner.close()\n\ts.eventMux.Stop()\n\trawdb.PopUncleanShutdownMarker(s.chainDb)\n\ts.chainDb.Close()\n\ts.lesDb.Close()\n\ts.wg.Wait()\n\tlog.Info(\"Light ethereum stopped\")\n\treturn nil\n}\n"
  },
  {
    "path": "les/client_handler.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"context\"\n\t\"math/big\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\n// clientHandler is responsible for receiving and processing all incoming server\n// responses.\ntype clientHandler struct {\n\tulc        *ulc\n\tforkFilter forkid.Filter\n\tcheckpoint *params.TrustedCheckpoint\n\tfetcher    *lightFetcher\n\tdownloader *downloader.Downloader\n\tbackend    *LightEthereum\n\n\tcloseCh chan struct{}\n\twg      sync.WaitGroup // WaitGroup used to track all connected peers.\n\n\t// Hooks used in the testing\n\tsyncStart func(header *types.Header) // Hook called when the syncing is started\n\tsyncEnd   func(header *types.Header) // Hook called when the syncing is done\n}\n\nfunc newClientHandler(ulcServers []string, ulcFraction int, checkpoint *params.TrustedCheckpoint, backend *LightEthereum) *clientHandler {\n\thandler := &clientHandler{\n\t\tforkFilter: forkid.NewFilter(backend.blockchain),\n\t\tcheckpoint: checkpoint,\n\t\tbackend:    backend,\n\t\tcloseCh:    make(chan struct{}),\n\t}\n\tif ulcServers != nil {\n\t\tulc, err := newULC(ulcServers, ulcFraction)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Failed to initialize ultra light client\")\n\t\t}\n\t\thandler.ulc = ulc\n\t\tlog.Info(\"Enable ultra light client mode\")\n\t}\n\tvar height uint64\n\tif checkpoint != nil {\n\t\theight = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1\n\t}\n\thandler.fetcher = newLightFetcher(backend.blockchain, backend.engine, backend.peers, handler.ulc, backend.chainDb, backend.reqDist, handler.synchronise)\n\thandler.downloader = downloader.New(height, backend.chainDb, nil, backend.eventMux, nil, backend.blockchain, handler.removePeer)\n\thandler.backend.peers.subscribe((*downloaderPeerNotify)(handler))\n\treturn handler\n}\n\nfunc (h *clientHandler) start() {\n\th.fetcher.start()\n}\n\nfunc (h *clientHandler) stop() {\n\tclose(h.closeCh)\n\th.downloader.Terminate()\n\th.fetcher.stop()\n\th.wg.Wait()\n}\n\n// runPeer is the p2p protocol run function for the given version.\nfunc (h *clientHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {\n\ttrusted := false\n\tif h.ulc != nil {\n\t\ttrusted = h.ulc.trusted(p.ID())\n\t}\n\tpeer := newServerPeer(int(version), h.backend.config.NetworkId, trusted, p, 
newMeteredMsgWriter(rw, int(version)))\n\tdefer peer.close()\n\th.wg.Add(1)\n\tdefer h.wg.Done()\n\terr := h.handle(peer)\n\treturn err\n}\n\nfunc (h *clientHandler) handle(p *serverPeer) error {\n\tif h.backend.peers.len() >= h.backend.config.LightPeers && !p.Peer.Info().Network.Trusted {\n\t\treturn p2p.DiscTooManyPeers\n\t}\n\tp.Log().Debug(\"Light Ethereum peer connected\", \"name\", p.Name())\n\n\t// Execute the LES handshake\n\tforkid := forkid.NewID(h.backend.blockchain.Config(), h.backend.genesis, h.backend.blockchain.CurrentHeader().Number.Uint64())\n\tif err := p.Handshake(h.backend.blockchain.Genesis().Hash(), forkid, h.forkFilter); err != nil {\n\t\tp.Log().Debug(\"Light Ethereum handshake failed\", \"err\", err)\n\t\treturn err\n\t}\n\t// Register peer with the server pool\n\tif h.backend.serverPool != nil {\n\t\tif nvt, err := h.backend.serverPool.RegisterNode(p.Node()); err == nil {\n\t\t\tp.setValueTracker(nvt)\n\t\t\tp.updateVtParams()\n\t\t\tdefer func() {\n\t\t\t\tp.setValueTracker(nil)\n\t\t\t\th.backend.serverPool.UnregisterNode(p.Node())\n\t\t\t}()\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\t// Register the peer locally\n\tif err := h.backend.peers.register(p); err != nil {\n\t\tp.Log().Error(\"Light Ethereum peer registration failed\", \"err\", err)\n\t\treturn err\n\t}\n\n\tserverConnectionGauge.Update(int64(h.backend.peers.len()))\n\n\tconnectedAt := mclock.Now()\n\tdefer func() {\n\t\th.backend.peers.unregister(p.id)\n\t\tconnectionTimer.Update(time.Duration(mclock.Now() - connectedAt))\n\t\tserverConnectionGauge.Update(int64(h.backend.peers.len()))\n\t}()\n\th.fetcher.announce(p, &announceData{Hash: p.headInfo.Hash, Number: p.headInfo.Number, Td: p.headInfo.Td})\n\n\t// Mark that the peer has started being served.\n\tatomic.StoreUint32(&p.serving, 1)\n\tdefer atomic.StoreUint32(&p.serving, 0)\n\n\t// Spawn a main loop to handle all incoming messages.\n\tfor {\n\t\tif err := h.handleMsg(p); err != nil {\n\t\t\tp.Log().Debug(\"Light Ethereum message handling failed\", \"err\", err)\n\t\t\tp.fcServer.DumpLogs()\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n// handleMsg is invoked whenever an inbound message is received from a remote\n// peer. 
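Responses record their flow-control buffer value via fcServer and are\n// dispatched by message code to the retriever, the fetcher or the downloader. 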
The remote connection is torn down upon returning any error.\nfunc (h *clientHandler) handleMsg(p *serverPeer) error {\n\t// Read the next message from the remote peer, and ensure it's fully consumed\n\tmsg, err := p.rw.ReadMsg()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.Log().Trace(\"Light Ethereum message arrived\", \"code\", msg.Code, \"bytes\", msg.Size)\n\n\tif msg.Size > ProtocolMaxMsgSize {\n\t\treturn errResp(ErrMsgTooLarge, \"%v > %v\", msg.Size, ProtocolMaxMsgSize)\n\t}\n\tdefer msg.Discard()\n\n\tvar deliverMsg *Msg\n\n\t// Handle the message depending on its contents\n\tswitch {\n\tcase msg.Code == AnnounceMsg:\n\t\tp.Log().Trace(\"Received announce message\")\n\t\tvar req announceData\n\t\tif err := msg.Decode(&req); err != nil {\n\t\t\treturn errResp(ErrDecode, \"%v: %v\", msg, err)\n\t\t}\n\t\tif err := req.sanityCheck(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tupdate, size := req.Update.decode()\n\t\tif p.rejectUpdate(size) {\n\t\t\treturn errResp(ErrRequestRejected, \"\")\n\t\t}\n\t\tp.updateFlowControl(update)\n\t\tp.updateVtParams()\n\n\t\tif req.Hash != (common.Hash{}) {\n\t\t\tif p.announceType == announceTypeNone {\n\t\t\t\treturn errResp(ErrUnexpectedResponse, \"\")\n\t\t\t}\n\t\t\tif p.announceType == announceTypeSigned {\n\t\t\t\tif err := req.checkSignature(p.ID(), update); err != nil {\n\t\t\t\t\tp.Log().Trace(\"Invalid announcement signature\", \"err\", err)\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tp.Log().Trace(\"Valid announcement signature\")\n\t\t\t}\n\t\t\tp.Log().Trace(\"Announce message content\", \"number\", req.Number, \"hash\", req.Hash, \"td\", req.Td, \"reorg\", req.ReorgDepth)\n\n\t\t\t// Update peer head information first and then notify the announcement\n\t\t\tp.updateHead(req.Hash, req.Number, req.Td)\n\t\t\th.fetcher.announce(p, &req)\n\t\t}\n\tcase msg.Code == BlockHeadersMsg:\n\t\tp.Log().Trace(\"Received block header response message\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tHeaders   []*types.Header\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\theaders := resp.Headers\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tp.answeredRequest(resp.ReqID)\n\n\t\t// Filter out the explicitly requested header by the retriever\n\t\tif h.backend.retriever.requested(resp.ReqID) {\n\t\t\tdeliverMsg = &Msg{\n\t\t\t\tMsgType: MsgBlockHeaders,\n\t\t\t\tReqID:   resp.ReqID,\n\t\t\t\tObj:     resp.Headers,\n\t\t\t}\n\t\t} else {\n\t\t\t// Filter out any explicitly requested headers, deliver the rest to the downloader\n\t\t\tfilter := len(headers) == 1\n\t\t\tif filter {\n\t\t\t\theaders = h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers)\n\t\t\t}\n\t\t\tif len(headers) != 0 || !filter {\n\t\t\t\tif err := h.downloader.DeliverHeaders(p.id, headers); err != nil {\n\t\t\t\t\tlog.Debug(\"Failed to deliver headers\", \"err\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase msg.Code == BlockBodiesMsg:\n\t\tp.Log().Trace(\"Received block bodies response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tData      []*types.Body\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tp.answeredRequest(resp.ReqID)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgBlockBodies,\n\t\t\tReqID:   resp.ReqID,\n\t\t\tObj:     resp.Data,\n\t\t}\n\tcase msg.Code == CodeMsg:\n\t\tp.Log().Trace(\"Received code response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV 
uint64\n\t\t\tData      [][]byte\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tp.answeredRequest(resp.ReqID)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgCode,\n\t\t\tReqID:   resp.ReqID,\n\t\t\tObj:     resp.Data,\n\t\t}\n\tcase msg.Code == ReceiptsMsg:\n\t\tp.Log().Trace(\"Received receipts response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tReceipts  []types.Receipts\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tp.answeredRequest(resp.ReqID)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgReceipts,\n\t\t\tReqID:   resp.ReqID,\n\t\t\tObj:     resp.Receipts,\n\t\t}\n\tcase msg.Code == ProofsV2Msg:\n\t\tp.Log().Trace(\"Received les/2 proofs response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tData      light.NodeList\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tp.answeredRequest(resp.ReqID)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgProofsV2,\n\t\t\tReqID:   resp.ReqID,\n\t\t\tObj:     resp.Data,\n\t\t}\n\tcase msg.Code == HelperTrieProofsMsg:\n\t\tp.Log().Trace(\"Received helper trie proof response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tData      HelperTrieResps\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tp.answeredRequest(resp.ReqID)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgHelperTrieProofs,\n\t\t\tReqID:   resp.ReqID,\n\t\t\tObj:     resp.Data,\n\t\t}\n\tcase msg.Code == TxStatusMsg:\n\t\tp.Log().Trace(\"Received tx status response\")\n\t\tvar resp struct {\n\t\t\tReqID, BV uint64\n\t\t\tStatus    []light.TxStatus\n\t\t}\n\t\tif err := msg.Decode(&resp); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ReceivedReply(resp.ReqID, resp.BV)\n\t\tp.answeredRequest(resp.ReqID)\n\t\tdeliverMsg = &Msg{\n\t\t\tMsgType: MsgTxStatus,\n\t\t\tReqID:   resp.ReqID,\n\t\t\tObj:     resp.Status,\n\t\t}\n\tcase msg.Code == StopMsg && p.version >= lpv3:\n\t\tp.freeze()\n\t\th.backend.retriever.frozen(p)\n\t\tp.Log().Debug(\"Service stopped\")\n\tcase msg.Code == ResumeMsg && p.version >= lpv3:\n\t\tvar bv uint64\n\t\tif err := msg.Decode(&bv); err != nil {\n\t\t\treturn errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t}\n\t\tp.fcServer.ResumeFreeze(bv)\n\t\tp.unfreeze()\n\t\tp.Log().Debug(\"Service resumed\")\n\tdefault:\n\t\tp.Log().Trace(\"Received invalid message\", \"code\", msg.Code)\n\t\treturn errResp(ErrInvalidMsgCode, \"%v\", msg.Code)\n\t}\n\t// Deliver the received response to retriever.\n\tif deliverMsg != nil {\n\t\tif err := h.backend.retriever.deliver(p, deliverMsg); err != nil {\n\t\t\tif val := p.errCount.Add(1, mclock.Now()); val > maxResponseErrors {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (h *clientHandler) removePeer(id string) {\n\th.backend.peers.unregister(id)\n}\n\ntype peerConnection struct {\n\thandler *clientHandler\n\tpeer    *serverPeer\n}\n\nfunc (pc *peerConnection) Head() (common.Hash, *big.Int) {\n\treturn pc.peer.HeadAndTd()\n}\n\nfunc (pc *peerConnection) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error {\n\trq 
:= &distReq{\n\t\tgetCost: func(dp distPeer) uint64 {\n\t\t\tpeer := dp.(*serverPeer)\n\t\t\treturn peer.getRequestCost(GetBlockHeadersMsg, amount)\n\t\t},\n\t\tcanSend: func(dp distPeer) bool {\n\t\t\treturn dp.(*serverPeer) == pc.peer\n\t\t},\n\t\trequest: func(dp distPeer) func() {\n\t\t\treqID := genReqID()\n\t\t\tpeer := dp.(*serverPeer)\n\t\t\tcost := peer.getRequestCost(GetBlockHeadersMsg, amount)\n\t\t\tpeer.fcServer.QueuedRequest(reqID, cost)\n\t\t\treturn func() { peer.requestHeadersByHash(reqID, origin, amount, skip, reverse) }\n\t\t},\n\t}\n\t_, ok := <-pc.handler.backend.reqDist.queue(rq)\n\tif !ok {\n\t\treturn light.ErrNoPeers\n\t}\n\treturn nil\n}\n\nfunc (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error {\n\trq := &distReq{\n\t\tgetCost: func(dp distPeer) uint64 {\n\t\t\tpeer := dp.(*serverPeer)\n\t\t\treturn peer.getRequestCost(GetBlockHeadersMsg, amount)\n\t\t},\n\t\tcanSend: func(dp distPeer) bool {\n\t\t\treturn dp.(*serverPeer) == pc.peer\n\t\t},\n\t\trequest: func(dp distPeer) func() {\n\t\t\treqID := genReqID()\n\t\t\tpeer := dp.(*serverPeer)\n\t\t\tcost := peer.getRequestCost(GetBlockHeadersMsg, amount)\n\t\t\tpeer.fcServer.QueuedRequest(reqID, cost)\n\t\t\treturn func() { peer.requestHeadersByNumber(reqID, origin, amount, skip, reverse) }\n\t\t},\n\t}\n\t_, ok := <-pc.handler.backend.reqDist.queue(rq)\n\tif !ok {\n\t\treturn light.ErrNoPeers\n\t}\n\treturn nil\n}\n\n// RetrieveSingleHeaderByNumber requests a single header by the specified block\n// number. This function will wait the response until it's timeout or delivered.\nfunc (pc *peerConnection) RetrieveSingleHeaderByNumber(context context.Context, number uint64) (*types.Header, error) {\n\treqID := genReqID()\n\trq := &distReq{\n\t\tgetCost: func(dp distPeer) uint64 {\n\t\t\tpeer := dp.(*serverPeer)\n\t\t\treturn peer.getRequestCost(GetBlockHeadersMsg, 1)\n\t\t},\n\t\tcanSend: func(dp distPeer) bool {\n\t\t\treturn dp.(*serverPeer) == pc.peer\n\t\t},\n\t\trequest: func(dp distPeer) func() {\n\t\t\tpeer := dp.(*serverPeer)\n\t\t\tcost := peer.getRequestCost(GetBlockHeadersMsg, 1)\n\t\t\tpeer.fcServer.QueuedRequest(reqID, cost)\n\t\t\treturn func() { peer.requestHeadersByNumber(reqID, number, 1, 0, false) }\n\t\t},\n\t}\n\tvar header *types.Header\n\tif err := pc.handler.backend.retriever.retrieve(context, reqID, rq, func(peer distPeer, msg *Msg) error {\n\t\tif msg.MsgType != MsgBlockHeaders {\n\t\t\treturn errInvalidMessageType\n\t\t}\n\t\theaders := msg.Obj.([]*types.Header)\n\t\tif len(headers) != 1 {\n\t\t\treturn errInvalidEntryCount\n\t\t}\n\t\theader = headers[0]\n\t\treturn nil\n\t}, nil); err != nil {\n\t\treturn nil, err\n\t}\n\treturn header, nil\n}\n\n// downloaderPeerNotify implements peerSetNotify\ntype downloaderPeerNotify clientHandler\n\nfunc (d *downloaderPeerNotify) registerPeer(p *serverPeer) {\n\th := (*clientHandler)(d)\n\tpc := &peerConnection{\n\t\thandler: h,\n\t\tpeer:    p,\n\t}\n\th.downloader.RegisterLightPeer(p.id, ethVersion, pc)\n}\n\nfunc (d *downloaderPeerNotify) unregisterPeer(p *serverPeer) {\n\th := (*clientHandler)(d)\n\th.downloader.UnregisterPeer(p.id)\n}\n"
  },
  {
    "path": "les/clientpool.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/les/vflux\"\n\tvfs \"github.com/ethereum/go-ethereum/les/vflux/server\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\nconst (\n\tdefaultNegExpTC = 3600 // default time constant (in seconds) for exponentially reducing negative balance\n\n\t// defaultConnectedBias is applied to already connected clients So that\n\t// already connected client won't be kicked out very soon and we\n\t// can ensure all connected clients can have enough time to request\n\t// or sync some data.\n\t//\n\t// todo(rjl493456442) make it configurable. It can be the option of\n\t// free trial time!\n\tdefaultConnectedBias = time.Minute * 3\n\tinactiveTimeout      = time.Second * 10\n)\n\n// clientPool implements a client database that assigns a priority to each client\n// based on a positive and negative balance. Positive balance is externally assigned\n// to prioritized clients and is decreased with connection time and processed\n// requests (unless the price factors are zero). If the positive balance is zero\n// then negative balance is accumulated.\n//\n// Balance tracking and priority calculation for connected clients is done by\n// balanceTracker. activeQueue ensures that clients with the lowest positive or\n// highest negative balance get evicted when the total capacity allowance is full\n// and new clients with a better balance want to connect.\n//\n// Already connected nodes receive a small bias in their favor in order to avoid\n// accepting and instantly kicking out clients. In theory, we try to ensure that\n// each client can have several minutes of connection time.\n//\n// Balances of disconnected clients are stored in nodeDB including positive balance\n// and negative banalce. Boeth positive balance and negative balance will decrease\n// exponentially. 
If the balance is low enough, then the record will be dropped.\ntype clientPool struct {\n\tvfs.BalanceTrackerSetup\n\tvfs.PriorityPoolSetup\n\tlock       sync.Mutex\n\tclock      mclock.Clock\n\tclosed     bool\n\tremovePeer func(enode.ID)\n\tsynced     func() bool\n\tns         *nodestate.NodeStateMachine\n\tpp         *vfs.PriorityPool\n\tbt         *vfs.BalanceTracker\n\n\tdefaultPosFactors, defaultNegFactors vfs.PriceFactors\n\tposExpTC, negExpTC                   uint64\n\tminCap                               uint64 // The minimal capacity value allowed for any client\n\tconnectedBias                        time.Duration\n\tcapLimit                             uint64\n}\n\n// clientPoolPeer represents a client peer in the pool.\n// Positive balances are assigned to node key while negative balances are assigned\n// to freeClientId. Currently network IP address without port is used because\n// clients have a limited access to IP addresses while new node keys can be easily\n// generated so it would be useless to assign a negative value to them.\ntype clientPoolPeer interface {\n\tNode() *enode.Node\n\tfreeClientId() string\n\tupdateCapacity(uint64)\n\tfreeze()\n\tallowInactive() bool\n}\n\n// clientInfo defines all information required by clientpool.\ntype clientInfo struct {\n\tnode                *enode.Node\n\taddress             string\n\tpeer                clientPoolPeer\n\tconnected, priority bool\n\tconnectedAt         mclock.AbsTime\n\tbalance             *vfs.NodeBalance\n}\n\n// newClientPool creates a new client pool\nfunc newClientPool(ns *nodestate.NodeStateMachine, lesDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID), synced func() bool) *clientPool {\n\tpool := &clientPool{\n\t\tns:                  ns,\n\t\tBalanceTrackerSetup: balanceTrackerSetup,\n\t\tPriorityPoolSetup:   priorityPoolSetup,\n\t\tclock:               clock,\n\t\tminCap:              minCap,\n\t\tconnectedBias:       connectedBias,\n\t\tremovePeer:          removePeer,\n\t\tsynced:              synced,\n\t}\n\tpool.bt = vfs.NewBalanceTracker(ns, balanceTrackerSetup, lesDb, clock, &utils.Expirer{}, &utils.Expirer{})\n\tpool.pp = vfs.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4)\n\n\t// set default expiration constants used by tests\n\t// Note: server overwrites this if token sale is active\n\tpool.bt.SetExpirationTCs(0, defaultNegExpTC)\n\n\tns.SubscribeState(pool.InactiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {\n\t\tif newState.Equals(pool.InactiveFlag) {\n\t\t\tns.AddTimeout(node, pool.InactiveFlag, inactiveTimeout)\n\t\t}\n\t\tif oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.InactiveFlag.Or(pool.PriorityFlag)) {\n\t\t\tns.SetStateSub(node, pool.InactiveFlag, nodestate.Flags{}, 0) // remove timeout\n\t\t}\n\t})\n\n\tns.SubscribeState(pool.ActiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {\n\t\tc, _ := ns.GetField(node, clientInfoField).(*clientInfo)\n\t\tif c == nil {\n\t\t\treturn\n\t\t}\n\t\tc.priority = newState.HasAll(pool.PriorityFlag)\n\t\tif newState.Equals(pool.ActiveFlag) {\n\t\t\tcap, _ := ns.GetField(node, pool.CapacityField).(uint64)\n\t\t\tif cap > minCap {\n\t\t\t\tpool.pp.RequestCapacity(node, minCap, 0, true)\n\t\t\t}\n\t\t}\n\t})\n\n\tns.SubscribeState(pool.InactiveFlag.Or(pool.ActiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {\n\t\tif oldState.IsEmpty() 
{\n\t\t\tclientConnectedMeter.Mark(1)\n\t\t\tlog.Debug(\"Client connected\", \"id\", node.ID())\n\t\t}\n\t\tif oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.ActiveFlag) {\n\t\t\tclientActivatedMeter.Mark(1)\n\t\t\tlog.Debug(\"Client activated\", \"id\", node.ID())\n\t\t}\n\t\tif oldState.Equals(pool.ActiveFlag) && newState.Equals(pool.InactiveFlag) {\n\t\t\tclientDeactivatedMeter.Mark(1)\n\t\t\tlog.Debug(\"Client deactivated\", \"id\", node.ID())\n\t\t\tc, _ := ns.GetField(node, clientInfoField).(*clientInfo)\n\t\t\tif c == nil || !c.peer.allowInactive() {\n\t\t\t\tpool.removePeer(node.ID())\n\t\t\t}\n\t\t}\n\t\tif newState.IsEmpty() {\n\t\t\tclientDisconnectedMeter.Mark(1)\n\t\t\tlog.Debug(\"Client disconnected\", \"id\", node.ID())\n\t\t\tpool.removePeer(node.ID())\n\t\t}\n\t})\n\n\tvar totalConnected uint64\n\tns.SubscribeField(pool.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {\n\t\toldCap, _ := oldValue.(uint64)\n\t\tnewCap, _ := newValue.(uint64)\n\t\ttotalConnected += newCap - oldCap\n\t\ttotalConnectedGauge.Update(int64(totalConnected))\n\t\tc, _ := ns.GetField(node, clientInfoField).(*clientInfo)\n\t\tif c != nil {\n\t\t\tc.peer.updateCapacity(newCap)\n\t\t}\n\t})\n\treturn pool\n}\n\n// stop shuts the client pool down\nfunc (f *clientPool) stop() {\n\tf.lock.Lock()\n\tf.closed = true\n\tf.lock.Unlock()\n\tf.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {\n\t\t// enforces saving all balances in BalanceTracker\n\t\tf.disconnectNode(node)\n\t})\n\tf.bt.Stop()\n}\n\n// connect should be called after a successful handshake. If the connection was\n// rejected, there is no need to call disconnect.\nfunc (f *clientPool) connect(peer clientPoolPeer) (uint64, error) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\t// Short circuit if clientPool is already closed.\n\tif f.closed {\n\t\treturn 0, fmt.Errorf(\"Client pool is already closed\")\n\t}\n\t// Dedup connected peers.\n\tnode, freeID := peer.Node(), peer.freeClientId()\n\tif f.ns.GetField(node, clientInfoField) != nil {\n\t\tlog.Debug(\"Client already connected\", \"address\", freeID, \"id\", node.ID().String())\n\t\treturn 0, fmt.Errorf(\"Client already connected address=%s id=%s\", freeID, node.ID().String())\n\t}\n\tnow := f.clock.Now()\n\tc := &clientInfo{\n\t\tnode:        node,\n\t\taddress:     freeID,\n\t\tpeer:        peer,\n\t\tconnected:   true,\n\t\tconnectedAt: now,\n\t}\n\tf.ns.SetField(node, clientInfoField, c)\n\tf.ns.SetField(node, connAddressField, freeID)\n\tif c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil {\n\t\tf.disconnect(peer)\n\t\treturn 0, nil\n\t}\n\tc.balance.SetPriceFactors(f.defaultPosFactors, f.defaultNegFactors)\n\n\tf.ns.SetState(node, f.InactiveFlag, nodestate.Flags{}, 0)\n\tvar allowed bool\n\tf.ns.Operation(func() {\n\t\t_, allowed = f.pp.RequestCapacity(node, f.minCap, f.connectedBias, true)\n\t})\n\tif allowed {\n\t\treturn f.minCap, nil\n\t}\n\tif !peer.allowInactive() {\n\t\tf.disconnect(peer)\n\t}\n\treturn 0, nil\n}\n\n// setConnectedBias sets the connection bias, which is applied to already connected clients\n// so that already connected clients won't be kicked out very soon and we can ensure all\n// connected clients can have enough time to request or sync some data.\nfunc (f *clientPool) setConnectedBias(bias time.Duration) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tf.connectedBias = bias\n\tf.pp.SetActiveBias(bias)\n}\n\n// disconnect 
should be called when a connection is terminated. If the disconnection\n// was initiated by the pool itself using disconnectFn then calling disconnect is\n// not necessary but permitted.\nfunc (f *clientPool) disconnect(p clientPoolPeer) {\n\tf.disconnectNode(p.Node())\n}\n\n// disconnectNode removes node fields and flags related to connected status\nfunc (f *clientPool) disconnectNode(node *enode.Node) {\n\tf.ns.SetField(node, connAddressField, nil)\n\tf.ns.SetField(node, clientInfoField, nil)\n}\n\n// setDefaultFactors sets the default price factors applied to subsequently connected clients\nfunc (f *clientPool) setDefaultFactors(posFactors, negFactors vfs.PriceFactors) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tf.defaultPosFactors = posFactors\n\tf.defaultNegFactors = negFactors\n}\n\n// capacityInfo returns the total capacity allowance, the total capacity of connected\n// clients and the total capacity of connected and prioritized clients\nfunc (f *clientPool) capacityInfo() (uint64, uint64, uint64) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\t// total priority active cap will be supported when the token issuer module is added\n\t_, activeCap := f.pp.Active()\n\treturn f.capLimit, activeCap, 0\n}\n\n// setLimits sets the maximum number and total capacity of connected clients,\n// dropping some of them if necessary.\nfunc (f *clientPool) setLimits(totalConn int, totalCap uint64) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tf.capLimit = totalCap\n\tf.pp.SetLimits(uint64(totalConn), totalCap)\n}\n\n// setCapacity sets the assigned capacity of a connected client\nfunc (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint64, bias time.Duration, setCap bool) (uint64, error) {\n\tc, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)\n\tif c == nil {\n\t\tif setCap {\n\t\t\treturn 0, fmt.Errorf(\"client %064x is not connected\", node.ID())\n\t\t}\n\t\tc = &clientInfo{node: node}\n\t\tf.ns.SetField(node, clientInfoField, c)\n\t\tf.ns.SetField(node, connAddressField, freeID)\n\t\tif c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil {\n\t\t\tlog.Error(\"BalanceField is missing\", \"node\", node.ID())\n\t\t\treturn 0, fmt.Errorf(\"BalanceField of %064x is missing\", node.ID())\n\t\t}\n\t\tdefer func() {\n\t\t\tf.ns.SetField(node, connAddressField, nil)\n\t\t\tf.ns.SetField(node, clientInfoField, nil)\n\t\t}()\n\t}\n\tvar (\n\t\tminPriority int64\n\t\tallowed     bool\n\t)\n\tf.ns.Operation(func() {\n\t\tif !setCap || c.priority {\n\t\t\t// check clientInfo.priority inside Operation to ensure thread safety\n\t\t\tminPriority, allowed = f.pp.RequestCapacity(node, capacity, bias, setCap)\n\t\t}\n\t})\n\tif allowed {\n\t\treturn 0, nil\n\t}\n\tmissing := c.balance.PosBalanceMissing(minPriority, capacity, bias)\n\tif missing < 1 {\n\t\t// ensure that we never return 0 missing and insufficient priority error\n\t\tmissing = 1\n\t}\n\treturn missing, errNoPriority\n}\n\n// setCapacityLocked is the thread-safe variant of setCapacity: it acquires f.lock itself\n// and should be used when the lock is not already held\nfunc (f *clientPool) setCapacityLocked(node *enode.Node, freeID string, capacity uint64, minConnTime time.Duration, setCap bool) (uint64, error) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\treturn f.setCapacity(node, freeID, capacity, minConnTime, setCap)\n}\n\n// forClients calls the supplied callback for either the listed node IDs or all connected\n// nodes. 
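(Nodes not yet tracked by the state machine are represented by temporary records\n// created with enode.SignNull.) 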
It passes a valid clientInfo to the callback and ensures that the necessary\n// fields and flags are set in order for BalanceTracker and PriorityPool to work even if\n// the node is not connected.\nfunc (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tif len(ids) == 0 {\n\t\tf.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {\n\t\t\tc, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)\n\t\t\tif c != nil {\n\t\t\t\tcb(c)\n\t\t\t}\n\t\t})\n\t} else {\n\t\tfor _, id := range ids {\n\t\t\tnode := f.ns.GetNode(id)\n\t\t\tif node == nil {\n\t\t\t\tnode = enode.SignNull(&enr.Record{}, id)\n\t\t\t}\n\t\t\tc, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)\n\t\t\tif c != nil {\n\t\t\t\tcb(c)\n\t\t\t} else {\n\t\t\t\tc = &clientInfo{node: node}\n\t\t\t\tf.ns.SetField(node, clientInfoField, c)\n\t\t\t\tf.ns.SetField(node, connAddressField, \"\")\n\t\t\t\tif c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance != nil {\n\t\t\t\t\tcb(c)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Error(\"BalanceField is missing\")\n\t\t\t\t}\n\t\t\t\tf.ns.SetField(node, connAddressField, nil)\n\t\t\t\tf.ns.SetField(node, clientInfoField, nil)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// serveCapQuery serves a vflux capacity query. It receives multiple token amount values\n// and a bias time value. For each given token amount it calculates the maximum achievable\n// capacity in case the amount is added to the balance.\nfunc (f *clientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte {\n\tvar req vflux.CapacityQueryReq\n\tif rlp.DecodeBytes(data, &req) != nil {\n\t\treturn nil\n\t}\n\tif l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen {\n\t\treturn nil\n\t}\n\tresult := make(vflux.CapacityQueryReply, len(req.AddTokens))\n\tif !f.synced() {\n\t\tcapacityQueryZeroMeter.Mark(1)\n\t\treply, _ := rlp.EncodeToBytes(&result)\n\t\treturn reply\n\t}\n\n\tnode := f.ns.GetNode(id)\n\tif node == nil {\n\t\tnode = enode.SignNull(&enr.Record{}, id)\n\t}\n\tc, _ := f.ns.GetField(node, clientInfoField).(*clientInfo)\n\tif c == nil {\n\t\tc = &clientInfo{node: node}\n\t\tf.ns.SetField(node, clientInfoField, c)\n\t\tf.ns.SetField(node, connAddressField, freeID)\n\t\tdefer func() {\n\t\t\tf.ns.SetField(node, connAddressField, nil)\n\t\t\tf.ns.SetField(node, clientInfoField, nil)\n\t\t}()\n\t\tif c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil {\n\t\t\tlog.Error(\"BalanceField is missing\", \"node\", node.ID())\n\t\t\treturn nil\n\t\t}\n\t}\n\t// use vfs.CapacityCurve to answer request for multiple newly bought token amounts\n\tcurve := f.pp.GetCapacityCurve().Exclude(id)\n\tbias := time.Second * time.Duration(req.Bias)\n\tif f.connectedBias > bias {\n\t\tbias = f.connectedBias\n\t}\n\tpb, _ := c.balance.GetBalance()\n\tfor i, addTokens := range req.AddTokens {\n\t\tadd := addTokens.Int64()\n\t\tresult[i] = curve.MaxCapacity(func(capacity uint64) int64 {\n\t\t\treturn c.balance.EstimatePriority(capacity, add, 0, bias, false) / int64(capacity)\n\t\t})\n\t\tif add <= 0 && uint64(-add) >= pb && result[i] > f.minCap {\n\t\t\tresult[i] = f.minCap\n\t\t}\n\t\tif result[i] < f.minCap {\n\t\t\tresult[i] = 0\n\t\t}\n\t}\n\t// add first result to metrics (don't care about priority client multi-queries yet)\n\tif result[0] == 0 {\n\t\tcapacityQueryZeroMeter.Mark(1)\n\t} else {\n\t\tcapacityQueryNonZeroMeter.Mark(1)\n\t}\n\treply, _ := 
rlp.EncodeToBytes(&result)\n\treturn reply\n}\n"
  },
  {
    "path": "les/clientpool_test.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\tvfs \"github.com/ethereum/go-ethereum/les/vflux/server\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\nfunc TestClientPoolL10C100Free(t *testing.T) {\n\ttestClientPool(t, 10, 100, 0, true)\n}\n\nfunc TestClientPoolL40C200Free(t *testing.T) {\n\ttestClientPool(t, 40, 200, 0, true)\n}\n\nfunc TestClientPoolL100C300Free(t *testing.T) {\n\ttestClientPool(t, 100, 300, 0, true)\n}\n\nfunc TestClientPoolL10C100P4(t *testing.T) {\n\ttestClientPool(t, 10, 100, 4, false)\n}\n\nfunc TestClientPoolL40C200P30(t *testing.T) {\n\ttestClientPool(t, 40, 200, 30, false)\n}\n\nfunc TestClientPoolL100C300P20(t *testing.T) {\n\ttestClientPool(t, 100, 300, 20, false)\n}\n\nconst testClientPoolTicks = 100000\n\ntype poolTestPeer struct {\n\tnode            *enode.Node\n\tindex           int\n\tdisconnCh       chan int\n\tcap             uint64\n\tinactiveAllowed bool\n}\n\nfunc testStateMachine() *nodestate.NodeStateMachine {\n\treturn nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup)\n\n}\n\nfunc newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer {\n\treturn &poolTestPeer{\n\t\tindex:     i,\n\t\tdisconnCh: disconnCh,\n\t\tnode:      enode.SignNull(&enr.Record{}, enode.ID{byte(i % 256), byte(i >> 8)}),\n\t}\n}\n\nfunc (i *poolTestPeer) Node() *enode.Node {\n\treturn i.node\n}\n\nfunc (i *poolTestPeer) freeClientId() string {\n\treturn fmt.Sprintf(\"addr #%d\", i.index)\n}\n\nfunc (i *poolTestPeer) updateCapacity(cap uint64) {\n\ti.cap = cap\n}\n\nfunc (i *poolTestPeer) freeze() {}\n\nfunc (i *poolTestPeer) allowInactive() bool {\n\treturn i.inactiveAllowed\n}\n\nfunc getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) {\n\ttemp := pool.ns.GetField(p.node, clientInfoField) == nil\n\tif temp {\n\t\tpool.ns.SetField(p.node, connAddressField, p.freeClientId())\n\t}\n\tn, _ := pool.ns.GetField(p.node, pool.BalanceField).(*vfs.NodeBalance)\n\tpos, neg = n.GetBalance()\n\tif temp {\n\t\tpool.ns.SetField(p.node, connAddressField, nil)\n\t}\n\treturn\n}\n\nfunc addBalance(pool *clientPool, id enode.ID, amount int64) {\n\tpool.forClients([]enode.ID{id}, func(c *clientInfo) {\n\t\tc.balance.AddBalance(amount)\n\t})\n}\n\nfunc checkDiff(a, b uint64) bool {\n\tmaxDiff := (a + b) / 2000\n\tif maxDiff < 1 {\n\t\tmaxDiff = 1\n\t}\n\treturn a > b+maxDiff || b > a+maxDiff\n}\n\nfunc testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) 
{\n\trand.Seed(time.Now().UnixNano())\n\tvar (\n\t\tclock     mclock.Simulated\n\t\tdb        = rawdb.NewMemoryDatabase()\n\t\tconnected = make([]bool, clientCount)\n\t\tconnTicks = make([]int, clientCount)\n\t\tdisconnCh = make(chan int, clientCount)\n\t\tdisconnFn = func(id enode.ID) {\n\t\t\tdisconnCh <- int(id[0]) + int(id[1])<<8\n\t\t}\n\t\tpool = newClientPool(testStateMachine(), db, 1, 0, &clock, disconnFn, alwaysTrueFn)\n\t)\n\tpool.ns.Start()\n\n\tpool.setLimits(activeLimit, uint64(activeLimit))\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})\n\n\t// pool should accept new peers up to its connected limit\n\tfor i := 0; i < activeLimit; i++ {\n\t\tif cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {\n\t\t\tconnected[i] = true\n\t\t} else {\n\t\t\tt.Fatalf(\"Test peer #%d rejected\", i)\n\t\t}\n\t}\n\t// randomly connect and disconnect peers, expect to have a similar total connection time at the end\n\tfor tickCounter := 0; tickCounter < testClientPoolTicks; tickCounter++ {\n\t\tclock.Run(1 * time.Second)\n\n\t\tif tickCounter == testClientPoolTicks/4 {\n\t\t\t// give a positive balance to some of the peers\n\t\t\tamount := testClientPoolTicks / 2 * int64(time.Second) // enough for half of the simulation period\n\t\t\tfor i := 0; i < paidCount; i++ {\n\t\t\t\taddBalance(pool, newPoolTestPeer(i, disconnCh).node.ID(), amount)\n\t\t\t}\n\t\t}\n\n\t\ti := rand.Intn(clientCount)\n\t\tif connected[i] {\n\t\t\tif randomDisconnect {\n\t\t\t\tpool.disconnect(newPoolTestPeer(i, disconnCh))\n\t\t\t\tconnected[i] = false\n\t\t\t\tconnTicks[i] += tickCounter\n\t\t\t}\n\t\t} else {\n\t\t\tif cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 {\n\t\t\t\tconnected[i] = true\n\t\t\t\tconnTicks[i] -= tickCounter\n\t\t\t} else {\n\t\t\t\tpool.disconnect(newPoolTestPeer(i, disconnCh))\n\t\t\t}\n\t\t}\n\tpollDisconnects:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase i := <-disconnCh:\n\t\t\t\tpool.disconnect(newPoolTestPeer(i, disconnCh))\n\t\t\t\tif connected[i] {\n\t\t\t\t\tconnTicks[i] += tickCounter\n\t\t\t\t\tconnected[i] = false\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tbreak pollDisconnects\n\t\t\t}\n\t\t}\n\t}\n\n\texpTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2*(activeLimit-paidCount)/(clientCount-paidCount)\n\texpMin := expTicks - expTicks/5\n\texpMax := expTicks + expTicks/5\n\tpaidTicks := testClientPoolTicks/2*activeLimit/clientCount + testClientPoolTicks/2\n\tpaidMin := paidTicks - paidTicks/5\n\tpaidMax := paidTicks + paidTicks/5\n\n\t// check if the total connected time of peers are all in the expected range\n\tfor i, c := range connected {\n\t\tif c {\n\t\t\tconnTicks[i] += testClientPoolTicks\n\t\t}\n\t\tmin, max := expMin, expMax\n\t\tif i < paidCount {\n\t\t\t// expect a higher amount for clients with a positive balance\n\t\t\tmin, max = paidMin, paidMax\n\t\t}\n\t\tif connTicks[i] < min || connTicks[i] > max {\n\t\t\tt.Errorf(\"Total connected time of test node #%d (%d) outside expected range (%d to %d)\", i, connTicks[i], min, max)\n\t\t}\n\t}\n\tpool.stop()\n}\n\nfunc testPriorityConnect(t *testing.T, pool *clientPool, p *poolTestPeer, cap uint64, expSuccess bool) {\n\tif cap, _ := pool.connect(p); cap == 0 {\n\t\tif expSuccess {\n\t\t\tt.Fatalf(\"Failed to connect paid client\")\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n\tif _, err := pool.setCapacity(p.node, \"\", cap, defaultConnectedBias, true); err != nil 
{\n\t\tif expSuccess {\n\t\t\tt.Fatalf(\"Failed to raise capacity of paid client\")\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n\tif !expSuccess {\n\t\tt.Fatalf(\"Should reject high capacity paid client\")\n\t}\n}\n\nfunc TestConnectPaidClient(t *testing.T) {\n\tvar (\n\t\tclock mclock.Simulated\n\t\tdb    = rawdb.NewMemoryDatabase()\n\t)\n\tpool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}, alwaysTrueFn)\n\tpool.ns.Start()\n\tdefer pool.stop()\n\tpool.setLimits(10, uint64(10))\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})\n\n\t// Add balance for an external client and mark it as paid client\n\taddBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))\n\ttestPriorityConnect(t, pool, newPoolTestPeer(0, nil), 10, true)\n}\n\nfunc TestConnectPaidClientToSmallPool(t *testing.T) {\n\tvar (\n\t\tclock mclock.Simulated\n\t\tdb    = rawdb.NewMemoryDatabase()\n\t)\n\tpool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}, alwaysTrueFn)\n\tpool.ns.Start()\n\tdefer pool.stop()\n\tpool.setLimits(10, uint64(10)) // Total capacity limit is 10\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})\n\n\t// Add balance for an external client and mark it as paid client\n\taddBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute))\n\n\t// Connect a fat paid client to pool, should reject it.\n\ttestPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false)\n}\n\nfunc TestConnectPaidClientToFullPool(t *testing.T) {\n\tvar (\n\t\tclock mclock.Simulated\n\t\tdb    = rawdb.NewMemoryDatabase()\n\t)\n\tremoveFn := func(enode.ID) {} // Noop\n\tpool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn)\n\tpool.ns.Start()\n\tdefer pool.stop()\n\tpool.setLimits(10, uint64(10)) // Total capacity limit is 10\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})\n\n\tfor i := 0; i < 10; i++ {\n\t\taddBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20))\n\t\tpool.connect(newPoolTestPeer(i, nil))\n\t}\n\taddBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client\n\tif cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {\n\t\tt.Fatalf(\"Low balance paid client should be rejected\")\n\t}\n\tclock.Run(time.Second)\n\taddBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client\n\tif cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap == 0 {\n\t\tt.Fatalf(\"High balance paid client should be accepted\")\n\t}\n}\n\nfunc TestPaidClientKickedOut(t *testing.T) {\n\tvar (\n\t\tclock    mclock.Simulated\n\t\tdb       = rawdb.NewMemoryDatabase()\n\t\tkickedCh = make(chan int, 100)\n\t)\n\tremoveFn := func(id enode.ID) {\n\t\tkickedCh <- int(id[0])\n\t}\n\tpool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn)\n\tpool.ns.Start()\n\tpool.bt.SetExpirationTCs(0, 0)\n\tdefer pool.stop()\n\tpool.setLimits(10, uint64(10)) // Total capacity limit is 10\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, 
vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})\n\n\tfor i := 0; i < 10; i++ {\n\t\taddBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance\n\t\tpool.connect(newPoolTestPeer(i, kickedCh))\n\t\tclock.Run(time.Millisecond)\n\t}\n\tclock.Run(defaultConnectedBias + time.Second*11)\n\tif cap, _ := pool.connect(newPoolTestPeer(11, kickedCh)); cap == 0 {\n\t\tt.Fatalf(\"Free client should be accepted\")\n\t}\n\tselect {\n\tcase id := <-kickedCh:\n\t\tif id != 0 {\n\t\t\tt.Fatalf(\"Kicked client mismatch, want %v, got %v\", 0, id)\n\t\t}\n\tcase <-time.NewTimer(time.Second).C:\n\t\tt.Fatalf(\"timeout\")\n\t}\n}\n\nfunc TestConnectFreeClient(t *testing.T) {\n\tvar (\n\t\tclock mclock.Simulated\n\t\tdb    = rawdb.NewMemoryDatabase()\n\t)\n\tpool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}, alwaysTrueFn)\n\tpool.ns.Start()\n\tdefer pool.stop()\n\tpool.setLimits(10, uint64(10))\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})\n\tif cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 {\n\t\tt.Fatalf(\"Failed to connect free client\")\n\t}\n\ttestPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false)\n}\n\nfunc TestConnectFreeClientToFullPool(t *testing.T) {\n\tvar (\n\t\tclock mclock.Simulated\n\t\tdb    = rawdb.NewMemoryDatabase()\n\t)\n\tremoveFn := func(enode.ID) {} // Noop\n\tpool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn)\n\tpool.ns.Start()\n\tdefer pool.stop()\n\tpool.setLimits(10, uint64(10)) // Total capacity limit is 10\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})\n\n\tfor i := 0; i < 10; i++ {\n\t\tpool.connect(newPoolTestPeer(i, nil))\n\t}\n\tif cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 {\n\t\tt.Fatalf(\"New free client should be rejected\")\n\t}\n\tclock.Run(time.Minute)\n\tif cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap != 0 {\n\t\tt.Fatalf(\"New free client should be rejected\")\n\t}\n\tclock.Run(time.Millisecond)\n\tclock.Run(4 * time.Minute)\n\tif cap, _ := pool.connect(newPoolTestPeer(13, nil)); cap == 0 {\n\t\tt.Fatalf(\"Old client connects more than 5min should be kicked\")\n\t}\n}\n\nfunc TestFreeClientKickedOut(t *testing.T) {\n\tvar (\n\t\tclock  mclock.Simulated\n\t\tdb     = rawdb.NewMemoryDatabase()\n\t\tkicked = make(chan int, 100)\n\t)\n\tremoveFn := func(id enode.ID) { kicked <- int(id[0]) }\n\tpool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn)\n\tpool.ns.Start()\n\tdefer pool.stop()\n\tpool.setLimits(10, uint64(10)) // Total capacity limit is 10\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})\n\n\tfor i := 0; i < 10; i++ {\n\t\tpool.connect(newPoolTestPeer(i, kicked))\n\t\tclock.Run(time.Millisecond)\n\t}\n\tif cap, _ := pool.connect(newPoolTestPeer(10, kicked)); cap != 0 {\n\t\tt.Fatalf(\"New free client should be rejected\")\n\t}\n\tselect {\n\tcase <-kicked:\n\tcase <-time.NewTimer(time.Second).C:\n\t\tt.Fatalf(\"timeout\")\n\t}\n\tpool.disconnect(newPoolTestPeer(10, kicked))\n\tclock.Run(5 * time.Minute)\n\tfor i := 0; i < 10; i++ {\n\t\tpool.connect(newPoolTestPeer(i+10, 
kicked))\n\t}\n\tfor i := 0; i < 10; i++ {\n\t\tselect {\n\t\tcase id := <-kicked:\n\t\t\tif id >= 10 {\n\t\t\t\tt.Fatalf(\"Old client should be kicked, now got: %d\", id)\n\t\t\t}\n\t\tcase <-time.NewTimer(time.Second).C:\n\t\t\tt.Fatalf(\"timeout\")\n\t\t}\n\t}\n}\n\nfunc TestPositiveBalanceCalculation(t *testing.T) {\n\tvar (\n\t\tclock  mclock.Simulated\n\t\tdb     = rawdb.NewMemoryDatabase()\n\t\tkicked = make(chan int, 10)\n\t)\n\tremoveFn := func(id enode.ID) { kicked <- int(id[0]) } // record the ID of a kicked client\n\tpool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn)\n\tpool.ns.Start()\n\tdefer pool.stop()\n\tpool.setLimits(10, uint64(10)) // Total capacity limit is 10\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})\n\n\taddBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3))\n\ttestPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true)\n\tclock.Run(time.Minute)\n\n\tpool.disconnect(newPoolTestPeer(0, kicked))\n\tpb, _ := getBalance(pool, newPoolTestPeer(0, kicked))\n\tif checkDiff(pb, uint64(time.Minute*2)) {\n\t\tt.Fatalf(\"Positive balance mismatch, want %v, got %v\", uint64(time.Minute*2), pb)\n\t}\n}\n\nfunc TestDowngradePriorityClient(t *testing.T) {\n\tvar (\n\t\tclock  mclock.Simulated\n\t\tdb     = rawdb.NewMemoryDatabase()\n\t\tkicked = make(chan int, 10)\n\t)\n\tremoveFn := func(id enode.ID) { kicked <- int(id[0]) } // record the ID of a kicked client\n\tpool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn)\n\tpool.ns.Start()\n\tdefer pool.stop()\n\tpool.setLimits(10, uint64(10)) // Total capacity limit is 10\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1})\n\n\tp := newPoolTestPeer(0, kicked)\n\taddBalance(pool, p.node.ID(), int64(time.Minute))\n\ttestPriorityConnect(t, pool, p, 10, true)\n\tif p.cap != 10 {\n\t\tt.Fatalf(\"The capacity of priority peer hasn't been updated, got: %d\", p.cap)\n\t}\n\n\tclock.Run(time.Minute)             // All positive balance should be used up.\n\ttime.Sleep(300 * time.Millisecond) // Ensure the callback is called\n\tif p.cap != 1 {\n\t\tt.Fatalf(\"The capacity of peer should be downgraded, got: %d\", p.cap)\n\t}\n\tpb, _ := getBalance(pool, newPoolTestPeer(0, kicked))\n\tif pb != 0 {\n\t\tt.Fatalf(\"Positive balance mismatch, want %v, got %v\", 0, pb)\n\t}\n\n\taddBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute))\n\tpb, _ = getBalance(pool, newPoolTestPeer(0, kicked))\n\tif checkDiff(pb, uint64(time.Minute)) {\n\t\tt.Fatalf(\"Positive balance mismatch, want %v, got %v\", uint64(time.Minute), pb)\n\t}\n}\n\nfunc TestNegativeBalanceCalculation(t *testing.T) {\n\tvar (\n\t\tclock mclock.Simulated\n\t\tdb    = rawdb.NewMemoryDatabase()\n\t)\n\tpool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}, alwaysTrueFn)\n\tpool.ns.Start()\n\tdefer pool.stop()\n\tpool.setLimits(10, uint64(10)) // Total capacity limit is 10\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1})\n\n\tfor i := 0; i < 10; i++ {\n\t\tpool.connect(newPoolTestPeer(i, nil))\n\t}\n\tclock.Run(time.Second)\n\n\tfor i := 0; i < 10; i++ {\n\t\tpool.disconnect(newPoolTestPeer(i, 
nil))\n\t\t_, nb := getBalance(pool, newPoolTestPeer(i, nil))\n\t\tif nb != 0 {\n\t\t\tt.Fatalf(\"Short connection shouldn't be recorded\")\n\t\t}\n\t}\n\tfor i := 0; i < 10; i++ {\n\t\tpool.connect(newPoolTestPeer(i, nil))\n\t}\n\tclock.Run(time.Minute)\n\tfor i := 0; i < 10; i++ {\n\t\tpool.disconnect(newPoolTestPeer(i, nil))\n\t\t_, nb := getBalance(pool, newPoolTestPeer(i, nil))\n\t\texp := uint64(time.Minute) / 1000\n\t\texp -= exp / 120 // correct for negative balance expiration\n\t\tif checkDiff(nb, exp) {\n\t\t\tt.Fatalf(\"Negative balance mismatch, want %v, got %v\", exp, nb)\n\t\t}\n\t}\n}\n\nfunc TestInactiveClient(t *testing.T) {\n\tvar (\n\t\tclock mclock.Simulated\n\t\tdb    = rawdb.NewMemoryDatabase()\n\t)\n\tpool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}, alwaysTrueFn)\n\tpool.ns.Start()\n\tdefer pool.stop()\n\tpool.setLimits(2, uint64(2))\n\n\tp1 := newPoolTestPeer(1, nil)\n\tp1.inactiveAllowed = true\n\tp2 := newPoolTestPeer(2, nil)\n\tp2.inactiveAllowed = true\n\tp3 := newPoolTestPeer(3, nil)\n\tp3.inactiveAllowed = true\n\taddBalance(pool, p1.node.ID(), 1000*int64(time.Second))\n\taddBalance(pool, p3.node.ID(), 2000*int64(time.Second))\n\t// p1: 1000  p2: 0  p3: 2000\n\tp1.cap, _ = pool.connect(p1)\n\tif p1.cap != 1 {\n\t\tt.Fatalf(\"Failed to connect peer #1\")\n\t}\n\tp2.cap, _ = pool.connect(p2)\n\tif p2.cap != 1 {\n\t\tt.Fatalf(\"Failed to connect peer #2\")\n\t}\n\tp3.cap, _ = pool.connect(p3)\n\tif p3.cap != 1 {\n\t\tt.Fatalf(\"Failed to connect peer #3\")\n\t}\n\tif p2.cap != 0 {\n\t\tt.Fatalf(\"Failed to deactivate peer #2\")\n\t}\n\taddBalance(pool, p2.node.ID(), 3000*int64(time.Second))\n\t// p1: 1000  p2: 3000  p3: 2000\n\tif p2.cap != 1 {\n\t\tt.Fatalf(\"Failed to activate peer #2\")\n\t}\n\tif p1.cap != 0 {\n\t\tt.Fatalf(\"Failed to deactivate peer #1\")\n\t}\n\taddBalance(pool, p2.node.ID(), -2500*int64(time.Second))\n\t// p1: 1000  p2: 500  p3: 2000\n\tif p1.cap != 1 {\n\t\tt.Fatalf(\"Failed to activate peer #1\")\n\t}\n\tif p2.cap != 0 {\n\t\tt.Fatalf(\"Failed to deactivate peer #2\")\n\t}\n\tpool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0})\n\tp4 := newPoolTestPeer(4, nil)\n\taddBalance(pool, p4.node.ID(), 1500*int64(time.Second))\n\t// p1: 1000  p2: 500  p3: 2000  p4: 1500\n\tp4.cap, _ = pool.connect(p4)\n\tif p4.cap != 1 {\n\t\tt.Fatalf(\"Failed to activate peer #4\")\n\t}\n\tif p1.cap != 0 {\n\t\tt.Fatalf(\"Failed to deactivate peer #1\")\n\t}\n\tclock.Run(time.Second * 600)\n\t// manually trigger a check to avoid a long real-time wait\n\tpool.ns.SetState(p1.node, pool.UpdateFlag, nodestate.Flags{}, 0)\n\tpool.ns.SetState(p1.node, nodestate.Flags{}, pool.UpdateFlag, 0)\n\t// p1: 1000  p2: 500  p3: 2000  p4: 900\n\tif p1.cap != 1 {\n\t\tt.Fatalf(\"Failed to activate peer #1\")\n\t}\n\tif p4.cap != 0 {\n\t\tt.Fatalf(\"Failed to deactivate peer #4\")\n\t}\n\tpool.disconnect(p2)\n\tpool.disconnect(p4)\n\taddBalance(pool, p1.node.ID(), -1000*int64(time.Second))\n\tif p1.cap != 1 {\n\t\tt.Fatalf(\"Should not deactivate peer #1\")\n\t}\n\tif p2.cap != 0 {\n\t\tt.Fatalf(\"Should not activate peer #2\")\n\t}\n}\n"
  },
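  {
    "path": "les/clientpool_balance_sketch.go",
    "content": "// Hypothetical, self-contained sketch (not part of the les package; the file\n// name and all identifiers are illustrative only). It models the balance\n// arithmetic exercised by the client pool tests above under the price factors\n// they configure, assuming the cost of staying connected is simply\n// (TimeFactor + CapacityFactor*capacity) per nanosecond; the real vfs\n// implementation is more involved.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\n// priceFactors mirrors the shape of vfs.PriceFactors as used in the tests.\ntype priceFactors struct {\n\ttimeFactor, capacityFactor, requestFactor float64\n}\n\n// posBalanceAfter returns the remaining positive balance after staying\n// connected with the given capacity for dt, under the simplified cost model\n// stated above.\nfunc posBalanceAfter(balance int64, f priceFactors, capacity uint64, dt time.Duration) int64 {\n\tcost := int64((f.timeFactor + f.capacityFactor*float64(capacity)) * float64(dt))\n\tif cost > balance {\n\t\treturn 0\n\t}\n\treturn balance - cost\n}\n\nfunc main() {\n\t// The numbers from TestPositiveBalanceCalculation: 3 minutes of balance,\n\t// TimeFactor 1 and CapacityFactor 0, connected for 1 minute at capacity 10\n\t// leaves 2 minutes of balance.\n\tf := priceFactors{timeFactor: 1, capacityFactor: 0, requestFactor: 1}\n\tremaining := posBalanceAfter(int64(3*time.Minute), f, 10, time.Minute)\n\tfmt.Println(remaining == int64(2*time.Minute)) // true\n}\n"
  },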
  {
    "path": "les/commons.go",
    "content": "// Copyright 2018 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/ethclient\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/les/checkpointoracle\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/node\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\nfunc errResp(code errCode, format string, v ...interface{}) error {\n\treturn fmt.Errorf(\"%v - %v\", code, fmt.Sprintf(format, v...))\n}\n\ntype chainReader interface {\n\tCurrentHeader() *types.Header\n}\n\n// lesCommons contains fields needed by both server and client.\ntype lesCommons struct {\n\tgenesis                      common.Hash\n\tconfig                       *ethconfig.Config\n\tchainConfig                  *params.ChainConfig\n\tiConfig                      *light.IndexerConfig\n\tchainDb, lesDb               ethdb.Database\n\tchainReader                  chainReader\n\tchtIndexer, bloomTrieIndexer *core.ChainIndexer\n\toracle                       *checkpointoracle.CheckpointOracle\n\n\tcloseCh chan struct{}\n\twg      sync.WaitGroup\n}\n\n// NodeInfo represents a short summary of the Ethereum sub-protocol metadata\n// known about the host peer.\ntype NodeInfo struct {\n\tNetwork    uint64                   `json:\"network\"`    // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4)\n\tDifficulty *big.Int                 `json:\"difficulty\"` // Total difficulty of the host's blockchain\n\tGenesis    common.Hash              `json:\"genesis\"`    // SHA3 hash of the host's genesis block\n\tConfig     *params.ChainConfig      `json:\"config\"`     // Chain configuration for the fork rules\n\tHead       common.Hash              `json:\"head\"`       // SHA3 hash of the host's best owned block\n\tCHT        params.TrustedCheckpoint `json:\"cht\"`        // Trused CHT checkpoint for fast catchup\n}\n\n// makeProtocols creates protocol descriptors for the given LES versions.\nfunc (c *lesCommons) makeProtocols(versions []uint, runPeer func(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error, peerInfo func(id enode.ID) interface{}, dialCandidates enode.Iterator) []p2p.Protocol {\n\tprotos := make([]p2p.Protocol, len(versions))\n\tfor i, version := range versions {\n\t\tversion := version\n\t\tprotos[i] = p2p.Protocol{\n\t\t\tName:  
   \"les\",\n\t\t\tVersion:  version,\n\t\t\tLength:   ProtocolLengths[version],\n\t\t\tNodeInfo: c.nodeInfo,\n\t\t\tRun: func(peer *p2p.Peer, rw p2p.MsgReadWriter) error {\n\t\t\t\treturn runPeer(version, peer, rw)\n\t\t\t},\n\t\t\tPeerInfo:       peerInfo,\n\t\t\tDialCandidates: dialCandidates,\n\t\t}\n\t}\n\treturn protos\n}\n\n// nodeInfo retrieves some protocol metadata about the running host node.\nfunc (c *lesCommons) nodeInfo() interface{} {\n\thead := c.chainReader.CurrentHeader()\n\thash := head.Hash()\n\treturn &NodeInfo{\n\t\tNetwork:    c.config.NetworkId,\n\t\tDifficulty: rawdb.ReadTd(c.chainDb, hash, head.Number.Uint64()),\n\t\tGenesis:    c.genesis,\n\t\tConfig:     c.chainConfig,\n\t\tHead:       hash,\n\t\tCHT:        c.latestLocalCheckpoint(),\n\t}\n}\n\n// latestLocalCheckpoint finds the common stored section index and returns a set\n// of post-processed trie roots (CHT and BloomTrie) associated with the appropriate\n// section index and head hash as a local checkpoint package.\nfunc (c *lesCommons) latestLocalCheckpoint() params.TrustedCheckpoint {\n\tsections, _, _ := c.chtIndexer.Sections()\n\tsections2, _, _ := c.bloomTrieIndexer.Sections()\n\t// Cap the section index if the two sections are not consistent.\n\tif sections > sections2 {\n\t\tsections = sections2\n\t}\n\tif sections == 0 {\n\t\t// No checkpoint information can be provided.\n\t\treturn params.TrustedCheckpoint{}\n\t}\n\treturn c.localCheckpoint(sections - 1)\n}\n\n// localCheckpoint returns a set of post-processed trie roots (CHT and BloomTrie)\n// associated with the appropriate head hash by specific section index.\n//\n// The returned checkpoint is only the checkpoint generated by the local indexers,\n// not the stable checkpoint registered in the registrar contract.\nfunc (c *lesCommons) localCheckpoint(index uint64) params.TrustedCheckpoint {\n\tsectionHead := c.chtIndexer.SectionHead(index)\n\treturn params.TrustedCheckpoint{\n\t\tSectionIndex: index,\n\t\tSectionHead:  sectionHead,\n\t\tCHTRoot:      light.GetChtRoot(c.chainDb, index, sectionHead),\n\t\tBloomRoot:    light.GetBloomTrieRoot(c.chainDb, index, sectionHead),\n\t}\n}\n\n// setupOracle sets up the checkpoint oracle contract client.\nfunc (c *lesCommons) setupOracle(node *node.Node, genesis common.Hash, ethconfig *ethconfig.Config) *checkpointoracle.CheckpointOracle {\n\tconfig := ethconfig.CheckpointOracle\n\tif config == nil {\n\t\t// Try loading default config.\n\t\tconfig = params.CheckpointOracles[genesis]\n\t}\n\tif config == nil {\n\t\tlog.Info(\"Checkpoint oracle is not enabled\")\n\t\treturn nil\n\t}\n\tif config.Address == (common.Address{}) || uint64(len(config.Signers)) < config.Threshold {\n\t\tlog.Warn(\"Invalid checkpoint oracle config\")\n\t\treturn nil\n\t}\n\toracle := checkpointoracle.New(config, c.localCheckpoint)\n\trpcClient, _ := node.Attach()\n\tclient := ethclient.NewClient(rpcClient)\n\toracle.Start(client)\n\tlog.Info(\"Configured checkpoint oracle\", \"address\", config.Address, \"signers\", len(config.Signers), \"threshold\", config.Threshold)\n\treturn oracle\n}\n"
  },
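  {
    "path": "les/checkpoint_capping_sketch.go",
    "content": "// Hypothetical sketch (not part of the les package; file name and types are\n// stand-ins). It isolates the section-capping rule from\n// lesCommons.latestLocalCheckpoint: the CHT and BloomTrie indexers may have\n// completed a different number of sections, so the checkpoint is taken at the\n// smaller count minus one, or left empty when no section is complete.\npackage main\n\nimport \"fmt\"\n\n// checkpoint is a toy stand-in for params.TrustedCheckpoint.\ntype checkpoint struct {\n\tsectionIndex uint64\n\tempty        bool\n}\n\n// latestCheckpoint mirrors the capping logic: use the smaller of the two\n// section counts, and return an empty checkpoint if either indexer has not\n// finished a single section yet.\nfunc latestCheckpoint(chtSections, bloomTrieSections uint64) checkpoint {\n\tsections := chtSections\n\tif bloomTrieSections < sections {\n\t\tsections = bloomTrieSections\n\t}\n\tif sections == 0 {\n\t\treturn checkpoint{empty: true} // no checkpoint information can be provided\n\t}\n\treturn checkpoint{sectionIndex: sections - 1}\n}\n\nfunc main() {\n\tfmt.Println(latestCheckpoint(5, 3)) // {2 false}: capped by the slower indexer\n\tfmt.Println(latestCheckpoint(0, 7)) // {0 true}: nothing to provide yet\n}\n"
  },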
  {
    "path": "les/costtracker.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"encoding/binary\"\n\t\"math\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/les/flowcontrol\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/metrics\"\n)\n\nconst makeCostStats = false // make request cost statistics during operation\n\nvar (\n\t// average request cost estimates based on serving time\n\treqAvgTimeCost = requestCostTable{\n\t\tGetBlockHeadersMsg:     {150000, 30000},\n\t\tGetBlockBodiesMsg:      {0, 700000},\n\t\tGetReceiptsMsg:         {0, 1000000},\n\t\tGetCodeMsg:             {0, 450000},\n\t\tGetProofsV2Msg:         {0, 600000},\n\t\tGetHelperTrieProofsMsg: {0, 1000000},\n\t\tSendTxV2Msg:            {0, 450000},\n\t\tGetTxStatusMsg:         {0, 250000},\n\t}\n\t// maximum incoming message size estimates\n\treqMaxInSize = requestCostTable{\n\t\tGetBlockHeadersMsg:     {40, 0},\n\t\tGetBlockBodiesMsg:      {0, 40},\n\t\tGetReceiptsMsg:         {0, 40},\n\t\tGetCodeMsg:             {0, 80},\n\t\tGetProofsV2Msg:         {0, 80},\n\t\tGetHelperTrieProofsMsg: {0, 20},\n\t\tSendTxV2Msg:            {0, 16500},\n\t\tGetTxStatusMsg:         {0, 50},\n\t}\n\t// maximum outgoing message size estimates\n\treqMaxOutSize = requestCostTable{\n\t\tGetBlockHeadersMsg:     {0, 556},\n\t\tGetBlockBodiesMsg:      {0, 100000},\n\t\tGetReceiptsMsg:         {0, 200000},\n\t\tGetCodeMsg:             {0, 50000},\n\t\tGetProofsV2Msg:         {0, 4000},\n\t\tGetHelperTrieProofsMsg: {0, 4000},\n\t\tSendTxV2Msg:            {0, 100},\n\t\tGetTxStatusMsg:         {0, 100},\n\t}\n\t// request amounts that have to fit into the minimum buffer size minBufferMultiplier times\n\tminBufferReqAmount = map[uint64]uint64{\n\t\tGetBlockHeadersMsg:     192,\n\t\tGetBlockBodiesMsg:      1,\n\t\tGetReceiptsMsg:         1,\n\t\tGetCodeMsg:             1,\n\t\tGetProofsV2Msg:         1,\n\t\tGetHelperTrieProofsMsg: 16,\n\t\tSendTxV2Msg:            8,\n\t\tGetTxStatusMsg:         64,\n\t}\n\tminBufferMultiplier = 3\n)\n\nconst (\n\tmaxCostFactor    = 2    // ratio of maximum and average cost estimates\n\tbufLimitRatio    = 6000 // fixed bufLimit/MRR ratio\n\tgfUsageThreshold = 0.5\n\tgfUsageTC        = time.Second\n\tgfRaiseTC        = time.Second * 200\n\tgfDropTC         = time.Second * 50\n\tgfDbKey          = \"_globalCostFactorV6\"\n)\n\n// costTracker is responsible for calculating costs and cost estimates on the\n// server side. 
It continuously updates the global cost factor which is defined\n// as the number of cost units per nanosecond of serving time in a single thread.\n// It is based on statistics collected during serving requests in high-load periods\n// and practically acts as a one-dimensional request price scaling factor over the\n// pre-defined cost estimate table.\n//\n// The reason for dynamically maintaining the global factor on the server side is\n// that the estimated time cost of a request is fixed (hardcoded), while the\n// configuration of the machine running the server varies greatly. The request\n// serving time therefore differs greatly between machines, and even on the same\n// machine it can vary greatly under different request pressure.\n//\n// In order to limit resources more effectively, we apply the global factor to the\n// serving time to bring the result as close as possible to the estimated time cost,\n// regardless of whether the server is slow or fast. We also scale totalRecharge with\n// the global factor so that a fast server can serve more requests than estimated and\n// a slow server can reduce its request pressure.\n//\n// Instead of scaling the cost values, the real value of cost units is changed by\n// applying the factor to the serving times. This is more convenient because the\n// changes in the cost factor can be applied immediately without always notifying\n// the clients about the changed cost tables.\ntype costTracker struct {\n\tdb     ethdb.Database\n\tstopCh chan chan struct{}\n\n\tinSizeFactor  float64\n\toutSizeFactor float64\n\tfactor        float64\n\tutilTarget    float64\n\tminBufLimit   uint64\n\n\tgfLock          sync.RWMutex\n\treqInfoCh       chan reqInfo\n\ttotalRechargeCh chan uint64\n\n\tstats map[uint64][]uint64 // Used for testing purposes.\n\n\t// TestHooks\n\ttesting      bool            // Disable real cost evaluation for testing purposes.\n\ttestCostList RequestCostList // Customized cost table for testing purposes.\n}\n\n// newCostTracker creates a cost tracker and loads the cost factor statistics from the database.\n// It also returns the minimum capacity that can be assigned to any peer.\nfunc newCostTracker(db ethdb.Database, config *ethconfig.Config) (*costTracker, uint64) {\n\tutilTarget := float64(config.LightServ) * flowcontrol.FixedPointMultiplier / 100\n\tct := &costTracker{\n\t\tdb:         db,\n\t\tstopCh:     make(chan chan struct{}),\n\t\treqInfoCh:  make(chan reqInfo, 100),\n\t\tutilTarget: utilTarget,\n\t}\n\tif config.LightIngress > 0 {\n\t\tct.inSizeFactor = utilTarget / float64(config.LightIngress)\n\t}\n\tif config.LightEgress > 0 {\n\t\tct.outSizeFactor = utilTarget / float64(config.LightEgress)\n\t}\n\tif makeCostStats {\n\t\tct.stats = make(map[uint64][]uint64)\n\t\tfor code := range reqAvgTimeCost {\n\t\t\tct.stats[code] = make([]uint64, 10)\n\t\t}\n\t}\n\tct.gfLoop()\n\tcostList := ct.makeCostList(ct.globalFactor() * 1.25)\n\tfor _, c := range costList {\n\t\tamount := minBufferReqAmount[c.MsgCode]\n\t\tcost := c.BaseCost + amount*c.ReqCost\n\t\tif cost > ct.minBufLimit {\n\t\t\tct.minBufLimit = cost\n\t\t}\n\t}\n\tct.minBufLimit *= uint64(minBufferMultiplier)\n\treturn ct, (ct.minBufLimit-1)/bufLimitRatio + 1\n}\n\n// stop stops the cost tracker and saves the cost factor statistics to the database\nfunc (ct *costTracker) stop() {\n\tstopCh := make(chan struct{})\n\tct.stopCh <- stopCh\n\t<-stopCh\n\tif makeCostStats {\n\t\tct.printStats()\n\t}\n}\n\n// makeCostList returns upper cost estimates based on the hardcoded cost 
estimate\n// tables and the optionally specified incoming/outgoing bandwidth limits\nfunc (ct *costTracker) makeCostList(globalFactor float64) RequestCostList {\n\tmaxCost := func(avgTimeCost, inSize, outSize uint64) uint64 {\n\t\tcost := avgTimeCost * maxCostFactor\n\t\tinSizeCost := uint64(float64(inSize) * ct.inSizeFactor * globalFactor)\n\t\tif inSizeCost > cost {\n\t\t\tcost = inSizeCost\n\t\t}\n\t\toutSizeCost := uint64(float64(outSize) * ct.outSizeFactor * globalFactor)\n\t\tif outSizeCost > cost {\n\t\t\tcost = outSizeCost\n\t\t}\n\t\treturn cost\n\t}\n\tvar list RequestCostList\n\tfor code, data := range reqAvgTimeCost {\n\t\tbaseCost := maxCost(data.baseCost, reqMaxInSize[code].baseCost, reqMaxOutSize[code].baseCost)\n\t\treqCost := maxCost(data.reqCost, reqMaxInSize[code].reqCost, reqMaxOutSize[code].reqCost)\n\t\tif ct.minBufLimit != 0 {\n\t\t\t// if minBufLimit is set then always enforce maximum request cost <= minBufLimit\n\t\t\tmaxCost := baseCost + reqCost*minBufferReqAmount[code]\n\t\t\tif maxCost > ct.minBufLimit {\n\t\t\t\tmul := 0.999 * float64(ct.minBufLimit) / float64(maxCost)\n\t\t\t\tbaseCost = uint64(float64(baseCost) * mul)\n\t\t\t\treqCost = uint64(float64(reqCost) * mul)\n\t\t\t}\n\t\t}\n\n\t\tlist = append(list, requestCostListItem{\n\t\t\tMsgCode:  code,\n\t\t\tBaseCost: baseCost,\n\t\t\tReqCost:  reqCost,\n\t\t})\n\t}\n\treturn list\n}\n\n// reqInfo contains the estimated time cost and the actual request serving time\n// which acts as a feed source to update factor maintained by costTracker.\ntype reqInfo struct {\n\t// avgTimeCost is the estimated time cost corresponding to maxCostTable.\n\tavgTimeCost float64\n\n\t// servingTime is the CPU time corresponding to the actual processing of\n\t// the request.\n\tservingTime float64\n\n\t// msgCode indicates the type of request.\n\tmsgCode uint64\n}\n\n// gfLoop starts an event loop which updates the global cost factor which is\n// calculated as a weighted average of the average estimate / serving time ratio.\n// The applied weight equals the serving time if gfUsage is over a threshold,\n// zero otherwise. gfUsage is the recent average serving time per time unit in\n// an exponential moving window. 
This ensures that statistics are collected only\n// under high-load circumstances where the measured serving times are relevant.\n// The total recharge parameter of the flow control system, which controls the\n// total allowed serving time per second but is denominated in cost units, should\n// also be scaled with the cost factor and is likewise updated by this loop.\nfunc (ct *costTracker) gfLoop() {\n\tvar (\n\t\tfactor, totalRecharge        float64\n\t\tgfLog, recentTime, recentAvg float64\n\n\t\tlastUpdate, expUpdate = mclock.Now(), mclock.Now()\n\t)\n\n\t// Load historical cost factor statistics from the database.\n\tdata, _ := ct.db.Get([]byte(gfDbKey))\n\tif len(data) == 8 {\n\t\tgfLog = math.Float64frombits(binary.BigEndian.Uint64(data[:]))\n\t}\n\tct.factor = math.Exp(gfLog)\n\tfactor, totalRecharge = ct.factor, ct.utilTarget*ct.factor\n\n\t// In order to collect factor statistics under high request pressure,\n\t// we only adjust the factor when the recent factor usage is beyond the threshold.\n\tthreshold := gfUsageThreshold * float64(gfUsageTC) * ct.utilTarget / flowcontrol.FixedPointMultiplier\n\n\tgo func() {\n\t\tsaveCostFactor := func() {\n\t\t\tvar data [8]byte\n\t\t\tbinary.BigEndian.PutUint64(data[:], math.Float64bits(gfLog))\n\t\t\tct.db.Put([]byte(gfDbKey), data[:])\n\t\t\tlog.Debug(\"global cost factor saved\", \"value\", factor)\n\t\t}\n\t\tsaveTicker := time.NewTicker(time.Minute * 10)\n\t\tdefer saveTicker.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase r := <-ct.reqInfoCh:\n\t\t\t\trelCost := int64(factor * r.servingTime * 100 / r.avgTimeCost) // Convert the value to a percentage form\n\n\t\t\t\t// Record more metrics if we are debugging\n\t\t\t\tif metrics.EnabledExpensive {\n\t\t\t\t\tswitch r.msgCode {\n\t\t\t\t\tcase GetBlockHeadersMsg:\n\t\t\t\t\t\trelativeCostHeaderHistogram.Update(relCost)\n\t\t\t\t\tcase GetBlockBodiesMsg:\n\t\t\t\t\t\trelativeCostBodyHistogram.Update(relCost)\n\t\t\t\t\tcase GetReceiptsMsg:\n\t\t\t\t\t\trelativeCostReceiptHistogram.Update(relCost)\n\t\t\t\t\tcase GetCodeMsg:\n\t\t\t\t\t\trelativeCostCodeHistogram.Update(relCost)\n\t\t\t\t\tcase GetProofsV2Msg:\n\t\t\t\t\t\trelativeCostProofHistogram.Update(relCost)\n\t\t\t\t\tcase GetHelperTrieProofsMsg:\n\t\t\t\t\t\trelativeCostHelperProofHistogram.Update(relCost)\n\t\t\t\t\tcase SendTxV2Msg:\n\t\t\t\t\t\trelativeCostSendTxHistogram.Update(relCost)\n\t\t\t\t\tcase GetTxStatusMsg:\n\t\t\t\t\t\trelativeCostTxStatusHistogram.Update(relCost)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// SendTxV2 and GetTxStatus requests are two special cases.\n\t\t\t\t// All other requests will only put pressure on the database, and\n\t\t\t\t// the corresponding delay is relatively stable. 
These two, however,\n\t\t\t\t// involve a txpool query, which is usually unstable.\n\t\t\t\t//\n\t\t\t\t// TODO(rjl493456442): fix this.\n\t\t\t\tif r.msgCode == SendTxV2Msg || r.msgCode == GetTxStatusMsg {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\trequestServedMeter.Mark(int64(r.servingTime))\n\t\t\t\trequestServedTimer.Update(time.Duration(r.servingTime))\n\t\t\t\trequestEstimatedMeter.Mark(int64(r.avgTimeCost / factor))\n\t\t\t\trequestEstimatedTimer.Update(time.Duration(r.avgTimeCost / factor))\n\t\t\t\trelativeCostHistogram.Update(relCost)\n\n\t\t\t\tnow := mclock.Now()\n\t\t\t\tdt := float64(now - expUpdate)\n\t\t\t\texpUpdate = now\n\t\t\t\texp := math.Exp(-dt / float64(gfUsageTC))\n\n\t\t\t\t// calculate factor correction until now, based on previous values\n\t\t\t\tvar gfCorr float64\n\t\t\t\tmax := recentTime\n\t\t\t\tif recentAvg > max {\n\t\t\t\t\tmax = recentAvg\n\t\t\t\t}\n\t\t\t\t// we apply continuous correction when MAX(recentTime, recentAvg) > threshold\n\t\t\t\tif max > threshold {\n\t\t\t\t\t// calculate correction time between last expUpdate and now\n\t\t\t\t\tif max*exp >= threshold {\n\t\t\t\t\t\tgfCorr = dt\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgfCorr = math.Log(max/threshold) * float64(gfUsageTC)\n\t\t\t\t\t}\n\t\t\t\t\t// calculate log(factor) correction with the right direction and time constant\n\t\t\t\t\tif recentTime > recentAvg {\n\t\t\t\t\t\t// drop factor if actual serving times are larger than average estimates\n\t\t\t\t\t\tgfCorr /= -float64(gfDropTC)\n\t\t\t\t\t} else {\n\t\t\t\t\t\t// raise factor if actual serving times are smaller than average estimates\n\t\t\t\t\t\tgfCorr /= float64(gfRaiseTC)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// update recent cost values with current request\n\t\t\t\trecentTime = recentTime*exp + r.servingTime\n\t\t\t\trecentAvg = recentAvg*exp + r.avgTimeCost/factor\n\n\t\t\t\tif gfCorr != 0 {\n\t\t\t\t\t// Apply the correction to factor\n\t\t\t\t\tgfLog += gfCorr\n\t\t\t\t\tfactor = math.Exp(gfLog)\n\t\t\t\t\t// Notify outside modules of the new factor and totalRecharge.\n\t\t\t\t\tif time.Duration(now-lastUpdate) > time.Second {\n\t\t\t\t\t\ttotalRecharge, lastUpdate = ct.utilTarget*factor, now\n\t\t\t\t\t\tct.gfLock.Lock()\n\t\t\t\t\t\tct.factor = factor\n\t\t\t\t\t\tch := ct.totalRechargeCh\n\t\t\t\t\t\tct.gfLock.Unlock()\n\t\t\t\t\t\tif ch != nil {\n\t\t\t\t\t\t\tselect {\n\t\t\t\t\t\t\tcase ct.totalRechargeCh <- uint64(totalRecharge):\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tglobalFactorGauge.Update(int64(1000 * factor))\n\t\t\t\t\t\tlog.Debug(\"global cost factor updated\", \"factor\", factor)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\trecentServedGauge.Update(int64(recentTime))\n\t\t\t\trecentEstimatedGauge.Update(int64(recentAvg))\n\n\t\t\tcase <-saveTicker.C:\n\t\t\t\tsaveCostFactor()\n\n\t\t\tcase stopCh := <-ct.stopCh:\n\t\t\t\tsaveCostFactor()\n\t\t\t\tclose(stopCh)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\n// globalFactor returns the current value of the global cost factor\nfunc (ct *costTracker) globalFactor() float64 {\n\tct.gfLock.RLock()\n\tdefer ct.gfLock.RUnlock()\n\n\treturn ct.factor\n}\n\n// totalRecharge returns the current total recharge parameter which is used by\n// flowcontrol.ClientManager and is scaled by the global cost factor\nfunc (ct *costTracker) totalRecharge() uint64 {\n\tct.gfLock.RLock()\n\tdefer ct.gfLock.RUnlock()\n\n\treturn uint64(ct.factor * ct.utilTarget)\n}\n\n// subscribeTotalRecharge returns all future updates to the total recharge value\n// through a channel and also 
returns the current value\nfunc (ct *costTracker) subscribeTotalRecharge(ch chan uint64) uint64 {\n\tct.gfLock.Lock()\n\tdefer ct.gfLock.Unlock()\n\n\tct.totalRechargeCh = ch\n\treturn uint64(ct.factor * ct.utilTarget)\n}\n\n// updateStats updates the global cost factor and (if enabled) the real cost vs.\n// average estimate statistics\nfunc (ct *costTracker) updateStats(code, amount, servingTime, realCost uint64) {\n\tavg := reqAvgTimeCost[code]\n\tavgTimeCost := avg.baseCost + amount*avg.reqCost\n\tselect {\n\tcase ct.reqInfoCh <- reqInfo{float64(avgTimeCost), float64(servingTime), code}:\n\tdefault:\n\t}\n\tif makeCostStats {\n\t\trealCost <<= 4\n\t\tl := 0\n\t\tfor l < 9 && realCost > avgTimeCost {\n\t\t\tl++\n\t\t\trealCost >>= 1\n\t\t}\n\t\tatomic.AddUint64(&ct.stats[code][l], 1)\n\t}\n}\n\n// realCost calculates the final cost of a request based on actual serving time,\n// incoming and outgoing message size\n//\n// Note: message size is only taken into account if bandwidth limitation is applied\n// and the cost based on either message size is greater than the cost based on\n// serving time. A maximum of the three costs is applied instead of their sum\n// because the three limited resources (serving thread time and i/o bandwidth) can\n// also be maxed out simultaneously.\nfunc (ct *costTracker) realCost(servingTime uint64, inSize, outSize uint32) uint64 {\n\tcost := float64(servingTime)\n\tinSizeCost := float64(inSize) * ct.inSizeFactor\n\tif inSizeCost > cost {\n\t\tcost = inSizeCost\n\t}\n\toutSizeCost := float64(outSize) * ct.outSizeFactor\n\tif outSizeCost > cost {\n\t\tcost = outSizeCost\n\t}\n\treturn uint64(cost * ct.globalFactor())\n}\n\n// printStats prints the distribution of real request cost relative to the average estimates\nfunc (ct *costTracker) printStats() {\n\tif ct.stats == nil {\n\t\treturn\n\t}\n\tfor code, arr := range ct.stats {\n\t\tlog.Info(\"Request cost statistics\", \"code\", code, \"1/16\", arr[0], \"1/8\", arr[1], \"1/4\", arr[2], \"1/2\", arr[3], \"1\", arr[4], \"2\", arr[5], \"4\", arr[6], \"8\", arr[7], \"16\", arr[8], \">16\", arr[9])\n\t}\n}\n\ntype (\n\t// requestCostTable assigns a cost estimate function to each request type\n\t// which is a linear function of the requested amount\n\t// (cost = baseCost + reqCost * amount)\n\trequestCostTable map[uint64]*requestCosts\n\trequestCosts     struct {\n\t\tbaseCost, reqCost uint64\n\t}\n\n\t// RequestCostList is a list representation of request costs which is used for\n\t// database storage and communication through the network\n\tRequestCostList     []requestCostListItem\n\trequestCostListItem struct {\n\t\tMsgCode, BaseCost, ReqCost uint64\n\t}\n)\n\n// getMaxCost calculates the estimated cost for a given request type and amount\nfunc (table requestCostTable) getMaxCost(code, amount uint64) uint64 {\n\tcosts := table[code]\n\treturn costs.baseCost + amount*costs.reqCost\n}\n\n// decode converts a cost list to a cost table\nfunc (list RequestCostList) decode(protocolLength uint64) requestCostTable {\n\ttable := make(requestCostTable)\n\tfor _, e := range list {\n\t\tif e.MsgCode < protocolLength {\n\t\t\ttable[e.MsgCode] = &requestCosts{\n\t\t\t\tbaseCost: e.BaseCost,\n\t\t\t\treqCost:  e.ReqCost,\n\t\t\t}\n\t\t}\n\t}\n\treturn table\n}\n\n// testCostList returns a dummy request cost list used by tests\nfunc testCostList(testCost uint64) RequestCostList {\n\tcl := make(RequestCostList, len(reqAvgTimeCost))\n\tvar max uint64\n\tfor code := range reqAvgTimeCost {\n\t\tif code > max {\n\t\t\tmax = 
code\n\t\t}\n\t}\n\ti := 0\n\tfor code := uint64(0); code <= max; code++ {\n\t\tif _, ok := reqAvgTimeCost[code]; ok {\n\t\t\tcl[i].MsgCode = code\n\t\t\tcl[i].BaseCost = testCost\n\t\t\tcl[i].ReqCost = 0\n\t\t\ti++\n\t\t}\n\t}\n\treturn cl\n}\n"
  },
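  {
    "path": "les/realcost_sketch.go",
    "content": "// Hypothetical sketch (not part of the les package; names are illustrative).\n// It reproduces the arithmetic of costTracker.realCost: the serving-time cost\n// and the two bandwidth-based costs are compared and the maximum, not the sum,\n// is scaled by the global cost factor, because the three limited resources can\n// be maxed out independently of each other.\npackage main\n\nimport \"fmt\"\n\n// realCost mirrors costTracker.realCost with the factors passed explicitly.\n// servingTime is in nanoseconds; inSize and outSize are message sizes in bytes.\nfunc realCost(servingTime uint64, inSize, outSize uint32, inSizeFactor, outSizeFactor, globalFactor float64) uint64 {\n\tcost := float64(servingTime)\n\tif c := float64(inSize) * inSizeFactor; c > cost {\n\t\tcost = c\n\t}\n\tif c := float64(outSize) * outSizeFactor; c > cost {\n\t\tcost = c\n\t}\n\treturn uint64(cost * globalFactor)\n}\n\nfunc main() {\n\t// A request that took 1ms of serving time but produced a large reply:\n\t// the outgoing-bandwidth cost (200000 * 20 = 4e6) dominates the serving\n\t// time (1e6), and the result is scaled by the global factor 1.5.\n\tfmt.Println(realCost(1000000, 100, 200000, 10, 20, 1.5)) // 6000000\n}\n"
  },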
  {
    "path": "les/distributor.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"container/list\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n)\n\n// requestDistributor implements a mechanism that distributes requests to\n// suitable peers, obeying flow control rules and prioritizing them in creation\n// order (even when a resend is necessary).\ntype requestDistributor struct {\n\tclock        mclock.Clock\n\treqQueue     *list.List\n\tlastReqOrder uint64\n\tpeers        map[distPeer]struct{}\n\tpeerLock     sync.RWMutex\n\tloopChn      chan struct{}\n\tloopNextSent bool\n\tlock         sync.Mutex\n\n\tcloseCh chan struct{}\n\twg      sync.WaitGroup\n}\n\n// distPeer is an LES server peer interface for the request distributor.\n// waitBefore returns either the necessary waiting time before sending a request\n// with the given upper estimated cost or the estimated remaining relative buffer\n// value after sending such a request (in which case the request can be sent\n// immediately). At least one of these values is always zero.\ntype distPeer interface {\n\twaitBefore(uint64) (time.Duration, float64)\n\tcanQueue() bool\n\tqueueSend(f func()) bool\n}\n\n// distReq is the request abstraction used by the distributor. It is based on\n// three callback functions:\n// - getCost returns the upper estimate of the cost of sending the request to a given peer\n// - canSend tells if the server peer is suitable to serve the request\n// - request prepares sending the request to the given peer and returns a function that\n// does the actual sending. Request order should be preserved but the callback itself should not\n// block until it is sent because other peers might still be able to receive requests while\n// one of them is blocking. 
Instead, the returned function is put in the peer's send queue.\ntype distReq struct {\n\tgetCost func(distPeer) uint64\n\tcanSend func(distPeer) bool\n\trequest func(distPeer) func()\n\n\treqOrder     uint64\n\tsentChn      chan distPeer\n\telement      *list.Element\n\twaitForPeers mclock.AbsTime\n\tenterQueue   mclock.AbsTime\n}\n\n// newRequestDistributor creates a new request distributor\nfunc newRequestDistributor(peers *serverPeerSet, clock mclock.Clock) *requestDistributor {\n\td := &requestDistributor{\n\t\tclock:    clock,\n\t\treqQueue: list.New(),\n\t\tloopChn:  make(chan struct{}, 2),\n\t\tcloseCh:  make(chan struct{}),\n\t\tpeers:    make(map[distPeer]struct{}),\n\t}\n\tif peers != nil {\n\t\tpeers.subscribe(d)\n\t}\n\td.wg.Add(1)\n\tgo d.loop()\n\treturn d\n}\n\n// registerPeer implements peerSetNotify\nfunc (d *requestDistributor) registerPeer(p *serverPeer) {\n\td.peerLock.Lock()\n\td.peers[p] = struct{}{}\n\td.peerLock.Unlock()\n}\n\n// unregisterPeer implements peerSetNotify\nfunc (d *requestDistributor) unregisterPeer(p *serverPeer) {\n\td.peerLock.Lock()\n\tdelete(d.peers, p)\n\td.peerLock.Unlock()\n}\n\n// registerTestPeer adds a new test peer\nfunc (d *requestDistributor) registerTestPeer(p distPeer) {\n\td.peerLock.Lock()\n\td.peers[p] = struct{}{}\n\td.peerLock.Unlock()\n}\n\nvar (\n\t// distMaxWait is the maximum waiting time after which further necessary waiting\n\t// times are recalculated based on new feedback from the servers\n\tdistMaxWait = time.Millisecond * 50\n\n\t// waitForPeers is the time window in which a request does not fail even if it\n\t// has no suitable peers to send to at the moment\n\twaitForPeers = time.Second * 3\n)\n\n// main event loop\nfunc (d *requestDistributor) loop() {\n\tdefer d.wg.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-d.closeCh:\n\t\t\td.lock.Lock()\n\t\t\telem := d.reqQueue.Front()\n\t\t\tfor elem != nil {\n\t\t\t\treq := elem.Value.(*distReq)\n\t\t\t\tclose(req.sentChn)\n\t\t\t\treq.sentChn = nil\n\t\t\t\telem = elem.Next()\n\t\t\t}\n\t\t\td.lock.Unlock()\n\t\t\treturn\n\t\tcase <-d.loopChn:\n\t\t\td.lock.Lock()\n\t\t\td.loopNextSent = false\n\t\tloop:\n\t\t\tfor {\n\t\t\t\tpeer, req, wait := d.nextRequest()\n\t\t\t\tif req != nil && wait == 0 {\n\t\t\t\t\tchn := req.sentChn // save sentChn because remove sets it to nil\n\t\t\t\t\td.remove(req)\n\t\t\t\t\tsend := req.request(peer)\n\t\t\t\t\tif send != nil {\n\t\t\t\t\t\tpeer.queueSend(send)\n\t\t\t\t\t\trequestSendDelay.Update(time.Duration(d.clock.Now() - req.enterQueue))\n\t\t\t\t\t}\n\t\t\t\t\tchn <- peer\n\t\t\t\t\tclose(chn)\n\t\t\t\t} else {\n\t\t\t\t\tif wait == 0 {\n\t\t\t\t\t\t// no request to send and nothing to wait for; the next\n\t\t\t\t\t\t// queued request will wake up the loop\n\t\t\t\t\t\tbreak loop\n\t\t\t\t\t}\n\t\t\t\t\td.loopNextSent = true // a \"next\" signal has been sent, do not send another one until this one has been received\n\t\t\t\t\tif wait > distMaxWait {\n\t\t\t\t\t\t// waiting times may be reduced by incoming request replies, if it is too long, recalculate it periodically\n\t\t\t\t\t\twait = distMaxWait\n\t\t\t\t\t}\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\td.clock.Sleep(wait)\n\t\t\t\t\t\td.loopChn <- struct{}{}\n\t\t\t\t\t}()\n\t\t\t\t\tbreak loop\n\t\t\t\t}\n\t\t\t}\n\t\t\td.lock.Unlock()\n\t\t}\n\t}\n}\n\n// selectPeerItem represents a peer to be selected for a request by weightedRandomSelect\ntype selectPeerItem struct {\n\tpeer   distPeer\n\treq    *distReq\n\tweight uint64\n}\n\nfunc selectPeerWeight(i interface{}) uint64 {\n\treturn 
i.(selectPeerItem).weight\n}\n\n// nextRequest returns the next possible request from any peer, along with the\n// associated peer and necessary waiting time\nfunc (d *requestDistributor) nextRequest() (distPeer, *distReq, time.Duration) {\n\tcheckedPeers := make(map[distPeer]struct{})\n\telem := d.reqQueue.Front()\n\tvar (\n\t\tbestWait time.Duration\n\t\tsel      *utils.WeightedRandomSelect\n\t)\n\n\td.peerLock.RLock()\n\tdefer d.peerLock.RUnlock()\n\n\tpeerCount := len(d.peers)\n\tfor (len(checkedPeers) < peerCount || elem == d.reqQueue.Front()) && elem != nil {\n\t\treq := elem.Value.(*distReq)\n\t\tcanSend := false\n\t\tnow := d.clock.Now()\n\t\tif req.waitForPeers > now {\n\t\t\tcanSend = true\n\t\t\twait := time.Duration(req.waitForPeers - now)\n\t\t\tif bestWait == 0 || wait < bestWait {\n\t\t\t\tbestWait = wait\n\t\t\t}\n\t\t}\n\t\tfor peer := range d.peers {\n\t\t\tif _, ok := checkedPeers[peer]; !ok && peer.canQueue() && req.canSend(peer) {\n\t\t\t\tcanSend = true\n\t\t\t\tcost := req.getCost(peer)\n\t\t\t\twait, bufRemain := peer.waitBefore(cost)\n\t\t\t\tif wait == 0 {\n\t\t\t\t\tif sel == nil {\n\t\t\t\t\t\tsel = utils.NewWeightedRandomSelect(selectPeerWeight)\n\t\t\t\t\t}\n\t\t\t\t\tsel.Update(selectPeerItem{peer: peer, req: req, weight: uint64(bufRemain*1000000) + 1})\n\t\t\t\t} else {\n\t\t\t\t\tif bestWait == 0 || wait < bestWait {\n\t\t\t\t\t\tbestWait = wait\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcheckedPeers[peer] = struct{}{}\n\t\t\t}\n\t\t}\n\t\tnext := elem.Next()\n\t\tif !canSend && elem == d.reqQueue.Front() {\n\t\t\tclose(req.sentChn)\n\t\t\td.remove(req)\n\t\t}\n\t\telem = next\n\t}\n\n\tif sel != nil {\n\t\tc := sel.Choose().(selectPeerItem)\n\t\treturn c.peer, c.req, 0\n\t}\n\treturn nil, nil, bestWait\n}\n\n// queue adds a request to the distribution queue and returns a channel on which the\n// receiving peer is sent once the request has been sent (i.e. once the request\n// callback has returned). If the request is cancelled or timed out without suitable\n// peers, the channel is closed without sending any peer references to it.\nfunc (d *requestDistributor) queue(r *distReq) chan distPeer {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tif r.reqOrder == 0 {\n\t\td.lastReqOrder++\n\t\tr.reqOrder = d.lastReqOrder\n\t\tr.waitForPeers = d.clock.Now() + mclock.AbsTime(waitForPeers)\n\t}\n\t// Assign the timestamp when the request is queued, whether it's\n\t// a new one or a re-queued one.\n\tr.enterQueue = d.clock.Now()\n\n\tback := d.reqQueue.Back()\n\tif back == nil || r.reqOrder > back.Value.(*distReq).reqOrder {\n\t\tr.element = d.reqQueue.PushBack(r)\n\t} else {\n\t\tbefore := d.reqQueue.Front()\n\t\tfor before.Value.(*distReq).reqOrder < r.reqOrder {\n\t\t\tbefore = before.Next()\n\t\t}\n\t\tr.element = d.reqQueue.InsertBefore(r, before)\n\t}\n\n\tif !d.loopNextSent {\n\t\td.loopNextSent = true\n\t\td.loopChn <- struct{}{}\n\t}\n\n\tr.sentChn = make(chan distPeer, 1)\n\treturn r.sentChn\n}\n\n// cancel removes a request from the queue if it has not been sent yet (returns\n// false if it has been sent already). 
It is guaranteed that the callback functions\n// will not be called after cancel returns.\nfunc (d *requestDistributor) cancel(r *distReq) bool {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tif r.sentChn == nil {\n\t\treturn false\n\t}\n\n\tclose(r.sentChn)\n\td.remove(r)\n\treturn true\n}\n\n// remove removes a request from the queue\nfunc (d *requestDistributor) remove(r *distReq) {\n\tr.sentChn = nil\n\tif r.element != nil {\n\t\td.reqQueue.Remove(r.element)\n\t\tr.element = nil\n\t}\n}\n\nfunc (d *requestDistributor) close() {\n\tclose(d.closeCh)\n\td.wg.Wait()\n}\n"
  },
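  {
    "path": "les/waitbefore_sketch.go",
    "content": "// Hypothetical sketch (not part of the les package). It spells out the\n// distPeer.waitBefore contract the distributor relies on: exactly one of the\n// two return values is non-zero. A zero wait means the request fits into the\n// peer's flow-control buffer and can be sent now; in that case nextRequest\n// uses the remaining relative buffer as a selection weight.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\n// waitBefore mirrors the buffer math of testDistPeer.waitBefore: given the\n// cost already buffered plus the cost of the next request, either report how\n// long to wait or report the remaining relative buffer value.\nfunc waitBefore(buffered, cost, bufLimit uint64) (time.Duration, float64) {\n\tsum := buffered + cost\n\tif sum < bufLimit {\n\t\treturn 0, float64(bufLimit-sum) / float64(bufLimit)\n\t}\n\treturn time.Duration(sum - bufLimit), 0\n}\n\n// selectionWeight is the weight nextRequest assigns to a sendable peer.\nfunc selectionWeight(bufRemain float64) uint64 {\n\treturn uint64(bufRemain*1000000) + 1\n}\n\nfunc main() {\n\twait, remain := waitBefore(400, 100, 1000)\n\tfmt.Println(wait, remain, selectionWeight(remain)) // 0s 0.5 500001\n\n\twait, remain = waitBefore(950, 100, 1000)\n\tfmt.Println(wait, remain) // 50ns 0: wait before sending\n}\n"
  },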
  {
    "path": "les/distributor_test.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"math/rand\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n)\n\ntype testDistReq struct {\n\tcost, procTime, order uint64\n\tcanSendTo             map[*testDistPeer]struct{}\n}\n\nfunc (r *testDistReq) getCost(dp distPeer) uint64 {\n\treturn r.cost\n}\n\nfunc (r *testDistReq) canSend(dp distPeer) bool {\n\t_, ok := r.canSendTo[dp.(*testDistPeer)]\n\treturn ok\n}\n\nfunc (r *testDistReq) request(dp distPeer) func() {\n\treturn func() { dp.(*testDistPeer).send(r) }\n}\n\ntype testDistPeer struct {\n\tsent    []*testDistReq\n\tsumCost uint64\n\tlock    sync.RWMutex\n}\n\nfunc (p *testDistPeer) send(r *testDistReq) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\tp.sent = append(p.sent, r)\n\tp.sumCost += r.cost\n}\n\nfunc (p *testDistPeer) worker(t *testing.T, checkOrder bool, stop chan struct{}) {\n\tvar last uint64\n\tfor {\n\t\twait := time.Millisecond\n\t\tp.lock.Lock()\n\t\tif len(p.sent) > 0 {\n\t\t\trq := p.sent[0]\n\t\t\twait = time.Duration(rq.procTime)\n\t\t\tp.sumCost -= rq.cost\n\t\t\tif checkOrder {\n\t\t\t\tif rq.order <= last {\n\t\t\t\t\tt.Errorf(\"Requests processed in wrong order\")\n\t\t\t\t}\n\t\t\t\tlast = rq.order\n\t\t\t}\n\t\t\tp.sent = p.sent[1:]\n\t\t}\n\t\tp.lock.Unlock()\n\t\tselect {\n\t\tcase <-stop:\n\t\t\treturn\n\t\tcase <-time.After(wait):\n\t\t}\n\t}\n}\n\nconst (\n\ttestDistBufLimit       = 10000000\n\ttestDistMaxCost        = 1000000\n\ttestDistPeerCount      = 2\n\ttestDistReqCount       = 10\n\ttestDistMaxResendCount = 3\n)\n\nfunc (p *testDistPeer) waitBefore(cost uint64) (time.Duration, float64) {\n\tp.lock.RLock()\n\tsumCost := p.sumCost + cost\n\tp.lock.RUnlock()\n\tif sumCost < testDistBufLimit {\n\t\treturn 0, float64(testDistBufLimit-sumCost) / float64(testDistBufLimit)\n\t}\n\treturn time.Duration(sumCost - testDistBufLimit), 0\n}\n\nfunc (p *testDistPeer) canQueue() bool {\n\treturn true\n}\n\nfunc (p *testDistPeer) queueSend(f func()) bool {\n\tf()\n\treturn true\n}\n\nfunc TestRequestDistributor(t *testing.T) {\n\ttestRequestDistributor(t, false)\n}\n\nfunc TestRequestDistributorResend(t *testing.T) {\n\ttestRequestDistributor(t, true)\n}\n\nfunc testRequestDistributor(t *testing.T, resend bool) {\n\tstop := make(chan struct{})\n\tdefer close(stop)\n\n\tdist := newRequestDistributor(nil, &mclock.System{})\n\tvar peers [testDistPeerCount]*testDistPeer\n\tfor i := range peers {\n\t\tpeers[i] = &testDistPeer{}\n\t\tgo peers[i].worker(t, !resend, stop)\n\t\tdist.registerTestPeer(peers[i])\n\t}\n\t// Disable the mechanism that we will wait a few time for request\n\t// even there is no suitable peer to send right now.\n\twaitForPeers = 0\n\n\tvar wg 
sync.WaitGroup\n\n\tfor i := 1; i <= testDistReqCount; i++ {\n\t\tcost := uint64(rand.Int63n(testDistMaxCost))\n\t\tprocTime := uint64(rand.Int63n(int64(cost + 1)))\n\t\trq := &testDistReq{\n\t\t\tcost:      cost,\n\t\t\tprocTime:  procTime,\n\t\t\torder:     uint64(i),\n\t\t\tcanSendTo: make(map[*testDistPeer]struct{}),\n\t\t}\n\t\tfor _, peer := range peers {\n\t\t\tif rand.Intn(2) != 0 {\n\t\t\t\trq.canSendTo[peer] = struct{}{}\n\t\t\t}\n\t\t}\n\n\t\twg.Add(1)\n\t\treq := &distReq{\n\t\t\tgetCost: rq.getCost,\n\t\t\tcanSend: rq.canSend,\n\t\t\trequest: rq.request,\n\t\t}\n\t\tchn := dist.queue(req)\n\t\tgo func() {\n\t\t\tcnt := 1\n\t\t\tif resend && len(rq.canSendTo) != 0 {\n\t\t\t\tcnt = rand.Intn(testDistMaxResendCount) + 1\n\t\t\t}\n\t\t\tfor i := 0; i < cnt; i++ {\n\t\t\t\tif i != 0 {\n\t\t\t\t\tchn = dist.queue(req)\n\t\t\t\t}\n\t\t\t\tp := <-chn\n\t\t\t\tif p == nil {\n\t\t\t\t\tif len(rq.canSendTo) != 0 {\n\t\t\t\t\t\tt.Errorf(\"Request that could have been sent was dropped\")\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tpeer := p.(*testDistPeer)\n\t\t\t\t\tif _, ok := rq.canSendTo[peer]; !ok {\n\t\t\t\t\t\tt.Errorf(\"Request sent to wrong peer\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t\tif rand.Intn(1000) == 0 {\n\t\t\ttime.Sleep(time.Duration(rand.Intn(5000000)))\n\t\t}\n\t}\n\n\twg.Wait()\n}\n"
  },
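  {
    "path": "les/reqorder_sketch.go",
    "content": "// Hypothetical sketch (not part of the les package). It isolates the\n// creation-order queueing rule that the resend test above exercises:\n// requestDistributor.queue appends a request if it is the newest, otherwise\n// it inserts the request before the first entry with a higher reqOrder, so a\n// re-queued (resent) request keeps its original position in line.\npackage main\n\nimport \"fmt\"\n\n// insertByOrder mimics the placement rule with bare reqOrder values.\nfunc insertByOrder(queue []uint64, reqOrder uint64) []uint64 {\n\tif n := len(queue); n == 0 || reqOrder > queue[n-1] {\n\t\treturn append(queue, reqOrder)\n\t}\n\tfor i, order := range queue {\n\t\tif order > reqOrder {\n\t\t\treturn append(queue[:i], append([]uint64{reqOrder}, queue[i:]...)...)\n\t\t}\n\t}\n\treturn queue\n}\n\nfunc main() {\n\tq := []uint64{}\n\tfor _, order := range []uint64{1, 2, 4} {\n\t\tq = insertByOrder(q, order)\n\t}\n\t// Request 3 is re-queued for a resend after request 4 was created: it\n\t// still lands before 4, preserving creation order.\n\tfmt.Println(insertByOrder(q, 3)) // [1 2 3 4]\n}\n"
  },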
  {
    "path": "les/enr_entry.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/p2p/dnsdisc\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\n// lesEntry is the \"les\" ENR entry. This is set for LES servers only.\ntype lesEntry struct {\n\t// Ignore additional fields (for forward compatibility).\n\tVfxVersion uint\n\tRest       []rlp.RawValue `rlp:\"tail\"`\n}\n\nfunc (lesEntry) ENRKey() string { return \"les\" }\n\n// ethEntry is the \"eth\" ENR entry. This is redeclared here to avoid depending on package eth.\ntype ethEntry struct {\n\tForkID forkid.ID\n\tTail   []rlp.RawValue `rlp:\"tail\"`\n}\n\nfunc (ethEntry) ENRKey() string { return \"eth\" }\n\n// setupDiscovery creates the node discovery source for the eth protocol.\nfunc (eth *LightEthereum) setupDiscovery() (enode.Iterator, error) {\n\tit := enode.NewFairMix(0)\n\n\t// Enable DNS discovery.\n\tif len(eth.config.EthDiscoveryURLs) != 0 {\n\t\tclient := dnsdisc.NewClient(dnsdisc.Config{})\n\t\tdns, err := client.NewIterator(eth.config.EthDiscoveryURLs...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tit.AddSource(dns)\n\t}\n\n\t// Enable DHT.\n\tif eth.udpEnabled {\n\t\tit.AddSource(eth.p2pServer.DiscV5.RandomNodes())\n\t}\n\n\tforkFilter := forkid.NewFilter(eth.blockchain)\n\titerator := enode.Filter(it, func(n *enode.Node) bool { return nodeIsServer(forkFilter, n) })\n\treturn iterator, nil\n}\n\n// nodeIsServer checks whether n is an LES server node.\nfunc nodeIsServer(forkFilter forkid.Filter, n *enode.Node) bool {\n\tvar les lesEntry\n\tvar eth ethEntry\n\treturn n.Load(&les) == nil && n.Load(&eth) == nil && forkFilter(eth.ForkID) == nil\n}\n"
  },
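  {
    "path": "les/enr_filter_sketch.go",
    "content": "// Hypothetical, dependency-free sketch (not part of the les package). It\n// restates the predicate built in setupDiscovery/nodeIsServer: a discovered\n// node is accepted only if its record carries both the \"les\" and \"eth\" ENR\n// entries and its fork ID passes the local fork filter. The record type here\n// is a toy stand-in for *enode.Node and elides entry decoding entirely.\npackage main\n\nimport \"fmt\"\n\n// record maps ENR entry keys to their presence; real entries carry RLP data.\ntype record map[string]bool\n\n// forkFilter reports whether a remote fork ID is compatible; here it is a\n// trivial comparison against one accepted ID.\ntype forkFilter func(forkID string) bool\n\n// isServer mirrors nodeIsServer: require the \"les\" entry, the \"eth\" entry\n// and a compatible fork ID.\nfunc isServer(r record, forkID string, filter forkFilter) bool {\n\treturn r[\"les\"] && r[\"eth\"] && filter(forkID)\n}\n\nfunc main() {\n\tfilter := forkFilter(func(id string) bool { return id == \"fork-a\" })\n\tserver := record{\"les\": true, \"eth\": true}\n\tethOnly := record{\"eth\": true}\n\tfmt.Println(isServer(server, \"fork-a\", filter))  // true\n\tfmt.Println(isServer(ethOnly, \"fork-a\", filter)) // false: no \"les\" entry\n\tfmt.Println(isServer(server, \"fork-b\", filter))  // false: incompatible fork\n}\n"
  },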
  {
    "path": "les/fetcher.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"math/big\"\n\t\"math/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/consensus\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/eth/fetcher\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\nconst (\n\tblockDelayTimeout    = 10 * time.Second       // Timeout for retrieving the headers from the peer\n\tgatherSlack          = 100 * time.Millisecond // Interval used to collate almost-expired requests\n\tcachedAnnosThreshold = 64                     // The maximum queued announcements\n)\n\n// announce represents an new block announcement from the les server.\ntype announce struct {\n\tdata   *announceData\n\ttrust  bool\n\tpeerid enode.ID\n}\n\n// request represents a record when the header request is sent.\ntype request struct {\n\treqid  uint64\n\tpeerid enode.ID\n\tsendAt time.Time\n\thash   common.Hash\n}\n\n// response represents a response packet from network as well as a channel\n// to return all un-requested data.\ntype response struct {\n\treqid   uint64\n\theaders []*types.Header\n\tpeerid  enode.ID\n\tremain  chan []*types.Header\n}\n\n// fetcherPeer holds the fetcher-specific information for each active peer\ntype fetcherPeer struct {\n\tlatest *announceData // The latest announcement sent from the peer\n\n\t// These following two fields can track the latest announces\n\t// from the peer with limited size for caching. We hold the\n\t// assumption that all enqueued announces are td-monotonic.\n\tannounces     map[common.Hash]*announce // Announcement map\n\tannouncesList []common.Hash             // FIFO announces list\n}\n\n// addAnno enqueues an new trusted announcement. If the queued announces overflow,\n// evict from the oldest.\nfunc (fp *fetcherPeer) addAnno(anno *announce) {\n\t// Short circuit if the anno already exists. In normal case it should\n\t// never happen since only monotonic anno is accepted. But the adversary\n\t// may feed us fake announces with higher td but same hash. 
In this case,\n\t// ignore the anno anyway.\n\thash := anno.data.Hash\n\tif _, exist := fp.announces[hash]; exist {\n\t\treturn\n\t}\n\tfp.announces[hash] = anno\n\tfp.announcesList = append(fp.announcesList, hash)\n\n\t// Evict oldest if the announces are oversized.\n\tif len(fp.announcesList)-cachedAnnosThreshold > 0 {\n\t\tfor i := 0; i < len(fp.announcesList)-cachedAnnosThreshold; i++ {\n\t\t\tdelete(fp.announces, fp.announcesList[i])\n\t\t}\n\t\tcopy(fp.announcesList, fp.announcesList[len(fp.announcesList)-cachedAnnosThreshold:])\n\t\tfp.announcesList = fp.announcesList[:cachedAnnosThreshold]\n\t}\n}\n\n// forwardAnno removes all announces from the map whose td is not higher than\n// the provided threshold.\nfunc (fp *fetcherPeer) forwardAnno(td *big.Int) []*announce {\n\tvar (\n\t\tcutset  int\n\t\tevicted []*announce\n\t)\n\tfor ; cutset < len(fp.announcesList); cutset++ {\n\t\tanno := fp.announces[fp.announcesList[cutset]]\n\t\tif anno == nil {\n\t\t\tcontinue // In theory this should never happen\n\t\t}\n\t\tif anno.data.Td.Cmp(td) > 0 {\n\t\t\tbreak\n\t\t}\n\t\tevicted = append(evicted, anno)\n\t\tdelete(fp.announces, anno.data.Hash)\n\t}\n\tif cutset > 0 {\n\t\tcopy(fp.announcesList, fp.announcesList[cutset:])\n\t\tfp.announcesList = fp.announcesList[:len(fp.announcesList)-cutset]\n\t}\n\treturn evicted\n}\n\n// lightFetcher implements retrieval of newly announced headers. It reuses\n// the eth.BlockFetcher as the underlying fetcher but adds additional\n// rules: e.g. evicting \"timeout\" peers.\ntype lightFetcher struct {\n\t// Various handlers\n\tulc     *ulc\n\tchaindb ethdb.Database\n\treqDist *requestDistributor\n\tpeerset *serverPeerSet        // The global peerset of the light client, shared by all components\n\tchain   *light.LightChain     // The local light chain which maintains the canonical header chain.\n\tfetcher *fetcher.BlockFetcher // The underlying fetcher which takes care of block header retrieval.\n\n\t// Peerset maintained by fetcher\n\tplock sync.RWMutex\n\tpeers map[enode.ID]*fetcherPeer\n\n\t// Various channels\n\tannounceCh chan *announce\n\trequestCh  chan *request\n\tdeliverCh  chan *response\n\tsyncDone   chan *types.Header\n\n\tcloseCh chan struct{}\n\twg      sync.WaitGroup\n\n\t// Callback\n\tsynchronise func(peer *serverPeer)\n\n\t// Test fields or hooks\n\tnoAnnounce  bool\n\tnewHeadHook func(*types.Header)\n\tnewAnnounce func(*serverPeer, *announceData)\n}\n\n// newLightFetcher creates a light fetcher instance.\nfunc newLightFetcher(chain *light.LightChain, engine consensus.Engine, peers *serverPeerSet, ulc *ulc, chaindb ethdb.Database, reqDist *requestDistributor, syncFn func(p *serverPeer)) *lightFetcher {\n\t// Construct the fetcher by offering all necessary APIs\n\tvalidator := func(header *types.Header) error {\n\t\t// Disable seal verification explicitly if we are running in ulc mode.\n\t\treturn engine.VerifyHeader(chain, header, ulc == nil)\n\t}\n\theighter := func() uint64 { return chain.CurrentHeader().Number.Uint64() }\n\tdropper := func(id string) { peers.unregister(id) }\n\tinserter := func(headers []*types.Header) (int, error) {\n\t\t// Disable PoW checking explicitly if we are running in ulc mode.\n\t\tcheckFreq := 1\n\t\tif ulc != nil {\n\t\t\tcheckFreq = 0\n\t\t}\n\t\treturn chain.InsertHeaderChain(headers, checkFreq)\n\t}\n\tf := &lightFetcher{\n\t\tulc:         ulc,\n\t\tpeerset:     peers,\n\t\tchaindb:     chaindb,\n\t\tchain:       chain,\n\t\treqDist:     reqDist,\n\t\tfetcher:     fetcher.NewBlockFetcher(true, 
chain.GetHeaderByHash, nil, validator, nil, heighter, inserter, nil, dropper),\n\t\tpeers:       make(map[enode.ID]*fetcherPeer),\n\t\tsynchronise: syncFn,\n\t\tannounceCh:  make(chan *announce),\n\t\trequestCh:   make(chan *request),\n\t\tdeliverCh:   make(chan *response),\n\t\tsyncDone:    make(chan *types.Header),\n\t\tcloseCh:     make(chan struct{}),\n\t}\n\tpeers.subscribe(f)\n\treturn f\n}\n\nfunc (f *lightFetcher) start() {\n\tf.wg.Add(1)\n\tf.fetcher.Start()\n\tgo f.mainloop()\n}\n\nfunc (f *lightFetcher) stop() {\n\tclose(f.closeCh)\n\tf.fetcher.Stop()\n\tf.wg.Wait()\n}\n\n// registerPeer adds a new peer to the fetcher's peer set\nfunc (f *lightFetcher) registerPeer(p *serverPeer) {\n\tf.plock.Lock()\n\tdefer f.plock.Unlock()\n\n\tf.peers[p.ID()] = &fetcherPeer{announces: make(map[common.Hash]*announce)}\n}\n\n// unregisterPeer removes the specified peer from the fetcher's peer set\nfunc (f *lightFetcher) unregisterPeer(p *serverPeer) {\n\tf.plock.Lock()\n\tdefer f.plock.Unlock()\n\n\tdelete(f.peers, p.ID())\n}\n\n// peer returns the peer from the fetcher peerset.\nfunc (f *lightFetcher) peer(id enode.ID) *fetcherPeer {\n\tf.plock.RLock()\n\tdefer f.plock.RUnlock()\n\n\treturn f.peers[id]\n}\n\n// forEachPeer iterates over the fetcher peerset, aborting the iteration if the\n// callback returns false.\nfunc (f *lightFetcher) forEachPeer(check func(id enode.ID, p *fetcherPeer) bool) {\n\tf.plock.RLock()\n\tdefer f.plock.RUnlock()\n\n\tfor id, peer := range f.peers {\n\t\tif !check(id, peer) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// mainloop is the main event loop of the light fetcher, which is responsible for\n// - announcement maintenance (ulc)\n//   If we are running in ultra light client mode, then all announcements from\n//   the trusted servers are maintained. If the same announcements from trusted\n//   servers reach the threshold, then the relevant header is requested for retrieval.\n//\n// - block header retrieval\n//   Whenever we receive an announce with a higher td than the local chain, a\n//   request is made for header retrieval.\n//\n// - re-sync trigger\n//   If the local chain lags too much, then the fetcher will enter \"synchronise\"\n//   mode to retrieve missing headers in batches.\nfunc (f *lightFetcher) mainloop() {\n\tdefer f.wg.Done()\n\n\tvar (\n\t\tsyncInterval = uint64(1) // Interval used to trigger a light resync.\n\t\tsyncing      bool        // Indicator whether the client is syncing\n\n\t\tulc          = f.ulc != nil\n\t\theadCh       = make(chan core.ChainHeadEvent, 100)\n\t\tfetching     = make(map[uint64]*request)\n\t\trequestTimer = time.NewTimer(0)\n\n\t\t// Local status\n\t\tlocalHead = f.chain.CurrentHeader()\n\t\tlocalTd   = f.chain.GetTd(localHead.Hash(), localHead.Number.Uint64())\n\t)\n\tsub := f.chain.SubscribeChainHeadEvent(headCh)\n\tdefer sub.Unsubscribe()\n\n\t// reset updates the local status with the given header.\n\treset := func(header *types.Header) {\n\t\tlocalHead = header\n\t\tlocalTd = f.chain.GetTd(header.Hash(), header.Number.Uint64())\n\t}\n\t// trustedHeader returns an indicator whether the header is regarded as\n\t// trusted. 
If we are running in ulc mode, the header is only trusted once\n\t// enough matching announcements have been received from the trusted servers.\n\ttrustedHeader := func(hash common.Hash, number uint64) (bool, []enode.ID) {\n\t\tvar (\n\t\t\tagreed  []enode.ID\n\t\t\ttrusted bool\n\t\t)\n\t\tf.forEachPeer(func(id enode.ID, p *fetcherPeer) bool {\n\t\t\tif anno := p.announces[hash]; anno != nil && anno.trust && anno.data.Number == number {\n\t\t\t\tagreed = append(agreed, id)\n\t\t\t\tif 100*len(agreed)/len(f.ulc.keys) >= f.ulc.fraction {\n\t\t\t\t\ttrusted = true\n\t\t\t\t\treturn false // abort iteration\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\t\treturn trusted, agreed\n\t}\n\tfor {\n\t\tselect {\n\t\tcase anno := <-f.announceCh:\n\t\t\tpeerid, data := anno.peerid, anno.data\n\t\t\tlog.Debug(\"Received new announce\", \"peer\", peerid, \"number\", data.Number, \"hash\", data.Hash, \"reorg\", data.ReorgDepth)\n\n\t\t\tpeer := f.peer(peerid)\n\t\t\tif peer == nil {\n\t\t\t\tlog.Debug(\"Received announce from unknown peer\", \"peer\", peerid)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Announced tds should be strictly monotonic, drop the peer if\n\t\t\t// the announce is out-of-order.\n\t\t\tif peer.latest != nil && data.Td.Cmp(peer.latest.Td) <= 0 {\n\t\t\t\tf.peerset.unregister(peerid.String())\n\t\t\t\tlog.Debug(\"Non-monotonic td\", \"peer\", peerid, \"current\", data.Td, \"previous\", peer.latest.Td)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpeer.latest = data\n\n\t\t\t// Filter out any stale announce; the local chain is ahead of the announce\n\t\t\tif localTd != nil && data.Td.Cmp(localTd) <= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpeer.addAnno(anno)\n\n\t\t\t// If we are not syncing, try to trigger a single retrieval or re-sync\n\t\t\tif !ulc && !syncing {\n\t\t\t\t// Two scenarios lead to re-sync:\n\t\t\t\t// - reorg happens\n\t\t\t\t// - local chain lags\n\t\t\t\t// We can't retrieve the parent of the announce with a single retrieval\n\t\t\t\t// in either case, so a resync is necessary.\n\t\t\t\tif data.Number > localHead.Number.Uint64()+syncInterval || data.ReorgDepth > 0 {\n\t\t\t\t\tsyncing = true\n\t\t\t\t\tgo f.startSync(peerid)\n\t\t\t\t\tlog.Debug(\"Trigger light sync\", \"peer\", peerid, \"local\", localHead.Number, \"localhash\", localHead.Hash(), \"remote\", data.Number, \"remotehash\", data.Hash)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tf.fetcher.Notify(peerid.String(), data.Hash, data.Number, time.Now(), f.requestHeaderByHash(peerid), nil)\n\t\t\t\tlog.Debug(\"Trigger header retrieval\", \"peer\", peerid, \"number\", data.Number, \"hash\", data.Hash)\n\t\t\t}\n\t\t\t// Keep collecting announces from the trusted servers even while we are syncing.\n\t\t\tif ulc && anno.trust {\n\t\t\t\t// Notify the underlying fetcher to retrieve the header or trigger a resync if\n\t\t\t\t// we have received enough announcements from the trusted servers.\n\t\t\t\ttrusted, agreed := trustedHeader(data.Hash, data.Number)\n\t\t\t\tif trusted && !syncing {\n\t\t\t\t\tif data.Number > localHead.Number.Uint64()+syncInterval || data.ReorgDepth > 0 {\n\t\t\t\t\t\tsyncing = true\n\t\t\t\t\t\tgo f.startSync(peerid)\n\t\t\t\t\t\tlog.Debug(\"Trigger trusted light sync\", \"local\", localHead.Number, \"localhash\", localHead.Hash(), \"remote\", data.Number, \"remotehash\", data.Hash)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tp := agreed[rand.Intn(len(agreed))]\n\t\t\t\t\tf.fetcher.Notify(p.String(), data.Hash, data.Number, time.Now(), f.requestHeaderByHash(p), nil)\n\t\t\t\t\tlog.Debug(\"Trigger trusted header retrieval\", \"number\", data.Number, 
\"hash\", data.Hash)\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase req := <-f.requestCh:\n\t\t\tfetching[req.reqid] = req // Tracking all in-flight requests for response latency statistic.\n\t\t\tif len(fetching) == 1 {\n\t\t\t\tf.rescheduleTimer(fetching, requestTimer)\n\t\t\t}\n\n\t\tcase <-requestTimer.C:\n\t\t\tfor reqid, request := range fetching {\n\t\t\t\tif time.Since(request.sendAt) > blockDelayTimeout-gatherSlack {\n\t\t\t\t\tdelete(fetching, reqid)\n\t\t\t\t\tf.peerset.unregister(request.peerid.String())\n\t\t\t\t\tlog.Debug(\"Request timeout\", \"peer\", request.peerid, \"reqid\", reqid)\n\t\t\t\t}\n\t\t\t}\n\t\t\tf.rescheduleTimer(fetching, requestTimer)\n\n\t\tcase resp := <-f.deliverCh:\n\t\t\tif req := fetching[resp.reqid]; req != nil {\n\t\t\t\tdelete(fetching, resp.reqid)\n\t\t\t\tf.rescheduleTimer(fetching, requestTimer)\n\n\t\t\t\t// The underlying fetcher does not check the consistency of request and response.\n\t\t\t\t// The adversary can send the fake announces with invalid hash and number but always\n\t\t\t\t// delivery some mismatched header. So it can't be punished by the underlying fetcher.\n\t\t\t\t// We have to add two more rules here to detect.\n\t\t\t\tif len(resp.headers) != 1 {\n\t\t\t\t\tf.peerset.unregister(req.peerid.String())\n\t\t\t\t\tlog.Debug(\"Deliver more than requested\", \"peer\", req.peerid, \"reqid\", req.reqid)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif resp.headers[0].Hash() != req.hash {\n\t\t\t\t\tf.peerset.unregister(req.peerid.String())\n\t\t\t\t\tlog.Debug(\"Deliver invalid header\", \"peer\", req.peerid, \"reqid\", req.reqid)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresp.remain <- f.fetcher.FilterHeaders(resp.peerid.String(), resp.headers, time.Now())\n\t\t\t} else {\n\t\t\t\t// Discard the entire packet no matter it's a timeout response or unexpected one.\n\t\t\t\tresp.remain <- resp.headers\n\t\t\t}\n\n\t\tcase ev := <-headCh:\n\t\t\t// Short circuit if we are still syncing.\n\t\t\tif syncing {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treset(ev.Block.Header())\n\n\t\t\t// Clean stale announcements from les-servers.\n\t\t\tvar droplist []enode.ID\n\t\t\tf.forEachPeer(func(id enode.ID, p *fetcherPeer) bool {\n\t\t\t\tremoved := p.forwardAnno(localTd)\n\t\t\t\tfor _, anno := range removed {\n\t\t\t\t\tif header := f.chain.GetHeaderByHash(anno.data.Hash); header != nil {\n\t\t\t\t\t\tif header.Number.Uint64() != anno.data.Number {\n\t\t\t\t\t\t\tdroplist = append(droplist, id)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// In theory td should exists.\n\t\t\t\t\t\ttd := f.chain.GetTd(anno.data.Hash, anno.data.Number)\n\t\t\t\t\t\tif td != nil && td.Cmp(anno.data.Td) != 0 {\n\t\t\t\t\t\t\tdroplist = append(droplist, id)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t})\n\t\t\tfor _, id := range droplist {\n\t\t\t\tf.peerset.unregister(id.String())\n\t\t\t\tlog.Debug(\"Kicked out peer for invalid announcement\")\n\t\t\t}\n\t\t\tif f.newHeadHook != nil {\n\t\t\t\tf.newHeadHook(localHead)\n\t\t\t}\n\n\t\tcase origin := <-f.syncDone:\n\t\t\tsyncing = false // Reset the status\n\n\t\t\t// Rewind all untrusted headers for ulc mode.\n\t\t\tif ulc {\n\t\t\t\thead := f.chain.CurrentHeader()\n\t\t\t\tancestor := rawdb.FindCommonAncestor(f.chaindb, origin, head)\n\t\t\t\tvar untrusted []common.Hash\n\t\t\t\tfor head.Number.Cmp(ancestor.Number) > 0 {\n\t\t\t\t\thash, number := head.Hash(), head.Number.Uint64()\n\t\t\t\t\tif trusted, _ := trustedHeader(hash, number); trusted {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tuntrusted 
= append(untrusted, hash)\n\t\t\t\t\thead = f.chain.GetHeader(head.ParentHash, number-1)\n\t\t\t\t}\n\t\t\t\tif len(untrusted) > 0 {\n\t\t\t\t\tfor i, j := 0, len(untrusted)-1; i < j; i, j = i+1, j-1 {\n\t\t\t\t\t\tuntrusted[i], untrusted[j] = untrusted[j], untrusted[i]\n\t\t\t\t\t}\n\t\t\t\t\tf.chain.Rollback(untrusted)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Reset local status.\n\t\t\treset(f.chain.CurrentHeader())\n\t\t\tif f.newHeadHook != nil {\n\t\t\t\tf.newHeadHook(localHead)\n\t\t\t}\n\t\t\tlog.Debug(\"Light sync finished\", \"number\", localHead.Number, \"hash\", localHead.Hash())\n\n\t\tcase <-f.closeCh:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// announce processes a new announcement message received from a peer.\nfunc (f *lightFetcher) announce(p *serverPeer, head *announceData) {\n\tif f.newAnnounce != nil {\n\t\tf.newAnnounce(p, head)\n\t}\n\tif f.noAnnounce {\n\t\treturn\n\t}\n\tselect {\n\tcase f.announceCh <- &announce{peerid: p.ID(), trust: p.trusted, data: head}:\n\tcase <-f.closeCh:\n\t\treturn\n\t}\n}\n\n// trackRequest sends a reqID to the main loop for in-flight request tracking.\nfunc (f *lightFetcher) trackRequest(peerid enode.ID, reqid uint64, hash common.Hash) {\n\tselect {\n\tcase f.requestCh <- &request{reqid: reqid, peerid: peerid, sendAt: time.Now(), hash: hash}:\n\tcase <-f.closeCh:\n\t}\n}\n\n// requestHeaderByHash constructs a header retrieval request and sends it to the\n// local request distributor.\n//\n// Note: we rely on the underlying eth/fetcher to retrieve and validate the\n// response, so we have to obey the eth/fetcher rule that responses are only\n// accepted from the given peer.\nfunc (f *lightFetcher) requestHeaderByHash(peerid enode.ID) func(common.Hash) error {\n\treturn func(hash common.Hash) error {\n\t\treq := &distReq{\n\t\t\tgetCost: func(dp distPeer) uint64 { return dp.(*serverPeer).getRequestCost(GetBlockHeadersMsg, 1) },\n\t\t\tcanSend: func(dp distPeer) bool { return dp.(*serverPeer).ID() == peerid },\n\t\t\trequest: func(dp distPeer) func() {\n\t\t\t\tpeer, id := dp.(*serverPeer), genReqID()\n\t\t\t\tcost := peer.getRequestCost(GetBlockHeadersMsg, 1)\n\t\t\t\tpeer.fcServer.QueuedRequest(id, cost)\n\n\t\t\t\treturn func() {\n\t\t\t\t\tf.trackRequest(peer.ID(), id, hash)\n\t\t\t\t\tpeer.requestHeadersByHash(id, hash, 1, 0, false)\n\t\t\t\t}\n\t\t\t},\n\t\t}\n\t\tf.reqDist.queue(req)\n\t\treturn nil\n\t}\n}\n\n// startSync invokes the synchronisation callback to start syncing.\nfunc (f *lightFetcher) startSync(id enode.ID) {\n\tdefer func(header *types.Header) {\n\t\tf.syncDone <- header\n\t}(f.chain.CurrentHeader())\n\n\tpeer := f.peerset.peer(id.String())\n\tif peer == nil || peer.onlyAnnounce {\n\t\treturn\n\t}\n\tf.synchronise(peer)\n}\n\n// deliverHeaders delivers header download request responses for processing\nfunc (f *lightFetcher) deliverHeaders(peer *serverPeer, reqid uint64, headers []*types.Header) []*types.Header {\n\tremain := make(chan []*types.Header, 1)\n\tselect {\n\tcase f.deliverCh <- &response{reqid: reqid, headers: headers, peerid: peer.ID(), remain: remain}:\n\tcase <-f.closeCh:\n\t\treturn nil\n\t}\n\treturn <-remain\n}\n\n// rescheduleTimer resets the specified timeout timer to the next request timeout.\nfunc (f *lightFetcher) rescheduleTimer(requests map[uint64]*request, timer *time.Timer) {\n\t// Short circuit if there are no in-flight requests\n\tif len(requests) == 0 {\n\t\ttimer.Stop()\n\t\treturn\n\t}\n\t// Otherwise find the earliest expiring request\n\tearliest := time.Now()\n\tfor _, req := range requests {\n\t\tif earliest.After(req.sendAt) {\n\t\t\tearliest = req.sendAt\n\t\t}\n\t}\n\ttimer.Reset(blockDelayTimeout - time.Since(earliest))\n}\n"
  },
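  {
    "path": "docs/sketches/request_timer_sketch.go",
    "content": "// NOTE: this file is an illustrative sketch added alongside les/fetcher.go for\n// documentation purposes; it is not part of the original source tree and the\n// path and all identifiers below are hypothetical.\n//\n// It demonstrates, in isolation, the single-timer pattern used by\n// lightFetcher.rescheduleTimer: rather than arming one timer per in-flight\n// request, one shared timer is always armed for the request that expires\n// first, and it is re-armed whenever the request set changes.\npackage sketches\n\nimport \"time\"\n\n// requestTimeout is a stand-in for the fetcher's blockDelayTimeout.\nconst requestTimeout = 10 * time.Second\n\ntype inflight struct {\n\tsendAt time.Time // when the request was sent\n}\n\n// reschedule resets the shared timer to fire when the oldest in-flight request\n// times out, or stops it when nothing is pending.\nfunc reschedule(requests map[uint64]*inflight, timer *time.Timer) {\n\tif len(requests) == 0 {\n\t\ttimer.Stop()\n\t\treturn\n\t}\n\t// Find the earliest send time among the in-flight requests.\n\tearliest := time.Now()\n\tfor _, req := range requests {\n\t\tif earliest.After(req.sendAt) {\n\t\t\tearliest = req.sendAt\n\t\t}\n\t}\n\ttimer.Reset(requestTimeout - time.Since(earliest))\n}\n"
  },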
  {
    "path": "les/fetcher_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"math/big\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\n// verifyImportEvent verifies that one single event arrive on an import channel.\nfunc verifyImportEvent(t *testing.T, imported chan interface{}, arrive bool) {\n\tif arrive {\n\t\tselect {\n\t\tcase <-imported:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Fatalf(\"import timeout\")\n\t\t}\n\t} else {\n\t\tselect {\n\t\tcase <-imported:\n\t\t\tt.Fatalf(\"import invoked\")\n\t\tcase <-time.After(20 * time.Millisecond):\n\t\t}\n\t}\n}\n\n// verifyImportDone verifies that no more events are arriving on an import channel.\nfunc verifyImportDone(t *testing.T, imported chan interface{}) {\n\tselect {\n\tcase <-imported:\n\t\tt.Fatalf(\"extra block imported\")\n\tcase <-time.After(50 * time.Millisecond):\n\t}\n}\n\n// verifyChainHeight verifies the chain height is as expected.\nfunc verifyChainHeight(t *testing.T, fetcher *lightFetcher, height uint64) {\n\tlocal := fetcher.chain.CurrentHeader().Number.Uint64()\n\tif local != height {\n\t\tt.Fatalf(\"chain height mismatch, got %d, want %d\", local, height)\n\t}\n}\n\nfunc TestSequentialAnnouncementsLes2(t *testing.T) { testSequentialAnnouncements(t, 2) }\nfunc TestSequentialAnnouncementsLes3(t *testing.T) { testSequentialAnnouncements(t, 3) }\n\nfunc testSequentialAnnouncements(t *testing.T, protocol int) {\n\tnetconfig := testnetConfig{\n\t\tblocks:    4,\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\ts, c, teardown := newClientServerEnv(t, netconfig)\n\tdefer teardown()\n\n\t// Create connected peer pair.\n\tc.handler.fetcher.noAnnounce = true // Ignore the first announce from peer which can trigger a resync.\n\tp1, _, err := newTestPeerPair(\"peer\", protocol, s.handler, c.handler)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create peer pair %v\", err)\n\t}\n\tc.handler.fetcher.noAnnounce = false\n\n\timportCh := make(chan interface{})\n\tc.handler.fetcher.newHeadHook = func(header *types.Header) {\n\t\timportCh <- header\n\t}\n\tfor i := uint64(1); i <= s.backend.Blockchain().CurrentHeader().Number.Uint64(); i++ {\n\t\theader := s.backend.Blockchain().GetHeaderByNumber(i)\n\t\thash, number := header.Hash(), header.Number.Uint64()\n\t\ttd := rawdb.ReadTd(s.db, hash, number)\n\n\t\tannounce := announceData{hash, number, td, 0, nil}\n\t\tif p1.cpeer.announceType == announceTypeSigned {\n\t\t\tannounce.sign(s.handler.server.privateKey)\n\t\t}\n\t\tp1.cpeer.sendAnnounce(announce)\n\t\tverifyImportEvent(t, 
importCh, true)\n\t}\n\tverifyImportDone(t, importCh)\n\tverifyChainHeight(t, c.handler.fetcher, 4)\n}\n\nfunc TestGappedAnnouncementsLes2(t *testing.T) { testGappedAnnouncements(t, 2) }\nfunc TestGappedAnnouncementsLes3(t *testing.T) { testGappedAnnouncements(t, 3) }\n\nfunc testGappedAnnouncements(t *testing.T, protocol int) {\n\tnetconfig := testnetConfig{\n\t\tblocks:    4,\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\ts, c, teardown := newClientServerEnv(t, netconfig)\n\tdefer teardown()\n\n\t// Create a connected peer pair.\n\tc.handler.fetcher.noAnnounce = true // Ignore the first announce from the peer, which could trigger a resync.\n\tpeer, _, err := newTestPeerPair(\"peer\", protocol, s.handler, c.handler)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create peer pair %v\", err)\n\t}\n\tc.handler.fetcher.noAnnounce = false\n\n\tdone := make(chan *types.Header, 1)\n\tc.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header }\n\n\t// Prepare an announcement with the latest header.\n\tlatest := s.backend.Blockchain().CurrentHeader()\n\thash, number := latest.Hash(), latest.Number.Uint64()\n\ttd := rawdb.ReadTd(s.db, hash, number)\n\n\t// Sign the announcement if necessary.\n\tannounce := announceData{hash, number, td, 0, nil}\n\tif peer.cpeer.announceType == announceTypeSigned {\n\t\tannounce.sign(s.handler.server.privateKey)\n\t}\n\tpeer.cpeer.sendAnnounce(announce)\n\n\t<-done // Wait for syncing\n\tverifyChainHeight(t, c.handler.fetcher, 4)\n\n\t// Send a reorged announcement\n\tvar newAnno = make(chan struct{}, 1)\n\tc.handler.fetcher.noAnnounce = true\n\tc.handler.fetcher.newAnnounce = func(*serverPeer, *announceData) {\n\t\tnewAnno <- struct{}{}\n\t}\n\tblocks, _ := core.GenerateChain(rawdb.ReadChainConfig(s.db, s.backend.Blockchain().Genesis().Hash()), s.backend.Blockchain().GetBlockByNumber(3),\n\t\tethash.NewFaker(), s.db, 2, func(i int, gen *core.BlockGen) {\n\t\t\tgen.OffsetTime(-9) // higher block difficulty\n\t\t})\n\ts.backend.Blockchain().InsertChain(blocks)\n\t<-newAnno\n\tc.handler.fetcher.noAnnounce = false\n\tc.handler.fetcher.newAnnounce = nil\n\n\tlatest = blocks[len(blocks)-1].Header()\n\thash, number = latest.Hash(), latest.Number.Uint64()\n\ttd = rawdb.ReadTd(s.db, hash, number)\n\n\tannounce = announceData{hash, number, td, 1, nil}\n\tif peer.cpeer.announceType == announceTypeSigned {\n\t\tannounce.sign(s.handler.server.privateKey)\n\t}\n\tpeer.cpeer.sendAnnounce(announce)\n\n\t<-done // Wait for syncing\n\tverifyChainHeight(t, c.handler.fetcher, 5)\n}\n\nfunc TestTrustedAnnouncementsLes2(t *testing.T) { testTrustedAnnouncement(t, 2) }\nfunc TestTrustedAnnouncementsLes3(t *testing.T) { testTrustedAnnouncement(t, 3) }\n\nfunc testTrustedAnnouncement(t *testing.T, protocol int) {\n\tvar (\n\t\tservers   []*testServer\n\t\tteardowns []func()\n\t\tnodes     []*enode.Node\n\t\tids       []string\n\t\tcpeers    []*clientPeer\n\t\tspeers    []*serverPeer\n\t)\n\tfor i := 0; i < 10; i++ {\n\t\ts, n, teardown := newTestServerPeer(t, 10, protocol)\n\n\t\tservers = append(servers, s)\n\t\tnodes = append(nodes, n)\n\t\tteardowns = append(teardowns, teardown)\n\n\t\t// Half of them are trusted servers.\n\t\tif i < 5 {\n\t\t\tids = append(ids, n.String())\n\t\t}\n\t}\n\tnetconfig := testnetConfig{\n\t\tprotocol:    protocol,\n\t\tnopruning:   true,\n\t\tulcServers:  ids,\n\t\tulcFraction: 60,\n\t}\n\t_, c, teardown := newClientServerEnv(t, netconfig)\n\tdefer teardown()\n\tdefer func() {\n\t\tfor i := 0; i < len(teardowns); i++ 
{\n\t\t\tteardowns[i]()\n\t\t}\n\t}()\n\n\tc.handler.fetcher.noAnnounce = true // Ignore the first announce from the peer, which could trigger a resync.\n\n\t// Connect all server instances.\n\tfor i := 0; i < len(servers); i++ {\n\t\tsp, cp, err := connect(servers[i].handler, nodes[i].ID(), c.handler, protocol)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"connect server and client failed, err %s\", err)\n\t\t}\n\t\tcpeers = append(cpeers, cp)\n\t\tspeers = append(speers, sp)\n\t}\n\tc.handler.fetcher.noAnnounce = false\n\n\tnewHead := make(chan *types.Header, 1)\n\tc.handler.fetcher.newHeadHook = func(header *types.Header) { newHead <- header }\n\n\tcheck := func(height []uint64, expected uint64, callback func()) {\n\t\tfor i := 0; i < len(height); i++ {\n\t\t\tfor j := 0; j < len(servers); j++ {\n\t\t\t\th := servers[j].backend.Blockchain().GetHeaderByNumber(height[i])\n\t\t\t\thash, number := h.Hash(), h.Number.Uint64()\n\t\t\t\ttd := rawdb.ReadTd(servers[j].db, hash, number)\n\n\t\t\t\t// Sign the announcement if necessary.\n\t\t\t\tannounce := announceData{hash, number, td, 0, nil}\n\t\t\t\tp := cpeers[j]\n\t\t\t\tif p.announceType == announceTypeSigned {\n\t\t\t\t\tannounce.sign(servers[j].handler.server.privateKey)\n\t\t\t\t}\n\t\t\t\tp.sendAnnounce(announce)\n\t\t\t}\n\t\t}\n\t\tif callback != nil {\n\t\t\tcallback()\n\t\t}\n\t\tverifyChainHeight(t, c.handler.fetcher, expected)\n\t}\n\tcheck([]uint64{1}, 1, func() { <-newHead })   // Sequential announcements\n\tcheck([]uint64{4}, 4, func() { <-newHead })   // ULC-style light syncing, rollback untrusted headers\n\tcheck([]uint64{10}, 10, func() { <-newHead }) // Sync the whole chain.\n}\n\nfunc TestInvalidAnnouncesLES2(t *testing.T) { testInvalidAnnounces(t, lpv2) }\nfunc TestInvalidAnnouncesLES3(t *testing.T) { testInvalidAnnounces(t, lpv3) }\nfunc TestInvalidAnnouncesLES4(t *testing.T) { testInvalidAnnounces(t, lpv4) }\n\nfunc testInvalidAnnounces(t *testing.T, protocol int) {\n\tnetconfig := testnetConfig{\n\t\tblocks:    4,\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\ts, c, teardown := newClientServerEnv(t, netconfig)\n\tdefer teardown()\n\n\t// Create a connected peer pair.\n\tc.handler.fetcher.noAnnounce = true // Ignore the first announce from the peer, which could trigger a resync.\n\tpeer, _, err := newTestPeerPair(\"peer\", protocol, s.handler, c.handler)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create peer pair %v\", err)\n\t}\n\tc.handler.fetcher.noAnnounce = false\n\n\tdone := make(chan *types.Header, 1)\n\tc.handler.fetcher.newHeadHook = func(header *types.Header) { done <- header }\n\n\t// Prepare an announcement for block #1 with an invalid total difficulty.\n\theaderOne := s.backend.Blockchain().GetHeaderByNumber(1)\n\thash, number := headerOne.Hash(), headerOne.Number.Uint64()\n\ttd := big.NewInt(200) // bad td\n\n\t// Sign the announcement if necessary.\n\tannounce := announceData{hash, number, td, 0, nil}\n\tif peer.cpeer.announceType == announceTypeSigned {\n\t\tannounce.sign(s.handler.server.privateKey)\n\t}\n\tpeer.cpeer.sendAnnounce(announce)\n\t<-done // Wait for syncing\n\n\t// Ensure the bad peer is evicted\n\tif c.handler.backend.peers.len() != 0 {\n\t\tt.Fatalf(\"Failed to evict invalid peer\")\n\t}\n}\n"
  },
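  {
    "path": "docs/sketches/event_assert_sketch.go",
    "content": "// NOTE: this file is an illustrative sketch added alongside les/fetcher_test.go\n// for documentation purposes; it is not part of the original source tree and the\n// path and all identifiers below are hypothetical.\n//\n// It demonstrates the channel-assertion pattern behind verifyImportEvent and\n// verifyImportDone: a positive expectation waits with a generous deadline, while\n// a negative expectation only watches the channel for a short grace period.\npackage sketches\n\nimport (\n\t\"errors\"\n\t\"time\"\n)\n\n// expectEvent returns nil if the channel behaves as expected: an event arrives\n// within one second (want == true), or no event shows up within a short grace\n// period (want == false).\nfunc expectEvent(ch <-chan struct{}, want bool) error {\n\tif want {\n\t\tselect {\n\t\tcase <-ch:\n\t\t\treturn nil\n\t\tcase <-time.After(time.Second):\n\t\t\treturn errors.New(\"expected event did not arrive\")\n\t\t}\n\t}\n\tselect {\n\tcase <-ch:\n\t\treturn errors.New(\"unexpected event arrived\")\n\tcase <-time.After(20 * time.Millisecond):\n\t\treturn nil\n\t}\n}\n"
  },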
  {
    "path": "les/flowcontrol/control.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// Package flowcontrol implements a client side flow control mechanism\npackage flowcontrol\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/log\"\n)\n\nconst (\n\t// fcTimeConst is the time constant applied for MinRecharge during linear\n\t// buffer recharge period\n\tfcTimeConst = time.Millisecond\n\t// DecParamDelay is applied at server side when decreasing capacity in order to\n\t// avoid a buffer underrun error due to requests sent by the client before\n\t// receiving the capacity update announcement\n\tDecParamDelay = time.Second * 2\n\t// keepLogs is the duration of keeping logs; logging is not used if zero\n\tkeepLogs = 0\n)\n\n// ServerParams are the flow control parameters specified by a server for a client\n//\n// Note: a server can assign different amounts of capacity to each client by giving\n// different parameters to them.\ntype ServerParams struct {\n\tBufLimit, MinRecharge uint64\n}\n\n// scheduledUpdate represents a delayed flow control parameter update\ntype scheduledUpdate struct {\n\ttime   mclock.AbsTime\n\tparams ServerParams\n}\n\n// ClientNode is the flow control system's representation of a client\n// (used in server mode only)\ntype ClientNode struct {\n\tparams         ServerParams\n\tbufValue       int64\n\tlastTime       mclock.AbsTime\n\tupdateSchedule []scheduledUpdate\n\tsumCost        uint64            // sum of req costs received from this client\n\taccepted       map[uint64]uint64 // value = sumCost after accepting the given req\n\tconnected      bool\n\tlock           sync.Mutex\n\tcm             *ClientManager\n\tlog            *logger\n\tcmNodeFields\n}\n\n// NewClientNode returns a new ClientNode\nfunc NewClientNode(cm *ClientManager, params ServerParams) *ClientNode {\n\tnode := &ClientNode{\n\t\tcm:        cm,\n\t\tparams:    params,\n\t\tbufValue:  int64(params.BufLimit),\n\t\tlastTime:  cm.clock.Now(),\n\t\taccepted:  make(map[uint64]uint64),\n\t\tconnected: true,\n\t}\n\tif keepLogs > 0 {\n\t\tnode.log = newLogger(keepLogs)\n\t}\n\tcm.connect(node)\n\treturn node\n}\n\n// Disconnect should be called when a client is disconnected\nfunc (node *ClientNode) Disconnect() {\n\tnode.lock.Lock()\n\tdefer node.lock.Unlock()\n\n\tnode.connected = false\n\tnode.cm.disconnect(node)\n}\n\n// BufferStatus returns the current buffer value and limit\nfunc (node *ClientNode) BufferStatus() (uint64, uint64) {\n\tnode.lock.Lock()\n\tdefer node.lock.Unlock()\n\n\tif !node.connected {\n\t\treturn 0, 0\n\t}\n\tnow := node.cm.clock.Now()\n\tnode.update(now)\n\tnode.cm.updateBuffer(node, 0, now)\n\tbv := node.bufValue\n\tif bv < 0 {\n\t\tbv = 0\n\t}\n\treturn 
uint64(bv), node.params.BufLimit\n}\n\n// OneTimeCost subtracts the given amount from the node's buffer.\n//\n// Note: this call can take the buffer into the negative region internally.\n// In this case zero buffer value is returned by exported calls and no requests\n// are accepted.\nfunc (node *ClientNode) OneTimeCost(cost uint64) {\n\tnode.lock.Lock()\n\tdefer node.lock.Unlock()\n\n\tnow := node.cm.clock.Now()\n\tnode.update(now)\n\tnode.bufValue -= int64(cost)\n\tnode.cm.updateBuffer(node, -int64(cost), now)\n}\n\n// Freeze notifies the client manager about a client freeze event in which case\n// the total capacity allowance is slightly reduced.\nfunc (node *ClientNode) Freeze() {\n\tnode.lock.Lock()\n\tfrozenCap := node.params.MinRecharge\n\tnode.lock.Unlock()\n\tnode.cm.reduceTotalCapacity(frozenCap)\n}\n\n// update recalculates the buffer value at a specified time while also performing\n// scheduled flow control parameter updates if necessary\nfunc (node *ClientNode) update(now mclock.AbsTime) {\n\tfor len(node.updateSchedule) > 0 && node.updateSchedule[0].time <= now {\n\t\tnode.recalcBV(node.updateSchedule[0].time)\n\t\tnode.updateParams(node.updateSchedule[0].params, now)\n\t\tnode.updateSchedule = node.updateSchedule[1:]\n\t}\n\tnode.recalcBV(now)\n}\n\n// recalcBV recalculates the buffer value at a specified time\nfunc (node *ClientNode) recalcBV(now mclock.AbsTime) {\n\tdt := uint64(now - node.lastTime)\n\tif now < node.lastTime {\n\t\tdt = 0\n\t}\n\tnode.bufValue += int64(node.params.MinRecharge * dt / uint64(fcTimeConst))\n\tif node.bufValue > int64(node.params.BufLimit) {\n\t\tnode.bufValue = int64(node.params.BufLimit)\n\t}\n\tif node.log != nil {\n\t\tnode.log.add(now, fmt.Sprintf(\"updated  bv=%d  MRR=%d  BufLimit=%d\", node.bufValue, node.params.MinRecharge, node.params.BufLimit))\n\t}\n\tnode.lastTime = now\n}\n\n// UpdateParams updates the flow control parameters of a client node\nfunc (node *ClientNode) UpdateParams(params ServerParams) {\n\tnode.lock.Lock()\n\tdefer node.lock.Unlock()\n\n\tnow := node.cm.clock.Now()\n\tnode.update(now)\n\tif params.MinRecharge >= node.params.MinRecharge {\n\t\tnode.updateSchedule = nil\n\t\tnode.updateParams(params, now)\n\t} else {\n\t\tfor i, s := range node.updateSchedule {\n\t\t\tif params.MinRecharge >= s.params.MinRecharge {\n\t\t\t\ts.params = params\n\t\t\t\tnode.updateSchedule = node.updateSchedule[:i+1]\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tnode.updateSchedule = append(node.updateSchedule, scheduledUpdate{time: now + mclock.AbsTime(DecParamDelay), params: params})\n\t}\n}\n\n// updateParams updates the flow control parameters of the node\nfunc (node *ClientNode) updateParams(params ServerParams, now mclock.AbsTime) {\n\tdiff := int64(params.BufLimit - node.params.BufLimit)\n\tif diff > 0 {\n\t\tnode.bufValue += diff\n\t} else if node.bufValue > int64(params.BufLimit) {\n\t\tnode.bufValue = int64(params.BufLimit)\n\t}\n\tnode.cm.updateParams(node, params, now)\n}\n\n// AcceptRequest returns whether a new request can be accepted and the missing\n// buffer amount if it was rejected due to a buffer underrun. 
If accepted, maxCost\n// is deducted from the flow control buffer.\nfunc (node *ClientNode) AcceptRequest(reqID, index, maxCost uint64) (accepted bool, bufShort uint64, priority int64) {\n\tnode.lock.Lock()\n\tdefer node.lock.Unlock()\n\n\tnow := node.cm.clock.Now()\n\tnode.update(now)\n\tif int64(maxCost) > node.bufValue {\n\t\tif node.log != nil {\n\t\t\tnode.log.add(now, fmt.Sprintf(\"rejected  reqID=%d  bv=%d  maxCost=%d\", reqID, node.bufValue, maxCost))\n\t\t\tnode.log.dump(now)\n\t\t}\n\t\treturn false, maxCost - uint64(node.bufValue), 0\n\t}\n\tnode.bufValue -= int64(maxCost)\n\tnode.sumCost += maxCost\n\tif node.log != nil {\n\t\tnode.log.add(now, fmt.Sprintf(\"accepted  reqID=%d  bv=%d  maxCost=%d  sumCost=%d\", reqID, node.bufValue, maxCost, node.sumCost))\n\t}\n\tnode.accepted[index] = node.sumCost\n\treturn true, 0, node.cm.accepted(node, maxCost, now)\n}\n\n// RequestProcessed should be called when the request has been processed\nfunc (node *ClientNode) RequestProcessed(reqID, index, maxCost, realCost uint64) uint64 {\n\tnode.lock.Lock()\n\tdefer node.lock.Unlock()\n\n\tnow := node.cm.clock.Now()\n\tnode.update(now)\n\tnode.cm.processed(node, maxCost, realCost, now)\n\tbv := node.bufValue + int64(node.sumCost-node.accepted[index])\n\tif node.log != nil {\n\t\tnode.log.add(now, fmt.Sprintf(\"processed  reqID=%d  bv=%d  maxCost=%d  realCost=%d  sumCost=%d  oldSumCost=%d  reportedBV=%d\", reqID, node.bufValue, maxCost, realCost, node.sumCost, node.accepted[index], bv))\n\t}\n\tdelete(node.accepted, index)\n\tif bv < 0 {\n\t\treturn 0\n\t}\n\treturn uint64(bv)\n}\n\n// ServerNode is the flow control system's representation of a server\n// (used in client mode only)\ntype ServerNode struct {\n\tclock       mclock.Clock\n\tbufEstimate uint64\n\tbufRecharge bool\n\tlastTime    mclock.AbsTime\n\tparams      ServerParams\n\tsumCost     uint64            // sum of req costs sent to this server\n\tpending     map[uint64]uint64 // value = sumCost after sending the given req\n\tlog         *logger\n\tlock        sync.RWMutex\n}\n\n// NewServerNode returns a new ServerNode\nfunc NewServerNode(params ServerParams, clock mclock.Clock) *ServerNode {\n\tnode := &ServerNode{\n\t\tclock:       clock,\n\t\tbufEstimate: params.BufLimit,\n\t\tbufRecharge: false,\n\t\tlastTime:    clock.Now(),\n\t\tparams:      params,\n\t\tpending:     make(map[uint64]uint64),\n\t}\n\tif keepLogs > 0 {\n\t\tnode.log = newLogger(keepLogs)\n\t}\n\treturn node\n}\n\n// UpdateParams updates the flow control parameters of the node\nfunc (node *ServerNode) UpdateParams(params ServerParams) {\n\tnode.lock.Lock()\n\tdefer node.lock.Unlock()\n\n\tnode.recalcBLE(mclock.Now())\n\tif params.BufLimit > node.params.BufLimit {\n\t\tnode.bufEstimate += params.BufLimit - node.params.BufLimit\n\t} else {\n\t\tif node.bufEstimate > params.BufLimit {\n\t\t\tnode.bufEstimate = params.BufLimit\n\t\t}\n\t}\n\tnode.params = params\n}\n\n// recalcBLE recalculates the lowest estimate for the client's buffer value at\n// the given server at the specified time\nfunc (node *ServerNode) recalcBLE(now mclock.AbsTime) {\n\tif now < node.lastTime {\n\t\treturn\n\t}\n\tif node.bufRecharge {\n\t\tdt := uint64(now - node.lastTime)\n\t\tnode.bufEstimate += node.params.MinRecharge * dt / uint64(fcTimeConst)\n\t\tif node.bufEstimate >= node.params.BufLimit {\n\t\t\tnode.bufEstimate = node.params.BufLimit\n\t\t\tnode.bufRecharge = false\n\t\t}\n\t}\n\tnode.lastTime = now\n\tif node.log != nil {\n\t\tnode.log.add(now, fmt.Sprintf(\"updated  bufEst=%d  
MRR=%d  BufLimit=%d\", node.bufEstimate, node.params.MinRecharge, node.params.BufLimit))\n\t}\n}\n\n// safetyMargin is added to the flow control waiting time when estimated buffer value is low\nconst safetyMargin = time.Millisecond\n\n// CanSend returns the minimum waiting time required before sending a request\n// with the given maximum estimated cost. Second return value is the relative\n// estimated buffer level after sending the request (divided by BufLimit).\nfunc (node *ServerNode) CanSend(maxCost uint64) (time.Duration, float64) {\n\tnode.lock.RLock()\n\tdefer node.lock.RUnlock()\n\n\tif node.params.BufLimit == 0 {\n\t\treturn time.Duration(math.MaxInt64), 0\n\t}\n\tnow := node.clock.Now()\n\tnode.recalcBLE(now)\n\tmaxCost += uint64(safetyMargin) * node.params.MinRecharge / uint64(fcTimeConst)\n\tif maxCost > node.params.BufLimit {\n\t\tmaxCost = node.params.BufLimit\n\t}\n\tif node.bufEstimate >= maxCost {\n\t\trelBuf := float64(node.bufEstimate-maxCost) / float64(node.params.BufLimit)\n\t\tif node.log != nil {\n\t\t\tnode.log.add(now, fmt.Sprintf(\"canSend  bufEst=%d  maxCost=%d  true  relBuf=%f\", node.bufEstimate, maxCost, relBuf))\n\t\t}\n\t\treturn 0, relBuf\n\t}\n\ttimeLeft := time.Duration((maxCost - node.bufEstimate) * uint64(fcTimeConst) / node.params.MinRecharge)\n\tif node.log != nil {\n\t\tnode.log.add(now, fmt.Sprintf(\"canSend  bufEst=%d  maxCost=%d  false  timeLeft=%v\", node.bufEstimate, maxCost, timeLeft))\n\t}\n\treturn timeLeft, 0\n}\n\n// QueuedRequest should be called when the request has been assigned to the given\n// server node, before putting it in the send queue. It is mandatory that requests\n// are sent in the same order as the QueuedRequest calls are made.\nfunc (node *ServerNode) QueuedRequest(reqID, maxCost uint64) {\n\tnode.lock.Lock()\n\tdefer node.lock.Unlock()\n\n\tnow := node.clock.Now()\n\tnode.recalcBLE(now)\n\t// Note: we do not know when requests actually arrive to the server so bufRecharge\n\t// is not turned on here if buffer was full; in this case it is going to be turned\n\t// on by the first reply's bufValue feedback\n\tif node.bufEstimate >= maxCost {\n\t\tnode.bufEstimate -= maxCost\n\t} else {\n\t\tlog.Error(\"Queued request with insufficient buffer estimate\")\n\t\tnode.bufEstimate = 0\n\t}\n\tnode.sumCost += maxCost\n\tnode.pending[reqID] = node.sumCost\n\tif node.log != nil {\n\t\tnode.log.add(now, fmt.Sprintf(\"queued  reqID=%d  bufEst=%d  maxCost=%d  sumCost=%d\", reqID, node.bufEstimate, maxCost, node.sumCost))\n\t}\n}\n\n// ReceivedReply adjusts estimated buffer value according to the value included in\n// the latest request reply.\nfunc (node *ServerNode) ReceivedReply(reqID, bv uint64) {\n\tnode.lock.Lock()\n\tdefer node.lock.Unlock()\n\n\tnow := node.clock.Now()\n\tnode.recalcBLE(now)\n\tif bv > node.params.BufLimit {\n\t\tbv = node.params.BufLimit\n\t}\n\tsc, ok := node.pending[reqID]\n\tif !ok {\n\t\treturn\n\t}\n\tdelete(node.pending, reqID)\n\tcc := node.sumCost - sc\n\tnewEstimate := uint64(0)\n\tif bv > cc {\n\t\tnewEstimate = bv - cc\n\t}\n\tif newEstimate > node.bufEstimate {\n\t\t// Note: we never reduce the buffer estimate based on the reported value because\n\t\t// this can only happen because of the delayed delivery of the latest reply.\n\t\t// The lowest estimate based on the previous reply can still be considered valid.\n\t\tnode.bufEstimate = newEstimate\n\t}\n\n\tnode.bufRecharge = node.bufEstimate < node.params.BufLimit\n\tnode.lastTime = now\n\tif node.log != nil {\n\t\tnode.log.add(now, 
fmt.Sprintf(\"received  reqID=%d  bufEst=%d  reportedBv=%d  sumCost=%d  oldSumCost=%d\", reqID, node.bufEstimate, bv, node.sumCost, sc))\n\t}\n}\n\n// ResumeFreeze cleans all pending requests and sets the buffer estimate to the\n// reported value after resuming from a frozen state\nfunc (node *ServerNode) ResumeFreeze(bv uint64) {\n\tnode.lock.Lock()\n\tdefer node.lock.Unlock()\n\n\tfor reqID := range node.pending {\n\t\tdelete(node.pending, reqID)\n\t}\n\tnow := node.clock.Now()\n\tnode.recalcBLE(now)\n\tif bv > node.params.BufLimit {\n\t\tbv = node.params.BufLimit\n\t}\n\tnode.bufEstimate = bv\n\tnode.bufRecharge = node.bufEstimate < node.params.BufLimit\n\tnode.lastTime = now\n\tif node.log != nil {\n\t\tnode.log.add(now, fmt.Sprintf(\"unfreeze  bv=%d  sumCost=%d\", bv, node.sumCost))\n\t}\n}\n\n// DumpLogs dumps the event log if logging is used\nfunc (node *ServerNode) DumpLogs() {\n\tnode.lock.Lock()\n\tdefer node.lock.Unlock()\n\n\tif node.log != nil {\n\t\tnode.log.dump(node.clock.Now())\n\t}\n}\n"
  },
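  {
    "path": "les/flowcontrol/control_sketch_test.go",
    "content": "// NOTE: this test file is an illustrative sketch added alongside control.go for\n// documentation purposes; it is not part of the original source tree and the\n// file name is hypothetical. It only uses the exported ServerNode API defined\n// in control.go together with a simulated clock.\n//\n// The test walks through one client-side flow control cycle: a queued request\n// deducts its maximum cost from the buffer estimate, a second request of the\n// same size then has to wait, and the bufValue feedback carried by the reply\n// restores the estimate.\npackage flowcontrol\n\nimport (\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n)\n\nfunc TestServerNodeBufferEstimateSketch(t *testing.T) {\n\tclock := &mclock.Simulated{}\n\tparams := ServerParams{BufLimit: 1000000, MinRecharge: 1000}\n\tnode := NewServerNode(params, clock)\n\n\t// A fresh node starts with a full buffer estimate, so a request cheaper\n\t// than BufLimit can be sent immediately.\n\tif wait, _ := node.CanSend(500000); wait != 0 {\n\t\tt.Fatalf(\"fresh node should not wait, got %v\", wait)\n\t}\n\t// Queueing the request deducts its maximum cost from the estimate...\n\tnode.QueuedRequest(1, 500000)\n\n\t// ...so an equally expensive second request now has to wait until the\n\t// linear MinRecharge rate has refilled enough buffer.\n\tif wait, _ := node.CanSend(500000); wait == 0 {\n\t\tt.Fatal(\"second request should have a non-zero waiting time\")\n\t}\n\t// The reply reports a full server-side buffer, which restores the\n\t// estimate at once.\n\tnode.ReceivedReply(1, params.BufLimit)\n\tif wait, _ := node.CanSend(500000); wait != 0 {\n\t\tt.Fatalf(\"estimate should be restored after reply, got %v\", wait)\n\t}\n}\n"
  },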
  {
    "path": "les/flowcontrol/manager.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage flowcontrol\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/common/prque\"\n)\n\n// cmNodeFields are ClientNode fields used by the client manager\n// Note: these fields are locked by the client manager's mutex\ntype cmNodeFields struct {\n\tcorrBufValue   int64 // buffer value adjusted with the extra recharge amount\n\trcLastIntValue int64 // past recharge integrator value when corrBufValue was last updated\n\trcFullIntValue int64 // future recharge integrator value when corrBufValue will reach maximum\n\tqueueIndex     int   // position in the recharge queue (-1 if not queued)\n}\n\n// FixedPointMultiplier is applied to the recharge integrator and the recharge curve.\n//\n// Note: fixed point arithmetic is required for the integrator because it is a\n// constantly increasing value that can wrap around int64 limits (which behavior is\n// also supported by the priority queue). 
A floating point value would gradually lose\n// precision in this application.\n// The recharge curve and all recharge values are encoded as fixed point because\n// sumRecharge is frequently updated by adding or subtracting individual recharge\n// values and perfect precision is required.\nconst FixedPointMultiplier = 1000000\n\nvar (\n\tcapacityDropFactor          = 0.1\n\tcapacityRaiseTC             = 1 / (3 * float64(time.Hour)) // time constant for raising the capacity factor\n\tcapacityRaiseThresholdRatio = 1.125                        // total/connected capacity ratio threshold for raising the capacity factor\n)\n\n// ClientManager controls the capacity assigned to the clients of a server.\n// Since ServerParams guarantees a safe lower estimate for processable requests\n// even in case of all clients being active, ClientManager calculates a\n// corrected buffer value and usually allows a higher remaining buffer value\n// to be returned with each reply.\ntype ClientManager struct {\n\tclock     mclock.Clock\n\tlock      sync.Mutex\n\tenabledCh chan struct{}\n\tstop      chan chan struct{}\n\n\tcurve                                      PieceWiseLinear\n\tsumRecharge, totalRecharge, totalConnected uint64\n\tlogTotalCap, totalCapacity                 float64\n\tlogTotalCapRaiseLimit                      float64\n\tminLogTotalCap, maxLogTotalCap             float64\n\tcapacityRaiseThreshold                     uint64\n\tcapLastUpdate                              mclock.AbsTime\n\ttotalCapacityCh                            chan uint64\n\n\t// recharge integrator is increasing in each moment with a rate of\n\t// (totalRecharge / sumRecharge)*FixedPointMultiplier or 0 if sumRecharge==0\n\trcLastUpdate   mclock.AbsTime // last time the recharge integrator was updated\n\trcLastIntValue int64          // last updated value of the recharge integrator\n\t// recharge queue is a priority queue with currently recharging client nodes\n\t// as elements. The priority value is rcFullIntValue, which makes it quick to\n\t// determine which client will finish recharging first.\n\trcQueue *prque.Prque\n}\n\n// NewClientManager returns a new client manager.\n// The client manager enhances flow control performance by allowing client buffers\n// to recharge quicker than the minimum guaranteed recharge rate if possible.\n// The sum of all minimum recharge rates (sumRecharge) is updated each time\n// a client starts or finishes buffer recharging. Then an adjusted total\n// recharge rate is calculated using a piecewise linear recharge curve:\n//\n// totalRecharge = curve(sumRecharge)\n// (totalRecharge >= sumRecharge is enforced)\n//\n// Then the \"bonus\" buffer recharge is distributed between currently recharging\n// clients proportionally to their minimum recharge rates.\n//\n// Note: total recharge is proportional to the average number of parallel-running\n// serving threads. 
A recharge value of 1000000 corresponds to one thread on average.\n// The maximum number of allowed serving threads should always be considerably\n// higher than the targeted average number.\n//\n// Note 2: although it is possible to specify a curve that allows the total target\n// recharge even at zero sumRecharge, it makes sense to add a linear ramp\n// starting from zero so that a single low-priority client cannot use up\n// the entire server capacity, thus ensuring quick availability for others at\n// any moment.\nfunc NewClientManager(curve PieceWiseLinear, clock mclock.Clock) *ClientManager {\n\tcm := &ClientManager{\n\t\tclock:         clock,\n\t\trcQueue:       prque.New(func(a interface{}, i int) { a.(*ClientNode).queueIndex = i }),\n\t\tcapLastUpdate: clock.Now(),\n\t\tstop:          make(chan chan struct{}),\n\t}\n\tif curve != nil {\n\t\tcm.SetRechargeCurve(curve)\n\t}\n\tgo func() {\n\t\t// regularly recalculate and update total capacity\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(time.Minute):\n\t\t\t\tcm.lock.Lock()\n\t\t\t\tcm.updateTotalCapacity(cm.clock.Now(), true)\n\t\t\t\tcm.lock.Unlock()\n\t\t\tcase stop := <-cm.stop:\n\t\t\t\tclose(stop)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn cm\n}\n\n// Stop stops the client manager\nfunc (cm *ClientManager) Stop() {\n\tstop := make(chan struct{})\n\tcm.stop <- stop\n\t<-stop\n}\n\n// SetRechargeCurve updates the recharge curve\nfunc (cm *ClientManager) SetRechargeCurve(curve PieceWiseLinear) {\n\tcm.lock.Lock()\n\tdefer cm.lock.Unlock()\n\n\tnow := cm.clock.Now()\n\tcm.updateRecharge(now)\n\tcm.curve = curve\n\tif len(curve) > 0 {\n\t\tcm.totalRecharge = curve[len(curve)-1].Y\n\t} else {\n\t\tcm.totalRecharge = 0\n\t}\n}\n\n// SetCapacityLimits sets the minimum and maximum total capacity as well as a\n// threshold value used for raising capFactor. If either the difference between\n// the total allowed and connected capacity is less than this threshold or their\n// ratio is less than capacityRaiseThresholdRatio, then capFactor is allowed to\n// rise slowly.\nfunc (cm *ClientManager) SetCapacityLimits(min, max, raiseThreshold uint64) {\n\tif min < 1 {\n\t\tmin = 1\n\t}\n\tcm.minLogTotalCap = math.Log(float64(min))\n\tif max < 1 {\n\t\tmax = 1\n\t}\n\tcm.maxLogTotalCap = math.Log(float64(max))\n\tcm.logTotalCap = cm.maxLogTotalCap\n\tcm.capacityRaiseThreshold = raiseThreshold\n\tcm.refreshCapacity()\n}\n\n// connect should be called when a client is connected, before passing it to any\n// other ClientManager function\nfunc (cm *ClientManager) connect(node *ClientNode) {\n\tcm.lock.Lock()\n\tdefer cm.lock.Unlock()\n\n\tnow := cm.clock.Now()\n\tcm.updateRecharge(now)\n\tnode.corrBufValue = int64(node.params.BufLimit)\n\tnode.rcLastIntValue = cm.rcLastIntValue\n\tnode.queueIndex = -1\n\tcm.updateTotalCapacity(now, true)\n\tcm.totalConnected += node.params.MinRecharge\n\tcm.updateRaiseLimit()\n}\n\n// disconnect should be called when a client is disconnected\nfunc (cm *ClientManager) disconnect(node *ClientNode) {\n\tcm.lock.Lock()\n\tdefer cm.lock.Unlock()\n\n\tnow := cm.clock.Now()\n\tcm.updateRecharge(cm.clock.Now())\n\tcm.updateTotalCapacity(now, true)\n\tcm.totalConnected -= node.params.MinRecharge\n\tcm.updateRaiseLimit()\n}\n\n// accepted is called when a request with the given maximum cost is accepted.\n// It returns a priority indicator for the request, which is used to determine placement\n// in the serving queue. Older requests have higher priority by default. 
If the client\n// is almost out of buffer, request priority is reduced.\nfunc (cm *ClientManager) accepted(node *ClientNode, maxCost uint64, now mclock.AbsTime) (priority int64) {\n\tcm.lock.Lock()\n\tdefer cm.lock.Unlock()\n\n\tcm.updateNodeRc(node, -int64(maxCost), &node.params, now)\n\trcTime := (node.params.BufLimit - uint64(node.corrBufValue)) * FixedPointMultiplier / node.params.MinRecharge\n\treturn -int64(now) - int64(rcTime)\n}\n\n// processed updates the client buffer according to the actual request cost after\n// serving has finished.\n//\n// Note: processed should always be called for all accepted requests\nfunc (cm *ClientManager) processed(node *ClientNode, maxCost, realCost uint64, now mclock.AbsTime) {\n\tif realCost > maxCost {\n\t\trealCost = maxCost\n\t}\n\tcm.updateBuffer(node, int64(maxCost-realCost), now)\n}\n\n// updateBuffer recalculates the corrected buffer value, adds the given value to it\n// and updates the node's actual buffer value if possible\nfunc (cm *ClientManager) updateBuffer(node *ClientNode, add int64, now mclock.AbsTime) {\n\tcm.lock.Lock()\n\tdefer cm.lock.Unlock()\n\n\tcm.updateNodeRc(node, add, &node.params, now)\n\tif node.corrBufValue > node.bufValue {\n\t\tif node.log != nil {\n\t\t\tnode.log.add(now, fmt.Sprintf(\"corrected  bv=%d  oldBv=%d\", node.corrBufValue, node.bufValue))\n\t\t}\n\t\tnode.bufValue = node.corrBufValue\n\t}\n}\n\n// updateParams updates the flow control parameters of a client node\nfunc (cm *ClientManager) updateParams(node *ClientNode, params ServerParams, now mclock.AbsTime) {\n\tcm.lock.Lock()\n\tdefer cm.lock.Unlock()\n\n\tcm.updateRecharge(now)\n\tcm.updateTotalCapacity(now, true)\n\tcm.totalConnected += params.MinRecharge - node.params.MinRecharge\n\tcm.updateRaiseLimit()\n\tcm.updateNodeRc(node, 0, &params, now)\n}\n\n// updateRaiseLimit recalculates the limiting value until which logTotalCap\n// can be raised when no client freeze events occur\nfunc (cm *ClientManager) updateRaiseLimit() {\n\tif cm.capacityRaiseThreshold == 0 {\n\t\tcm.logTotalCapRaiseLimit = 0\n\t\treturn\n\t}\n\tlimit := float64(cm.totalConnected + cm.capacityRaiseThreshold)\n\tlimit2 := float64(cm.totalConnected) * capacityRaiseThresholdRatio\n\tif limit2 > limit {\n\t\tlimit = limit2\n\t}\n\tif limit < 1 {\n\t\tlimit = 1\n\t}\n\tcm.logTotalCapRaiseLimit = math.Log(limit)\n}\n\n// updateRecharge updates the recharge integrator and checks the recharge queue\n// for nodes with recently filled buffers\nfunc (cm *ClientManager) updateRecharge(now mclock.AbsTime) {\n\tlastUpdate := cm.rcLastUpdate\n\tcm.rcLastUpdate = now\n\t// updating is done in multiple steps if node buffers are filled and sumRecharge\n\t// is decreased before the given target time\n\tfor cm.sumRecharge > 0 {\n\t\tsumRecharge := cm.sumRecharge\n\t\tif sumRecharge > cm.totalRecharge {\n\t\t\tsumRecharge = cm.totalRecharge\n\t\t}\n\t\tbonusRatio := float64(1)\n\t\tv := cm.curve.ValueAt(sumRecharge)\n\t\ts := float64(sumRecharge)\n\t\tif v > s && s > 0 {\n\t\t\tbonusRatio = v / s\n\t\t}\n\t\tdt := now - lastUpdate\n\t\t// fetch the client that finishes first\n\t\trcqNode := cm.rcQueue.PopItem().(*ClientNode) // if sumRecharge > 0 then the queue cannot be empty\n\t\t// check whether it has already finished\n\t\tdtNext := mclock.AbsTime(float64(rcqNode.rcFullIntValue-cm.rcLastIntValue) / bonusRatio)\n\t\tif dt < dtNext {\n\t\t\t// not finished yet, put it back, update integrator according\n\t\t\t// to current bonusRatio and return\n\t\t\tcm.rcQueue.Push(rcqNode, 
-rcqNode.rcFullIntValue)\n\t\t\tcm.rcLastIntValue += int64(bonusRatio * float64(dt))\n\t\t\treturn\n\t\t}\n\t\tlastUpdate += dtNext\n\t\t// finished recharging, update corrBufValue and sumRecharge if necessary and do next step\n\t\tif rcqNode.corrBufValue < int64(rcqNode.params.BufLimit) {\n\t\t\trcqNode.corrBufValue = int64(rcqNode.params.BufLimit)\n\t\t\tcm.sumRecharge -= rcqNode.params.MinRecharge\n\t\t}\n\t\tcm.rcLastIntValue = rcqNode.rcFullIntValue\n\t}\n}\n\n// updateNodeRc updates a node's corrBufValue and adds an external correction value.\n// It also adds or removes the rcQueue entry and updates ServerParams and sumRecharge if necessary.\nfunc (cm *ClientManager) updateNodeRc(node *ClientNode, bvc int64, params *ServerParams, now mclock.AbsTime) {\n\tcm.updateRecharge(now)\n\twasFull := true\n\tif node.corrBufValue != int64(node.params.BufLimit) {\n\t\twasFull = false\n\t\tnode.corrBufValue += (cm.rcLastIntValue - node.rcLastIntValue) * int64(node.params.MinRecharge) / FixedPointMultiplier\n\t\tif node.corrBufValue > int64(node.params.BufLimit) {\n\t\t\tnode.corrBufValue = int64(node.params.BufLimit)\n\t\t}\n\t\tnode.rcLastIntValue = cm.rcLastIntValue\n\t}\n\tnode.corrBufValue += bvc\n\tdiff := int64(params.BufLimit - node.params.BufLimit)\n\tif diff > 0 {\n\t\tnode.corrBufValue += diff\n\t}\n\tisFull := false\n\tif node.corrBufValue >= int64(params.BufLimit) {\n\t\tnode.corrBufValue = int64(params.BufLimit)\n\t\tisFull = true\n\t}\n\tif !wasFull {\n\t\tcm.sumRecharge -= node.params.MinRecharge\n\t}\n\tif params != &node.params {\n\t\tnode.params = *params\n\t}\n\tif !isFull {\n\t\tcm.sumRecharge += node.params.MinRecharge\n\t\tif node.queueIndex != -1 {\n\t\t\tcm.rcQueue.Remove(node.queueIndex)\n\t\t}\n\t\tnode.rcLastIntValue = cm.rcLastIntValue\n\t\tnode.rcFullIntValue = cm.rcLastIntValue + (int64(node.params.BufLimit)-node.corrBufValue)*FixedPointMultiplier/int64(node.params.MinRecharge)\n\t\tcm.rcQueue.Push(node, -node.rcFullIntValue)\n\t}\n}\n\n// reduceTotalCapacity reduces the total capacity allowance in case of a client freeze event\nfunc (cm *ClientManager) reduceTotalCapacity(frozenCap uint64) {\n\tcm.lock.Lock()\n\tdefer cm.lock.Unlock()\n\n\tratio := float64(1)\n\tif frozenCap < cm.totalConnected {\n\t\tratio = float64(frozenCap) / float64(cm.totalConnected)\n\t}\n\tnow := cm.clock.Now()\n\tcm.updateTotalCapacity(now, false)\n\tcm.logTotalCap -= capacityDropFactor * ratio\n\tif cm.logTotalCap < cm.minLogTotalCap {\n\t\tcm.logTotalCap = cm.minLogTotalCap\n\t}\n\tcm.updateTotalCapacity(now, true)\n}\n\n// updateTotalCapacity updates the total capacity factor. 
The capacity factor allows\n// the total capacity of the system to go over the allowed total recharge value\n// if clients enter the frozen state sufficiently rarely.\n// The capacity factor is dropped instantly by a small amount if a client is frozen.\n// It is raised slowly (with a large time constant) if the total connected capacity\n// is close to the total allowed amount and no clients are frozen.\nfunc (cm *ClientManager) updateTotalCapacity(now mclock.AbsTime, refresh bool) {\n\tdt := now - cm.capLastUpdate\n\tcm.capLastUpdate = now\n\n\tif cm.logTotalCap < cm.logTotalCapRaiseLimit {\n\t\tcm.logTotalCap += capacityRaiseTC * float64(dt)\n\t\tif cm.logTotalCap > cm.logTotalCapRaiseLimit {\n\t\t\tcm.logTotalCap = cm.logTotalCapRaiseLimit\n\t\t}\n\t}\n\tif cm.logTotalCap > cm.maxLogTotalCap {\n\t\tcm.logTotalCap = cm.maxLogTotalCap\n\t}\n\tif refresh {\n\t\tcm.refreshCapacity()\n\t}\n}\n\n// refreshCapacity recalculates the total capacity value and sends an update to the subscription\n// channel if the relative change of the value since the last update is more than 0.1 percent\nfunc (cm *ClientManager) refreshCapacity() {\n\ttotalCapacity := math.Exp(cm.logTotalCap)\n\tif totalCapacity >= cm.totalCapacity*0.999 && totalCapacity <= cm.totalCapacity*1.001 {\n\t\treturn\n\t}\n\tcm.totalCapacity = totalCapacity\n\tif cm.totalCapacityCh != nil {\n\t\tselect {\n\t\tcase cm.totalCapacityCh <- uint64(cm.totalCapacity):\n\t\tdefault:\n\t\t}\n\t}\n}\n\n// SubscribeTotalCapacity returns all future updates to the total capacity value\n// through a channel and also returns the current value\nfunc (cm *ClientManager) SubscribeTotalCapacity(ch chan uint64) uint64 {\n\tcm.lock.Lock()\n\tdefer cm.lock.Unlock()\n\n\tcm.totalCapacityCh = ch\n\treturn uint64(cm.totalCapacity)\n}\n\n// PieceWiseLinear is used to describe recharge curves\ntype PieceWiseLinear []struct{ X, Y uint64 }\n\n// ValueAt returns the curve's value at a given point\nfunc (pwl PieceWiseLinear) ValueAt(x uint64) float64 {\n\tl := 0\n\th := len(pwl)\n\tif h == 0 {\n\t\treturn 0\n\t}\n\tfor h != l {\n\t\tm := (l + h) / 2\n\t\tif x > pwl[m].X {\n\t\t\tl = m + 1\n\t\t} else {\n\t\t\th = m\n\t\t}\n\t}\n\tif l == 0 {\n\t\treturn float64(pwl[0].Y)\n\t}\n\tl--\n\tif h == len(pwl) {\n\t\treturn float64(pwl[l].Y)\n\t}\n\tdx := pwl[h].X - pwl[l].X\n\tif dx < 1 {\n\t\treturn float64(pwl[l].Y)\n\t}\n\treturn float64(pwl[l].Y) + float64(pwl[h].Y-pwl[l].Y)*float64(x-pwl[l].X)/float64(dx)\n}\n\n// Valid returns true if the X coordinates of the curve points are monotonically non-decreasing\nfunc (pwl PieceWiseLinear) Valid() bool {\n\tvar lastX uint64\n\tfor _, i := range pwl {\n\t\tif i.X < lastX {\n\t\t\treturn false\n\t\t}\n\t\tlastX = i.X\n\t}\n\treturn true\n}\n"
  },
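  {
    "path": "les/flowcontrol/curve_sketch_test.go",
    "content": "// NOTE: this test file is an illustrative sketch added alongside manager.go for\n// documentation purposes; it is not part of the original source tree and the\n// file name is hypothetical. It exercises only the exported PieceWiseLinear\n// type defined in manager.go.\n//\n// A recharge curve is a list of (X, Y) points with non-decreasing X values:\n// ValueAt interpolates linearly between neighbouring points and stays flat at\n// the final Y value beyond the last point.\npackage flowcontrol\n\nimport \"testing\"\n\nfunc TestPieceWiseLinearSketch(t *testing.T) {\n\t// A ramp from (0, 0) to (1000, 2000): inside the ramp the curve doubles x.\n\tcurve := PieceWiseLinear{{0, 0}, {1000, 2000}}\n\tif !curve.Valid() {\n\t\tt.Fatal(\"non-decreasing X coordinates should be accepted\")\n\t}\n\tif v := curve.ValueAt(500); v != 1000 {\n\t\tt.Fatalf(\"midpoint: got %f, want 1000\", v)\n\t}\n\t// Beyond the last point the curve is flat at the final Y value.\n\tif v := curve.ValueAt(5000); v != 2000 {\n\t\tt.Fatalf(\"beyond last point: got %f, want 2000\", v)\n\t}\n\t// Points with decreasing X coordinates are rejected.\n\tif (PieceWiseLinear{{1000, 0}, {0, 0}}).Valid() {\n\t\tt.Fatal(\"decreasing X coordinates should be rejected\")\n\t}\n}\n"
  },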
  {
    "path": "les/flowcontrol/manager_test.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage flowcontrol\n\nimport (\n\t\"math/rand\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n)\n\ntype testNode struct {\n\tnode               *ClientNode\n\tbufLimit, capacity uint64\n\twaitUntil          mclock.AbsTime\n\tindex, totalCost   uint64\n}\n\nconst (\n\ttestMaxCost = 1000000\n\ttestLength  = 100000\n)\n\n// testConstantTotalCapacity simulates multiple request sender nodes and verifies\n// whether the total amount of served requests matches the expected value based on\n// the total capacity and the duration of the test.\n// Some nodes are sending requests occasionally so that their buffer should regularly\n// reach the maximum while other nodes (the \"max capacity nodes\") are sending at the\n// maximum permitted rate. The max capacity nodes are changed multiple times during\n// a single test.\nfunc TestConstantTotalCapacity(t *testing.T) {\n\ttestConstantTotalCapacity(t, 10, 1, 0)\n\ttestConstantTotalCapacity(t, 10, 1, 1)\n\ttestConstantTotalCapacity(t, 30, 1, 0)\n\ttestConstantTotalCapacity(t, 30, 2, 3)\n\ttestConstantTotalCapacity(t, 100, 1, 0)\n\ttestConstantTotalCapacity(t, 100, 3, 5)\n\ttestConstantTotalCapacity(t, 100, 5, 10)\n}\n\nfunc testConstantTotalCapacity(t *testing.T, nodeCount, maxCapacityNodes, randomSend int) {\n\tclock := &mclock.Simulated{}\n\tnodes := make([]*testNode, nodeCount)\n\tvar totalCapacity uint64\n\tfor i := range nodes {\n\t\tnodes[i] = &testNode{capacity: uint64(50000 + rand.Intn(100000))}\n\t\ttotalCapacity += nodes[i].capacity\n\t}\n\tm := NewClientManager(PieceWiseLinear{{0, totalCapacity}}, clock)\n\tfor _, n := range nodes {\n\t\tn.bufLimit = n.capacity * 6000\n\t\tn.node = NewClientNode(m, ServerParams{BufLimit: n.bufLimit, MinRecharge: n.capacity})\n\t}\n\tmaxNodes := make([]int, maxCapacityNodes)\n\tfor i := range maxNodes {\n\t\t// we don't care if some indexes are selected multiple times\n\t\t// in that case we have fewer max nodes\n\t\tmaxNodes[i] = rand.Intn(nodeCount)\n\t}\n\n\tvar sendCount int\n\tfor i := 0; i < testLength; i++ {\n\t\tnow := clock.Now()\n\t\tfor _, idx := range maxNodes {\n\t\t\tfor nodes[idx].send(t, now) {\n\t\t\t}\n\t\t}\n\t\tif rand.Intn(testLength) < maxCapacityNodes*3 {\n\t\t\tmaxNodes[rand.Intn(maxCapacityNodes)] = rand.Intn(nodeCount)\n\t\t}\n\n\t\tsendCount += randomSend\n\t\tfailCount := randomSend * 10\n\t\tfor sendCount > 0 && failCount > 0 {\n\t\t\tif nodes[rand.Intn(nodeCount)].send(t, now) {\n\t\t\t\tsendCount--\n\t\t\t} else {\n\t\t\t\tfailCount--\n\t\t\t}\n\t\t}\n\t\tclock.Run(time.Millisecond)\n\t}\n\n\tvar totalCost uint64\n\tfor _, n := range nodes {\n\t\ttotalCost += n.totalCost\n\t}\n\tratio := float64(totalCost) / float64(totalCapacity) / 
testLength\n\tif ratio < 0.98 || ratio > 1.02 {\n\t\tt.Errorf(\"totalCost/totalCapacity/testLength ratio incorrect (expected: 1, got: %f)\", ratio)\n\t}\n\n}\n\nfunc (n *testNode) send(t *testing.T, now mclock.AbsTime) bool {\n\tif now < n.waitUntil {\n\t\treturn false\n\t}\n\tn.index++\n\tif ok, _, _ := n.node.AcceptRequest(0, n.index, testMaxCost); !ok {\n\t\tt.Fatalf(\"Rejected request after expected waiting time has passed\")\n\t}\n\trcost := uint64(rand.Int63n(testMaxCost))\n\tbv := n.node.RequestProcessed(0, n.index, testMaxCost, rcost)\n\tif bv < testMaxCost {\n\t\tn.waitUntil = now + mclock.AbsTime((testMaxCost-bv)*1001000/n.capacity)\n\t}\n\tn.totalCost += rcost\n\treturn true\n}\n"
  },
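  {
    "path": "les/flowcontrol/server_side_sketch_test.go",
    "content": "// NOTE: this test file is an illustrative sketch added alongside manager_test.go\n// for documentation purposes; it is not part of the original source tree and the\n// file name is hypothetical. It uses only the exported server-side API\n// (ClientManager, ClientNode) together with a simulated clock.\n//\n// The test shows the server-side accounting for a single client: a request\n// whose maximum cost exceeds the remaining buffer is rejected and the shortage\n// is reported, while an accepted request is charged its maximum cost up front\n// and partially refunded once the real cost is known.\npackage flowcontrol\n\nimport (\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n)\n\nfunc TestClientNodeAccountingSketch(t *testing.T) {\n\tclock := &mclock.Simulated{}\n\tcm := NewClientManager(PieceWiseLinear{{0, 1000}}, clock)\n\tdefer cm.Stop()\n\n\tnode := NewClientNode(cm, ServerParams{BufLimit: 100, MinRecharge: 10})\n\tdefer node.Disconnect()\n\n\t// A request costing more than the remaining buffer is rejected and the\n\t// missing amount is reported back.\n\tif ok, missing, _ := node.AcceptRequest(1, 1, 150); ok || missing != 50 {\n\t\tt.Fatalf(\"want rejection with shortage 50, got ok=%v missing=%d\", ok, missing)\n\t}\n\t// An affordable request is accepted and charged its full maximum cost.\n\tif ok, _, _ := node.AcceptRequest(2, 2, 100); !ok {\n\t\tt.Fatal(\"affordable request should have been accepted\")\n\t}\n\t// When serving finishes, the difference between the maximum and the real\n\t// cost is refunded: 100 - 40 = 60 remains in the reported buffer.\n\tif bv := node.RequestProcessed(2, 2, 100, 40); bv != 60 {\n\t\tt.Fatalf(\"want reported buffer value 60, got %d\", bv)\n\t}\n}\n"
  },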
  {
    "path": "les/handler_test.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"encoding/binary\"\n\t\"math/big\"\n\t\"math/rand\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\nfunc expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}) error {\n\ttype resp struct {\n\t\tReqID, BV uint64\n\t\tData      interface{}\n\t}\n\treturn p2p.ExpectMsg(r, msgcode, resp{reqID, bv, data})\n}\n\n// Tests that block headers can be retrieved from a remote chain based on user queries.\nfunc TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }\nfunc TestGetBlockHeadersLes3(t *testing.T) { testGetBlockHeaders(t, 3) }\nfunc TestGetBlockHeadersLes4(t *testing.T) { testGetBlockHeaders(t, 4) }\n\nfunc testGetBlockHeaders(t *testing.T, protocol int) {\n\tnetconfig := testnetConfig{\n\t\tblocks:    downloader.MaxHeaderFetch + 15,\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", protocol)\n\tdefer closePeer()\n\tbc := server.handler.blockchain\n\n\t// Create a \"random\" unknown hash for testing\n\tvar unknown common.Hash\n\tfor i := range unknown {\n\t\tunknown[i] = byte(i)\n\t}\n\t// Create a batch of tests for various scenarios\n\tlimit := uint64(MaxHeaderFetch)\n\ttests := []struct {\n\t\tquery  *GetBlockHeadersData // The query to execute for header retrieval\n\t\texpect []common.Hash        // The hashes of the block whose headers are expected\n\t}{\n\t\t// A single random block should be retrievable by hash and number too\n\t\t{\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1},\n\t\t\t[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},\n\t\t}, {\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1},\n\t\t\t[]common.Hash{bc.GetBlockByNumber(limit / 2).Hash()},\n\t\t},\n\t\t// Multiple headers should be retrievable in both directions\n\t\t{\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3},\n\t\t\t[]common.Hash{\n\t\t\t\tbc.GetBlockByNumber(limit / 
2).Hash(),\n\t\t\t\tbc.GetBlockByNumber(limit/2 + 1).Hash(),\n\t\t\t\tbc.GetBlockByNumber(limit/2 + 2).Hash(),\n\t\t\t},\n\t\t}, {\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true},\n\t\t\t[]common.Hash{\n\t\t\t\tbc.GetBlockByNumber(limit / 2).Hash(),\n\t\t\t\tbc.GetBlockByNumber(limit/2 - 1).Hash(),\n\t\t\t\tbc.GetBlockByNumber(limit/2 - 2).Hash(),\n\t\t\t},\n\t\t},\n\t\t// Multiple headers with skip lists should be retrievable\n\t\t{\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3},\n\t\t\t[]common.Hash{\n\t\t\t\tbc.GetBlockByNumber(limit / 2).Hash(),\n\t\t\t\tbc.GetBlockByNumber(limit/2 + 4).Hash(),\n\t\t\t\tbc.GetBlockByNumber(limit/2 + 8).Hash(),\n\t\t\t},\n\t\t}, {\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true},\n\t\t\t[]common.Hash{\n\t\t\t\tbc.GetBlockByNumber(limit / 2).Hash(),\n\t\t\t\tbc.GetBlockByNumber(limit/2 - 4).Hash(),\n\t\t\t\tbc.GetBlockByNumber(limit/2 - 8).Hash(),\n\t\t\t},\n\t\t},\n\t\t// The chain endpoints should be retrievable\n\t\t{\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1},\n\t\t\t[]common.Hash{bc.GetBlockByNumber(0).Hash()},\n\t\t}, {\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64()}, Amount: 1},\n\t\t\t[]common.Hash{bc.CurrentBlock().Hash()},\n\t\t},\n\t\t// Ensure protocol limits are honored\n\t\t//{\n\t\t//\t&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true},\n\t\t//\t[]common.Hash{},\n\t\t//},\n\t\t// Check that requesting more than available is handled gracefully\n\t\t{\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3},\n\t\t\t[]common.Hash{\n\t\t\t\tbc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),\n\t\t\t\tbc.GetBlockByNumber(bc.CurrentBlock().NumberU64()).Hash(),\n\t\t\t},\n\t\t}, {\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true},\n\t\t\t[]common.Hash{\n\t\t\t\tbc.GetBlockByNumber(4).Hash(),\n\t\t\t\tbc.GetBlockByNumber(0).Hash(),\n\t\t\t},\n\t\t},\n\t\t// Check that requesting more than available is handled gracefully, even if mid-skip\n\t\t{\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3},\n\t\t\t[]common.Hash{\n\t\t\t\tbc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(),\n\t\t\t\tbc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1).Hash(),\n\t\t\t},\n\t\t}, {\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true},\n\t\t\t[]common.Hash{\n\t\t\t\tbc.GetBlockByNumber(4).Hash(),\n\t\t\t\tbc.GetBlockByNumber(1).Hash(),\n\t\t\t},\n\t\t},\n\t\t// Check that non-existent headers aren't returned\n\t\t{\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1},\n\t\t\t[]common.Hash{},\n\t\t}, {\n\t\t\t&GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() + 1}, Amount: 1},\n\t\t\t[]common.Hash{},\n\t\t},\n\t}\n\t// Run each of the tests and verify the results against the chain\n\tvar reqID uint64\n\tfor i, tt := range tests {\n\t\t// Collect the headers to expect in the response\n\t\tvar headers []*types.Header\n\t\tfor _, hash := range tt.expect {\n\t\t\theaders = append(headers, bc.GetHeaderByHash(hash))\n\t\t}\n\t\t// Send the hash request and verify the 
response\n\t\treqID++\n\n\t\tsendRequest(rawPeer.app, GetBlockHeadersMsg, reqID, tt.query)\n\t\tif err := expectResponse(rawPeer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil {\n\t\t\tt.Errorf(\"test %d: headers mismatch: %v\", i, err)\n\t\t}\n\t}\n}\n\n// Tests that block contents can be retrieved from a remote chain based on their hashes.\nfunc TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }\nfunc TestGetBlockBodiesLes3(t *testing.T) { testGetBlockBodies(t, 3) }\nfunc TestGetBlockBodiesLes4(t *testing.T) { testGetBlockBodies(t, 4) }\n\nfunc testGetBlockBodies(t *testing.T, protocol int) {\n\tnetconfig := testnetConfig{\n\t\tblocks:    downloader.MaxHeaderFetch + 15,\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", protocol)\n\tdefer closePeer()\n\n\tbc := server.handler.blockchain\n\n\t// Create a batch of tests for various scenarios\n\tlimit := MaxBodyFetch\n\ttests := []struct {\n\t\trandom    int           // Number of blocks to fetch randomly from the chain\n\t\texplicit  []common.Hash // Explicitly requested blocks\n\t\tavailable []bool        // Availability of explicitly requested blocks\n\t\texpected  int           // Total number of existing blocks to expect\n\t}{\n\t\t{1, nil, nil, 1},         // A single random block should be retrievable\n\t\t{10, nil, nil, 10},       // Multiple random blocks should be retrievable\n\t\t{limit, nil, nil, limit}, // The maximum possible blocks should be retrievable\n\t\t//{limit + 1, nil, nil, limit},                                  // No more than the possible block count should be returned\n\t\t{0, []common.Hash{bc.Genesis().Hash()}, []bool{true}, 1},      // The genesis block should be retrievable\n\t\t{0, []common.Hash{bc.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable\n\t\t{0, []common.Hash{{}}, []bool{false}, 0},                      // A non existent block should not be returned\n\n\t\t// Existing and non-existing blocks interleaved should not cause problems\n\t\t{0, []common.Hash{\n\t\t\t{},\n\t\t\tbc.GetBlockByNumber(1).Hash(),\n\t\t\t{},\n\t\t\tbc.GetBlockByNumber(10).Hash(),\n\t\t\t{},\n\t\t\tbc.GetBlockByNumber(100).Hash(),\n\t\t\t{},\n\t\t}, []bool{false, true, false, true, false, true, false}, 3},\n\t}\n\t// Run each of the tests and verify the results against the chain\n\tvar reqID uint64\n\tfor i, tt := range tests {\n\t\t// Collect the hashes to request, and the response to expect\n\t\tvar hashes []common.Hash\n\t\tseen := make(map[int64]bool)\n\t\tvar bodies []*types.Body\n\n\t\tfor j := 0; j < tt.random; j++ {\n\t\t\tfor {\n\t\t\t\tnum := rand.Int63n(int64(bc.CurrentBlock().NumberU64()))\n\t\t\t\tif !seen[num] {\n\t\t\t\t\tseen[num] = true\n\n\t\t\t\t\tblock := bc.GetBlockByNumber(uint64(num))\n\t\t\t\t\thashes = append(hashes, block.Hash())\n\t\t\t\t\tif len(bodies) < tt.expected {\n\t\t\t\t\t\tbodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor j, hash := range tt.explicit {\n\t\t\thashes = append(hashes, hash)\n\t\t\tif tt.available[j] && len(bodies) < tt.expected {\n\t\t\t\tblock := bc.GetBlockByHash(hash)\n\t\t\t\tbodies = append(bodies, &types.Body{Transactions: block.Transactions(), Uncles: block.Uncles()})\n\t\t\t}\n\t\t}\n\t\treqID++\n\n\t\t// Send the hash request and verify the 
response\n\t\tsendRequest(rawPeer.app, GetBlockBodiesMsg, reqID, hashes)\n\t\tif err := expectResponse(rawPeer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil {\n\t\t\tt.Errorf(\"test %d: bodies mismatch: %v\", i, err)\n\t\t}\n\t}\n}\n\n// Tests that the contract codes can be retrieved based on account addresses.\nfunc TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }\nfunc TestGetCodeLes3(t *testing.T) { testGetCode(t, 3) }\nfunc TestGetCodeLes4(t *testing.T) { testGetCode(t, 4) }\n\nfunc testGetCode(t *testing.T, protocol int) {\n\t// Assemble the test environment\n\tnetconfig := testnetConfig{\n\t\tblocks:    4,\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", protocol)\n\tdefer closePeer()\n\n\tbc := server.handler.blockchain\n\n\tvar codereqs []*CodeReq\n\tvar codes [][]byte\n\tfor i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {\n\t\theader := bc.GetHeaderByNumber(i)\n\t\treq := &CodeReq{\n\t\t\tBHash:  header.Hash(),\n\t\t\tAccKey: crypto.Keccak256(testContractAddr[:]),\n\t\t}\n\t\tcodereqs = append(codereqs, req)\n\t\tif i >= testContractDeployed {\n\t\t\tcodes = append(codes, testContractCodeDeployed)\n\t\t}\n\t}\n\n\tsendRequest(rawPeer.app, GetCodeMsg, 42, codereqs)\n\tif err := expectResponse(rawPeer.app, CodeMsg, 42, testBufLimit, codes); err != nil {\n\t\tt.Errorf(\"codes mismatch: %v\", err)\n\t}\n}\n\n// Tests that the stale contract codes can't be retrieved based on account addresses.\nfunc TestGetStaleCodeLes2(t *testing.T) { testGetStaleCode(t, 2) }\nfunc TestGetStaleCodeLes3(t *testing.T) { testGetStaleCode(t, 3) }\nfunc TestGetStaleCodeLes4(t *testing.T) { testGetStaleCode(t, 4) }\n\nfunc testGetStaleCode(t *testing.T, protocol int) {\n\tnetconfig := testnetConfig{\n\t\tblocks:    core.TriesInMemory + 4,\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", protocol)\n\tdefer closePeer()\n\n\tbc := server.handler.blockchain\n\n\tcheck := func(number uint64, expected [][]byte) {\n\t\treq := &CodeReq{\n\t\t\tBHash:  bc.GetHeaderByNumber(number).Hash(),\n\t\t\tAccKey: crypto.Keccak256(testContractAddr[:]),\n\t\t}\n\t\tsendRequest(rawPeer.app, GetCodeMsg, 42, []*CodeReq{req})\n\t\tif err := expectResponse(rawPeer.app, CodeMsg, 42, testBufLimit, expected); err != nil {\n\t\t\tt.Errorf(\"codes mismatch: %v\", err)\n\t\t}\n\t}\n\tcheck(0, [][]byte{})                                                          // Non-exist contract\n\tcheck(testContractDeployed, [][]byte{})                                       // Stale contract\n\tcheck(bc.CurrentHeader().Number.Uint64(), [][]byte{testContractCodeDeployed}) // Fresh contract\n}\n\n// Tests that the transaction receipts can be retrieved based on hashes.\nfunc TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }\nfunc TestGetReceiptLes3(t *testing.T) { testGetReceipt(t, 3) }\nfunc TestGetReceiptLes4(t *testing.T) { testGetReceipt(t, 4) }\n\nfunc testGetReceipt(t *testing.T, protocol int) {\n\t// Assemble the test environment\n\tnetconfig := testnetConfig{\n\t\tblocks:    4,\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", protocol)\n\tdefer closePeer()\n\n\tbc := 
server.handler.blockchain\n\n\t// Collect the hashes to request, and the response to expect\n\tvar receipts []types.Receipts\n\tvar hashes []common.Hash\n\tfor i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {\n\t\tblock := bc.GetBlockByNumber(i)\n\n\t\thashes = append(hashes, block.Hash())\n\t\treceipts = append(receipts, rawdb.ReadRawReceipts(server.db, block.Hash(), block.NumberU64()))\n\t}\n\t// Send the hash request and verify the response\n\tsendRequest(rawPeer.app, GetReceiptsMsg, 42, hashes)\n\tif err := expectResponse(rawPeer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil {\n\t\tt.Errorf(\"receipts mismatch: %v\", err)\n\t}\n}\n\n// Tests that trie merkle proofs can be retrieved\nfunc TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }\nfunc TestGetProofsLes3(t *testing.T) { testGetProofs(t, 3) }\nfunc TestGetProofsLes4(t *testing.T) { testGetProofs(t, 4) }\n\nfunc testGetProofs(t *testing.T, protocol int) {\n\t// Assemble the test environment\n\tnetconfig := testnetConfig{\n\t\tblocks:    4,\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", protocol)\n\tdefer closePeer()\n\n\tbc := server.handler.blockchain\n\n\tvar proofreqs []ProofReq\n\tproofsV2 := light.NewNodeSet()\n\n\taccounts := []common.Address{bankAddr, userAddr1, userAddr2, signerAddr, {}}\n\tfor i := uint64(0); i <= bc.CurrentBlock().NumberU64(); i++ {\n\t\theader := bc.GetHeaderByNumber(i)\n\t\ttrie, _ := trie.New(header.Root, trie.NewDatabase(server.db))\n\n\t\tfor _, acc := range accounts {\n\t\t\treq := ProofReq{\n\t\t\t\tBHash: header.Hash(),\n\t\t\t\tKey:   crypto.Keccak256(acc[:]),\n\t\t\t}\n\t\t\tproofreqs = append(proofreqs, req)\n\t\t\ttrie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)\n\t\t}\n\t}\n\t// Send the proof request and verify the response\n\tsendRequest(rawPeer.app, GetProofsV2Msg, 42, proofreqs)\n\tif err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {\n\t\tt.Errorf(\"proofs mismatch: %v\", err)\n\t}\n}\n\n// Tests that the stale contract codes can't be retrieved based on account addresses.\nfunc TestGetStaleProofLes2(t *testing.T) { testGetStaleProof(t, 2) }\nfunc TestGetStaleProofLes3(t *testing.T) { testGetStaleProof(t, 3) }\nfunc TestGetStaleProofLes4(t *testing.T) { testGetStaleProof(t, 4) }\n\nfunc testGetStaleProof(t *testing.T, protocol int) {\n\tnetconfig := testnetConfig{\n\t\tblocks:    core.TriesInMemory + 4,\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", protocol)\n\tdefer closePeer()\n\n\tbc := server.handler.blockchain\n\n\tcheck := func(number uint64, wantOK bool) {\n\t\tvar (\n\t\t\theader  = bc.GetHeaderByNumber(number)\n\t\t\taccount = crypto.Keccak256(userAddr1.Bytes())\n\t\t)\n\t\treq := &ProofReq{\n\t\t\tBHash: header.Hash(),\n\t\t\tKey:   account,\n\t\t}\n\t\tsendRequest(rawPeer.app, GetProofsV2Msg, 42, []*ProofReq{req})\n\n\t\tvar expected []rlp.RawValue\n\t\tif wantOK {\n\t\t\tproofsV2 := light.NewNodeSet()\n\t\t\tt, _ := trie.New(header.Root, trie.NewDatabase(server.db))\n\t\t\tt.Prove(account, 0, proofsV2)\n\t\t\texpected = proofsV2.NodeList()\n\t\t}\n\t\tif err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil {\n\t\t\tt.Errorf(\"codes mismatch: %v\", err)\n\t\t}\n\t}\n\tcheck(0, 
false)                                 // Non-exist proof\n\tcheck(2, false)                                 // Stale proof\n\tcheck(bc.CurrentHeader().Number.Uint64(), true) // Fresh proof\n}\n\n// Tests that CHT proofs can be correctly retrieved.\nfunc TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }\nfunc TestGetCHTProofsLes3(t *testing.T) { testGetCHTProofs(t, 3) }\nfunc TestGetCHTProofsLes4(t *testing.T) { testGetCHTProofs(t, 4) }\n\nfunc testGetCHTProofs(t *testing.T, protocol int) {\n\tvar (\n\t\tconfig       = light.TestServerIndexerConfig\n\t\twaitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {\n\t\t\tfor {\n\t\t\t\tcs, _, _ := cIndexer.Sections()\n\t\t\t\tif cs >= 1 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t\tnetconfig = testnetConfig{\n\t\t\tblocks:    int(config.ChtSize + config.ChtConfirms),\n\t\t\tprotocol:  protocol,\n\t\t\tindexFn:   waitIndexers,\n\t\t\tnopruning: true,\n\t\t}\n\t)\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", protocol)\n\tdefer closePeer()\n\n\tbc := server.handler.blockchain\n\n\t// Assemble the proofs from the different protocols\n\theader := bc.GetHeaderByNumber(config.ChtSize - 1)\n\trlp, _ := rlp.EncodeToBytes(header)\n\n\tkey := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(key, config.ChtSize-1)\n\n\tproofsV2 := HelperTrieResps{\n\t\tAuxData: [][]byte{rlp},\n\t}\n\troot := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())\n\ttrie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))\n\ttrie.Prove(key, 0, &proofsV2.Proofs)\n\t// Assemble the requests for the different protocols\n\trequestsV2 := []HelperTrieReq{{\n\t\tType:    htCanonical,\n\t\tTrieIdx: 0,\n\t\tKey:     key,\n\t\tAuxReq:  htAuxHeader,\n\t}}\n\t// Send the proof request and verify the response\n\tsendRequest(rawPeer.app, GetHelperTrieProofsMsg, 42, requestsV2)\n\tif err := expectResponse(rawPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {\n\t\tt.Errorf(\"proofs mismatch: %v\", err)\n\t}\n}\n\nfunc TestGetBloombitsProofsLes2(t *testing.T) { testGetBloombitsProofs(t, 2) }\nfunc TestGetBloombitsProofsLes3(t *testing.T) { testGetBloombitsProofs(t, 3) }\nfunc TestGetBloombitsProofsLes4(t *testing.T) { testGetBloombitsProofs(t, 4) }\n\n// Tests that bloombits proofs can be correctly retrieved.\nfunc testGetBloombitsProofs(t *testing.T, protocol int) {\n\tvar (\n\t\tconfig       = light.TestServerIndexerConfig\n\t\twaitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {\n\t\t\tfor {\n\t\t\t\tbts, _, _ := btIndexer.Sections()\n\t\t\t\tif bts >= 1 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t\tnetconfig = testnetConfig{\n\t\t\tblocks:    int(config.BloomTrieSize + config.BloomTrieConfirms),\n\t\t\tprotocol:  protocol,\n\t\t\tindexFn:   waitIndexers,\n\t\t\tnopruning: true,\n\t\t}\n\t)\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", protocol)\n\tdefer closePeer()\n\n\tbc := server.handler.blockchain\n\n\t// Request and verify each bit of the bloom bits proofs\n\tfor bit := 0; bit < 2048; bit++ {\n\t\t// Assemble the request and proofs for the bloombits\n\t\tkey := make([]byte, 10)\n\n\t\tbinary.BigEndian.PutUint16(key[:2], uint16(bit))\n\t\t// Only the first bloom section has 
data.\n\t\tbinary.BigEndian.PutUint64(key[2:], 0)\n\n\t\trequests := []HelperTrieReq{{\n\t\t\tType:    htBloomBits,\n\t\t\tTrieIdx: 0,\n\t\t\tKey:     key,\n\t\t}}\n\t\tvar proofs HelperTrieResps\n\n\t\troot := light.GetBloomTrieRoot(server.db, 0, bc.GetHeaderByNumber(config.BloomTrieSize-1).Hash())\n\t\ttrie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.BloomTrieTablePrefix)))\n\t\ttrie.Prove(key, 0, &proofs.Proofs)\n\n\t\t// Send the proof request and verify the response\n\t\tsendRequest(rawPeer.app, GetHelperTrieProofsMsg, 42, requests)\n\t\tif err := expectResponse(rawPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil {\n\t\t\tt.Errorf(\"bit %d: proofs mismatch: %v\", bit, err)\n\t\t}\n\t}\n}\n\nfunc TestTransactionStatusLes2(t *testing.T) { testTransactionStatus(t, lpv2) }\nfunc TestTransactionStatusLes3(t *testing.T) { testTransactionStatus(t, lpv3) }\nfunc TestTransactionStatusLes4(t *testing.T) { testTransactionStatus(t, lpv4) }\n\nfunc testTransactionStatus(t *testing.T, protocol int) {\n\tnetconfig := testnetConfig{\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", protocol)\n\tdefer closePeer()\n\n\tserver.handler.addTxsSync = true\n\n\tchain := server.handler.blockchain\n\n\tvar reqID uint64\n\n\ttest := func(tx *types.Transaction, send bool, expStatus light.TxStatus) {\n\t\treqID++\n\t\tif send {\n\t\t\tsendRequest(rawPeer.app, SendTxV2Msg, reqID, types.Transactions{tx})\n\t\t} else {\n\t\t\tsendRequest(rawPeer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()})\n\t\t}\n\t\tif err := expectResponse(rawPeer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil {\n\t\t\tt.Errorf(\"transaction status mismatch\")\n\t\t}\n\t}\n\tsigner := types.HomesteadSigner{}\n\n\t// test error status by sending an underpriced transaction\n\ttx0, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey)\n\ttest(tx0, true, light.TxStatus{Status: core.TxStatusUnknown, Error: core.ErrUnderpriced.Error()})\n\n\ttx1, _ := types.SignTx(types.NewTransaction(0, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)\n\ttest(tx1, false, light.TxStatus{Status: core.TxStatusUnknown}) // query before sending, should be unknown\n\ttest(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // send valid processable tx, should return pending\n\ttest(tx1, true, light.TxStatus{Status: core.TxStatusPending})  // adding it again should not return an error\n\n\ttx2, _ := types.SignTx(types.NewTransaction(1, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)\n\ttx3, _ := types.SignTx(types.NewTransaction(2, userAddr1, big.NewInt(10000), params.TxGas, big.NewInt(100000000000), nil), signer, bankKey)\n\t// send transactions in the wrong order, tx3 should be queued\n\ttest(tx3, true, light.TxStatus{Status: core.TxStatusQueued})\n\ttest(tx2, true, light.TxStatus{Status: core.TxStatusPending})\n\t// query again, now tx3 should be pending too\n\ttest(tx3, false, light.TxStatus{Status: core.TxStatusPending})\n\n\t// generate and add a block with tx1 and tx2 included\n\tgchain, _ := core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 1, func(i int, block *core.BlockGen) {\n\t\tblock.AddTx(tx1)\n\t\tblock.AddTx(tx2)\n\t})\n\tif _, 
err := chain.InsertChain(gchain); err != nil {\n\t\tpanic(err)\n\t}\n\t// wait until TxPool processes the inserted block\n\tfor i := 0; i < 10; i++ {\n\t\tif pending, _ := server.handler.txpool.Stats(); pending == 1 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\tif pending, _ := server.handler.txpool.Stats(); pending != 1 {\n\t\tt.Fatalf(\"pending count mismatch: have %d, want 1\", pending)\n\t}\n\t// Discard new block announcement\n\tmsg, _ := rawPeer.app.ReadMsg()\n\tmsg.Discard()\n\n\t// check if their status is included now\n\tblock1hash := rawdb.ReadCanonicalHash(server.db, 1)\n\ttest(tx1, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 0}})\n\n\ttest(tx2, false, light.TxStatus{Status: core.TxStatusIncluded, Lookup: &rawdb.LegacyTxLookupEntry{BlockHash: block1hash, BlockIndex: 1, Index: 1}})\n\n\t// create a reorg that rolls them back\n\tgchain, _ = core.GenerateChain(params.TestChainConfig, chain.GetBlockByNumber(0), ethash.NewFaker(), server.db, 2, func(i int, block *core.BlockGen) {})\n\tif _, err := chain.InsertChain(gchain); err != nil {\n\t\tpanic(err)\n\t}\n\t// wait until TxPool processes the reorg\n\tfor i := 0; i < 10; i++ {\n\t\tif pending, _ := server.handler.txpool.Stats(); pending == 3 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\tif pending, _ := server.handler.txpool.Stats(); pending != 3 {\n\t\tt.Fatalf(\"pending count mismatch: have %d, want 3\", pending)\n\t}\n\t// Discard new block announcement\n\tmsg, _ = rawPeer.app.ReadMsg()\n\tmsg.Discard()\n\n\t// check if their status is pending again\n\ttest(tx1, false, light.TxStatus{Status: core.TxStatusPending})\n\ttest(tx2, false, light.TxStatus{Status: core.TxStatusPending})\n}\n\nfunc TestStopResumeLES3(t *testing.T) { testStopResume(t, lpv3) }\nfunc TestStopResumeLES4(t *testing.T) { testStopResume(t, lpv4) }\n\nfunc testStopResume(t *testing.T, protocol int) {\n\tnetconfig := testnetConfig{\n\t\tprotocol:  protocol,\n\t\tsimClock:  true,\n\t\tnopruning: true,\n\t}\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\tserver.handler.server.costTracker.testing = true\n\tserver.handler.server.costTracker.testCostList = testCostList(testBufLimit / 10)\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", protocol)\n\tdefer closePeer()\n\n\tvar (\n\t\treqID    uint64\n\t\texpBuf   = testBufLimit\n\t\ttestCost = testBufLimit / 10\n\t)\n\theader := server.handler.blockchain.CurrentHeader()\n\treq := func() {\n\t\treqID++\n\t\tsendRequest(rawPeer.app, GetBlockHeadersMsg, reqID, &GetBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1})\n\t}\n\tfor i := 1; i <= 5; i++ {\n\t\t// send requests while we still have enough buffer and expect a response\n\t\tfor expBuf >= testCost {\n\t\t\treq()\n\t\t\texpBuf -= testCost\n\t\t\tif err := expectResponse(rawPeer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil {\n\t\t\t\tt.Errorf(\"expected response and failed: %v\", err)\n\t\t\t}\n\t\t}\n\t\t// send some more requests in excess and expect a single StopMsg\n\t\tc := i\n\t\tfor c > 0 {\n\t\t\treq()\n\t\t\tc--\n\t\t}\n\t\tif err := p2p.ExpectMsg(rawPeer.app, StopMsg, nil); err != nil {\n\t\t\tt.Errorf(\"expected StopMsg and failed: %v\", err)\n\t\t}\n\t\t// wait until the buffer is recharged by half of the limit\n\t\twait := testBufLimit / testBufRecharge / 2\n\t\tserver.clock.(*mclock.Simulated).Run(time.Millisecond * 
time.Duration(wait))\n\n\t\t// expect a ResumeMsg with the partially recharged buffer value\n\t\texpBuf += testBufRecharge * wait\n\t\tif err := p2p.ExpectMsg(rawPeer.app, ResumeMsg, expBuf); err != nil {\n\t\t\tt.Errorf(\"expected ResumeMsg and failed: %v\", err)\n\t\t}\n\t}\n}\n"
  },
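  {
    "path": "les/handler_example_test.go",
    "content": "// Editor's note: this is a hypothetical illustrative file, not part of the\n// go-ethereum source tree. It condenses the request/response pattern the\n// protocol tests in this package repeat: start a client/server test\n// environment, attach a raw peer, send one request message and assert on\n// the reply. All identifiers are taken from the surrounding les tests.\npackage les\n\nimport (\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/core/types\"\n)\n\nfunc TestExampleHeaderRoundTrip(t *testing.T) {\n\tnetconfig := testnetConfig{\n\t\tblocks:    4,\n\t\tprotocol:  lpv3,\n\t\tnopruning: true,\n\t}\n\tserver, _, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\trawPeer, closePeer, _ := server.newRawPeer(t, \"peer\", lpv3)\n\tdefer closePeer()\n\n\t// Ask for the current head header by hash; a correct reply echoes the\n\t// reqID, carries the peer's buffer value (testBufLimit in these tests)\n\t// and contains exactly the requested header.\n\thead := server.handler.blockchain.CurrentHeader()\n\tsendRequest(rawPeer.app, GetBlockHeadersMsg, 1, &GetBlockHeadersData{\n\t\tOrigin: hashOrNumber{Hash: head.Hash()},\n\t\tAmount: 1,\n\t})\n\tif err := expectResponse(rawPeer.app, BlockHeadersMsg, 1, testBufLimit, []*types.Header{head}); err != nil {\n\t\tt.Errorf(\"header mismatch: %v\", err)\n\t}\n}\n"
  },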
  {
    "path": "les/metrics.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"github.com/ethereum/go-ethereum/metrics\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n)\n\nvar (\n\tmiscInPacketsMeter           = metrics.NewRegisteredMeter(\"les/misc/in/packets/total\", nil)\n\tmiscInTrafficMeter           = metrics.NewRegisteredMeter(\"les/misc/in/traffic/total\", nil)\n\tmiscInHeaderPacketsMeter     = metrics.NewRegisteredMeter(\"les/misc/in/packets/header\", nil)\n\tmiscInHeaderTrafficMeter     = metrics.NewRegisteredMeter(\"les/misc/in/traffic/header\", nil)\n\tmiscInBodyPacketsMeter       = metrics.NewRegisteredMeter(\"les/misc/in/packets/body\", nil)\n\tmiscInBodyTrafficMeter       = metrics.NewRegisteredMeter(\"les/misc/in/traffic/body\", nil)\n\tmiscInCodePacketsMeter       = metrics.NewRegisteredMeter(\"les/misc/in/packets/code\", nil)\n\tmiscInCodeTrafficMeter       = metrics.NewRegisteredMeter(\"les/misc/in/traffic/code\", nil)\n\tmiscInReceiptPacketsMeter    = metrics.NewRegisteredMeter(\"les/misc/in/packets/receipt\", nil)\n\tmiscInReceiptTrafficMeter    = metrics.NewRegisteredMeter(\"les/misc/in/traffic/receipt\", nil)\n\tmiscInTrieProofPacketsMeter  = metrics.NewRegisteredMeter(\"les/misc/in/packets/proof\", nil)\n\tmiscInTrieProofTrafficMeter  = metrics.NewRegisteredMeter(\"les/misc/in/traffic/proof\", nil)\n\tmiscInHelperTriePacketsMeter = metrics.NewRegisteredMeter(\"les/misc/in/packets/helperTrie\", nil)\n\tmiscInHelperTrieTrafficMeter = metrics.NewRegisteredMeter(\"les/misc/in/traffic/helperTrie\", nil)\n\tmiscInTxsPacketsMeter        = metrics.NewRegisteredMeter(\"les/misc/in/packets/txs\", nil)\n\tmiscInTxsTrafficMeter        = metrics.NewRegisteredMeter(\"les/misc/in/traffic/txs\", nil)\n\tmiscInTxStatusPacketsMeter   = metrics.NewRegisteredMeter(\"les/misc/in/packets/txStatus\", nil)\n\tmiscInTxStatusTrafficMeter   = metrics.NewRegisteredMeter(\"les/misc/in/traffic/txStatus\", nil)\n\n\tmiscOutPacketsMeter           = metrics.NewRegisteredMeter(\"les/misc/out/packets/total\", nil)\n\tmiscOutTrafficMeter           = metrics.NewRegisteredMeter(\"les/misc/out/traffic/total\", nil)\n\tmiscOutHeaderPacketsMeter     = metrics.NewRegisteredMeter(\"les/misc/out/packets/header\", nil)\n\tmiscOutHeaderTrafficMeter     = metrics.NewRegisteredMeter(\"les/misc/out/traffic/header\", nil)\n\tmiscOutBodyPacketsMeter       = metrics.NewRegisteredMeter(\"les/misc/out/packets/body\", nil)\n\tmiscOutBodyTrafficMeter       = metrics.NewRegisteredMeter(\"les/misc/out/traffic/body\", nil)\n\tmiscOutCodePacketsMeter       = metrics.NewRegisteredMeter(\"les/misc/out/packets/code\", nil)\n\tmiscOutCodeTrafficMeter       = metrics.NewRegisteredMeter(\"les/misc/out/traffic/code\", nil)\n\tmiscOutReceiptPacketsMeter    = 
metrics.NewRegisteredMeter(\"les/misc/out/packets/receipt\", nil)\n\tmiscOutReceiptTrafficMeter    = metrics.NewRegisteredMeter(\"les/misc/out/traffic/receipt\", nil)\n\tmiscOutTrieProofPacketsMeter  = metrics.NewRegisteredMeter(\"les/misc/out/packets/proof\", nil)\n\tmiscOutTrieProofTrafficMeter  = metrics.NewRegisteredMeter(\"les/misc/out/traffic/proof\", nil)\n\tmiscOutHelperTriePacketsMeter = metrics.NewRegisteredMeter(\"les/misc/out/packets/helperTrie\", nil)\n\tmiscOutHelperTrieTrafficMeter = metrics.NewRegisteredMeter(\"les/misc/out/traffic/helperTrie\", nil)\n\tmiscOutTxsPacketsMeter        = metrics.NewRegisteredMeter(\"les/misc/out/packets/txs\", nil)\n\tmiscOutTxsTrafficMeter        = metrics.NewRegisteredMeter(\"les/misc/out/traffic/txs\", nil)\n\tmiscOutTxStatusPacketsMeter   = metrics.NewRegisteredMeter(\"les/misc/out/packets/txStatus\", nil)\n\tmiscOutTxStatusTrafficMeter   = metrics.NewRegisteredMeter(\"les/misc/out/traffic/txStatus\", nil)\n\n\tmiscServingTimeHeaderTimer     = metrics.NewRegisteredTimer(\"les/misc/serve/header\", nil)\n\tmiscServingTimeBodyTimer       = metrics.NewRegisteredTimer(\"les/misc/serve/body\", nil)\n\tmiscServingTimeCodeTimer       = metrics.NewRegisteredTimer(\"les/misc/serve/code\", nil)\n\tmiscServingTimeReceiptTimer    = metrics.NewRegisteredTimer(\"les/misc/serve/receipt\", nil)\n\tmiscServingTimeTrieProofTimer  = metrics.NewRegisteredTimer(\"les/misc/serve/proof\", nil)\n\tmiscServingTimeHelperTrieTimer = metrics.NewRegisteredTimer(\"les/misc/serve/helperTrie\", nil)\n\tmiscServingTimeTxTimer         = metrics.NewRegisteredTimer(\"les/misc/serve/txs\", nil)\n\tmiscServingTimeTxStatusTimer   = metrics.NewRegisteredTimer(\"les/misc/serve/txStatus\", nil)\n\n\tconnectionTimer       = metrics.NewRegisteredTimer(\"les/connection/duration\", nil)\n\tserverConnectionGauge = metrics.NewRegisteredGauge(\"les/connection/server\", nil)\n\tclientConnectionGauge = metrics.NewRegisteredGauge(\"les/connection/client\", nil)\n\n\ttotalCapacityGauge        = metrics.NewRegisteredGauge(\"les/server/totalCapacity\", nil)\n\ttotalRechargeGauge        = metrics.NewRegisteredGauge(\"les/server/totalRecharge\", nil)\n\ttotalConnectedGauge       = metrics.NewRegisteredGauge(\"les/server/totalConnected\", nil)\n\tblockProcessingTimer      = metrics.NewRegisteredTimer(\"les/server/blockProcessingTime\", nil)\n\tcapacityQueryZeroMeter    = metrics.NewRegisteredMeter(\"les/server/capQueryZero\", nil)\n\tcapacityQueryNonZeroMeter = metrics.NewRegisteredMeter(\"les/server/capQueryNonZero\", nil)\n\n\trequestServedMeter               = metrics.NewRegisteredMeter(\"les/server/req/avgServedTime\", nil)\n\trequestServedTimer               = metrics.NewRegisteredTimer(\"les/server/req/servedTime\", nil)\n\trequestEstimatedMeter            = metrics.NewRegisteredMeter(\"les/server/req/avgEstimatedTime\", nil)\n\trequestEstimatedTimer            = metrics.NewRegisteredTimer(\"les/server/req/estimatedTime\", nil)\n\trelativeCostHistogram            = metrics.NewRegisteredHistogram(\"les/server/req/relative\", nil, metrics.NewExpDecaySample(1028, 0.015))\n\trelativeCostHeaderHistogram      = metrics.NewRegisteredHistogram(\"les/server/req/relative/header\", nil, metrics.NewExpDecaySample(1028, 0.015))\n\trelativeCostBodyHistogram        = metrics.NewRegisteredHistogram(\"les/server/req/relative/body\", nil, metrics.NewExpDecaySample(1028, 0.015))\n\trelativeCostReceiptHistogram     = metrics.NewRegisteredHistogram(\"les/server/req/relative/receipt\", nil, 
metrics.NewExpDecaySample(1028, 0.015))\n\trelativeCostCodeHistogram        = metrics.NewRegisteredHistogram(\"les/server/req/relative/code\", nil, metrics.NewExpDecaySample(1028, 0.015))\n\trelativeCostProofHistogram       = metrics.NewRegisteredHistogram(\"les/server/req/relative/proof\", nil, metrics.NewExpDecaySample(1028, 0.015))\n\trelativeCostHelperProofHistogram = metrics.NewRegisteredHistogram(\"les/server/req/relative/helperTrie\", nil, metrics.NewExpDecaySample(1028, 0.015))\n\trelativeCostSendTxHistogram      = metrics.NewRegisteredHistogram(\"les/server/req/relative/txs\", nil, metrics.NewExpDecaySample(1028, 0.015))\n\trelativeCostTxStatusHistogram    = metrics.NewRegisteredHistogram(\"les/server/req/relative/txStatus\", nil, metrics.NewExpDecaySample(1028, 0.015))\n\n\tglobalFactorGauge    = metrics.NewRegisteredGauge(\"les/server/globalFactor\", nil)\n\trecentServedGauge    = metrics.NewRegisteredGauge(\"les/server/recentRequestServed\", nil)\n\trecentEstimatedGauge = metrics.NewRegisteredGauge(\"les/server/recentRequestEstimated\", nil)\n\tsqServedGauge        = metrics.NewRegisteredGauge(\"les/server/servingQueue/served\", nil)\n\tsqQueuedGauge        = metrics.NewRegisteredGauge(\"les/server/servingQueue/queued\", nil)\n\n\tclientConnectedMeter    = metrics.NewRegisteredMeter(\"les/server/clientEvent/connected\", nil)\n\tclientActivatedMeter    = metrics.NewRegisteredMeter(\"les/server/clientEvent/activated\", nil)\n\tclientDeactivatedMeter  = metrics.NewRegisteredMeter(\"les/server/clientEvent/deactivated\", nil)\n\tclientDisconnectedMeter = metrics.NewRegisteredMeter(\"les/server/clientEvent/disconnected\", nil)\n\tclientFreezeMeter       = metrics.NewRegisteredMeter(\"les/server/clientEvent/freeze\", nil)\n\tclientErrorMeter        = metrics.NewRegisteredMeter(\"les/server/clientEvent/error\", nil)\n\n\trequestRTT       = metrics.NewRegisteredTimer(\"les/client/req/rtt\", nil)\n\trequestSendDelay = metrics.NewRegisteredTimer(\"les/client/req/sendDelay\", nil)\n\n\tserverSelectableGauge = metrics.NewRegisteredGauge(\"les/client/serverPool/selectable\", nil)\n\tserverDialedMeter     = metrics.NewRegisteredMeter(\"les/client/serverPool/dialed\", nil)\n\tserverConnectedGauge  = metrics.NewRegisteredGauge(\"les/client/serverPool/connected\", nil)\n\tsessionValueMeter     = metrics.NewRegisteredMeter(\"les/client/serverPool/sessionValue\", nil)\n\ttotalValueGauge       = metrics.NewRegisteredGauge(\"les/client/serverPool/totalValue\", nil)\n\tsuggestedTimeoutGauge = metrics.NewRegisteredGauge(\"les/client/serverPool/timeout\", nil)\n)\n\n// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of\n// accumulating the above defined metrics based on the data stream contents.\ntype meteredMsgReadWriter struct {\n\tp2p.MsgReadWriter     // Wrapped message stream to meter\n\tversion           int // Protocol version to select correct meters\n}\n\n// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. 
If the\n// metrics system is disabled, this function returns the original object.\nfunc newMeteredMsgWriter(rw p2p.MsgReadWriter, version int) p2p.MsgReadWriter {\n\tif !metrics.Enabled {\n\t\treturn rw\n\t}\n\treturn &meteredMsgReadWriter{MsgReadWriter: rw, version: version}\n}\n\nfunc (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) {\n\t// Read the message and short circuit in case of an error\n\tmsg, err := rw.MsgReadWriter.ReadMsg()\n\tif err != nil {\n\t\treturn msg, err\n\t}\n\t// Account for the data traffic\n\tpackets, traffic := miscInPacketsMeter, miscInTrafficMeter\n\tpackets.Mark(1)\n\ttraffic.Mark(int64(msg.Size))\n\n\treturn msg, err\n}\n\nfunc (rw *meteredMsgReadWriter) WriteMsg(msg p2p.Msg) error {\n\t// Account for the data traffic\n\tpackets, traffic := miscOutPacketsMeter, miscOutTrafficMeter\n\tpackets.Mark(1)\n\ttraffic.Mark(int64(msg.Size))\n\n\t// Send the packet to the p2p layer\n\treturn rw.MsgReadWriter.WriteMsg(msg)\n}\n"
  },
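  {
    "path": "les/metrics_example.go",
    "content": "// Editor's note: this is a hypothetical illustrative file, not part of the\n// go-ethereum source tree. It sketches how the metering wrapper from\n// les/metrics.go is layered over an arbitrary p2p.MsgReadWriter (here an\n// in-memory pipe): once wrapped, every ReadMsg/WriteMsg marks the les/misc\n// packet and traffic meters, provided the metrics system is enabled.\npackage les\n\nimport (\n\t\"github.com/ethereum/go-ethereum/p2p\"\n)\n\nfunc exampleMeteredPipe() error {\n\tapp, net := p2p.MsgPipe()\n\tdefer app.Close()\n\tdefer net.Close()\n\n\t// newMeteredMsgWriter returns net unchanged when metrics are disabled.\n\trw := newMeteredMsgWriter(net, lpv3)\n\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\t// Writing through rw marks miscOutPacketsMeter and miscOutTrafficMeter.\n\t\terrc <- p2p.Send(rw, StopMsg, struct{}{})\n\t}()\n\tmsg, err := app.ReadMsg()\n\tif err != nil {\n\t\treturn err\n\t}\n\tmsg.Discard()\n\treturn <-errc\n}\n"
  },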
  {
    "path": "les/odr.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"context\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/light\"\n)\n\n// LesOdr implements light.OdrBackend\ntype LesOdr struct {\n\tdb                                         ethdb.Database\n\tindexerConfig                              *light.IndexerConfig\n\tchtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer\n\tpeers                                      *serverPeerSet\n\tretriever                                  *retrieveManager\n\tstop                                       chan struct{}\n}\n\nfunc NewLesOdr(db ethdb.Database, config *light.IndexerConfig, peers *serverPeerSet, retriever *retrieveManager) *LesOdr {\n\treturn &LesOdr{\n\t\tdb:            db,\n\t\tindexerConfig: config,\n\t\tpeers:         peers,\n\t\tretriever:     retriever,\n\t\tstop:          make(chan struct{}),\n\t}\n}\n\n// Stop cancels all pending retrievals\nfunc (odr *LesOdr) Stop() {\n\tclose(odr.stop)\n}\n\n// Database returns the backing database\nfunc (odr *LesOdr) Database() ethdb.Database {\n\treturn odr.db\n}\n\n// SetIndexers adds the necessary chain indexers to the ODR backend\nfunc (odr *LesOdr) SetIndexers(chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer) {\n\todr.chtIndexer = chtIndexer\n\todr.bloomTrieIndexer = bloomTrieIndexer\n\todr.bloomIndexer = bloomIndexer\n}\n\n// ChtIndexer returns the CHT chain indexer\nfunc (odr *LesOdr) ChtIndexer() *core.ChainIndexer {\n\treturn odr.chtIndexer\n}\n\n// BloomTrieIndexer returns the bloom trie chain indexer\nfunc (odr *LesOdr) BloomTrieIndexer() *core.ChainIndexer {\n\treturn odr.bloomTrieIndexer\n}\n\n// BloomIndexer returns the bloombits chain indexer\nfunc (odr *LesOdr) BloomIndexer() *core.ChainIndexer {\n\treturn odr.bloomIndexer\n}\n\n// IndexerConfig returns the indexer config.\nfunc (odr *LesOdr) IndexerConfig() *light.IndexerConfig {\n\treturn odr.indexerConfig\n}\n\nconst (\n\tMsgBlockHeaders = iota\n\tMsgBlockBodies\n\tMsgCode\n\tMsgReceipts\n\tMsgProofsV2\n\tMsgHelperTrieProofs\n\tMsgTxStatus\n)\n\n// Msg encodes a LES message that delivers reply data for a request\ntype Msg struct {\n\tMsgType int\n\tReqID   uint64\n\tObj     interface{}\n}\n\n// peerByTxHistory is a heap.Interface implementation which can sort\n// the peerset by transaction history.\ntype peerByTxHistory []*serverPeer\n\nfunc (h peerByTxHistory) Len() int { return len(h) }\nfunc (h peerByTxHistory) Less(i, j int) bool {\n\tif h[i].txHistory == txIndexUnlimited {\n\t\treturn false\n\t}\n\tif h[j].txHistory == txIndexUnlimited {\n\t\treturn true\n\t}\n\treturn 
h[i].txHistory < h[j].txHistory\n}\nfunc (h peerByTxHistory) Swap(i, j int) { h[i], h[j] = h[j], h[i] }\n\nconst (\n\tmaxTxStatusRetry      = 3 // The maximum number of retries made for a tx status request.\n\tmaxTxStatusCandidates = 5 // The maximum number of LES servers a tx status request is sent to.\n)\n\n// RetrieveTxStatus retrieves the transaction status from the LES network.\n// There is no guarantee in the LES protocol that a mined transaction will\n// be retrieved, for several reasons (the transaction may be unindexed, a\n// malicious server may deliberately withhold the reply, etc). Therefore,\n// unretrieved (UNKNOWN) transactions receive a fixed number of retries,\n// giving only a weak guarantee.\nfunc (odr *LesOdr) RetrieveTxStatus(ctx context.Context, req *light.TxStatusRequest) error {\n\t// Sort according to the transaction history supported by the peer and\n\t// select the peers with the longest history.\n\tvar (\n\t\tretries int\n\t\tpeers   []*serverPeer\n\t\tmissing = len(req.Hashes)\n\t\tresult  = make([]light.TxStatus, len(req.Hashes))\n\t\tcanSend = make(map[string]bool)\n\t)\n\tfor _, peer := range odr.peers.allPeers() {\n\t\tif peer.txHistory == txIndexDisabled {\n\t\t\tcontinue\n\t\t}\n\t\tpeers = append(peers, peer)\n\t}\n\tsort.Sort(sort.Reverse(peerByTxHistory(peers)))\n\tfor i := 0; i < maxTxStatusCandidates && i < len(peers); i++ {\n\t\tcanSend[peers[i].id] = true\n\t}\n\t// Send out the request and assemble the result.\n\tfor {\n\t\tif retries >= maxTxStatusRetry || len(canSend) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tvar (\n\t\t\t// Deep copy the request, so that partial results won't be mixed.\n\t\t\treq     = &TxStatusRequest{Hashes: req.Hashes}\n\t\t\tid      = genReqID()\n\t\t\tdistreq = &distReq{\n\t\t\t\tgetCost: func(dp distPeer) uint64 { return req.GetCost(dp.(*serverPeer)) },\n\t\t\t\tcanSend: func(dp distPeer) bool { return canSend[dp.(*serverPeer).id] },\n\t\t\t\trequest: func(dp distPeer) func() {\n\t\t\t\t\tp := dp.(*serverPeer)\n\t\t\t\t\tp.fcServer.QueuedRequest(id, req.GetCost(p))\n\t\t\t\t\tdelete(canSend, p.id)\n\t\t\t\t\treturn func() { req.Request(id, p) }\n\t\t\t\t},\n\t\t\t}\n\t\t)\n\t\tif err := odr.retriever.retrieve(ctx, id, distreq, func(p distPeer, msg *Msg) error { return req.Validate(odr.db, msg) }, odr.stop); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Collect the responses and assemble them into the final result.\n\t\t// The responses are not verifiable, so always keep the first\n\t\t// non-unknown status we get.\n\t\tfor index, status := range req.Status {\n\t\t\tif result[index].Status != core.TxStatusUnknown {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif status.Status == core.TxStatusUnknown {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresult[index], missing = status, missing-1\n\t\t}\n\t\t// Abort the procedure if all the statuses have been retrieved\n\t\tif missing == 0 {\n\t\t\tbreak\n\t\t}\n\t\tretries++\n\t}\n\treq.Status = result\n\treturn nil\n}\n\n// Retrieve tries to fetch an object from the LES network. 
It's a common API\n// for most of the LES requests except for the TxStatusRequest which needs\n// the additional retry mechanism.\n// If the network retrieval was successful, it stores the object in local db.\nfunc (odr *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err error) {\n\tlreq := LesRequest(req)\n\n\treqID := genReqID()\n\trq := &distReq{\n\t\tgetCost: func(dp distPeer) uint64 {\n\t\t\treturn lreq.GetCost(dp.(*serverPeer))\n\t\t},\n\t\tcanSend: func(dp distPeer) bool {\n\t\t\tp := dp.(*serverPeer)\n\t\t\tif !p.onlyAnnounce {\n\t\t\t\treturn lreq.CanSend(p)\n\t\t\t}\n\t\t\treturn false\n\t\t},\n\t\trequest: func(dp distPeer) func() {\n\t\t\tp := dp.(*serverPeer)\n\t\t\tcost := lreq.GetCost(p)\n\t\t\tp.fcServer.QueuedRequest(reqID, cost)\n\t\t\treturn func() { lreq.Request(reqID, p) }\n\t\t},\n\t}\n\n\tdefer func(sent mclock.AbsTime) {\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\trequestRTT.Update(time.Duration(mclock.Now() - sent))\n\t}(mclock.Now())\n\n\tif err := odr.retriever.retrieve(ctx, reqID, rq, func(p distPeer, msg *Msg) error { return lreq.Validate(odr.db, msg) }, odr.stop); err != nil {\n\t\treturn err\n\t}\n\treq.StoreResult(odr.db)\n\treturn nil\n}\n"
  },
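  {
    "path": "les/odr_example.go",
    "content": "// Editor's note: this is a hypothetical illustrative file, not part of the\n// go-ethereum source tree. It sketches how a caller drives the retry-based\n// lookup implemented by LesOdr.RetrieveTxStatus above: build a\n// light.TxStatusRequest, run it under a context, then read the per-hash\n// results, which remain core.TxStatusUnknown if no queried server knew the\n// transaction.\npackage les\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/light\"\n)\n\nfunc exampleTxStatus(odr *LesOdr, hash common.Hash) (light.TxStatus, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\tdefer cancel()\n\n\treq := &light.TxStatusRequest{Hashes: []common.Hash{hash}}\n\tif err := odr.RetrieveTxStatus(ctx, req); err != nil {\n\t\treturn light.TxStatus{}, err\n\t}\n\t// Statuses are filled in request order; after maxTxStatusRetry rounds an\n\t// unresolved entry is still reported as unknown rather than as an error.\n\treturn req.Status[0], nil\n}\n"
  },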
  {
    "path": "les/odr_requests.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\nvar (\n\terrInvalidMessageType  = errors.New(\"invalid message type\")\n\terrInvalidEntryCount   = errors.New(\"invalid number of response entries\")\n\terrHeaderUnavailable   = errors.New(\"header unavailable\")\n\terrTxHashMismatch      = errors.New(\"transaction hash mismatch\")\n\terrUncleHashMismatch   = errors.New(\"uncle hash mismatch\")\n\terrReceiptHashMismatch = errors.New(\"receipt hash mismatch\")\n\terrDataHashMismatch    = errors.New(\"data hash mismatch\")\n\terrCHTHashMismatch     = errors.New(\"cht hash mismatch\")\n\terrCHTNumberMismatch   = errors.New(\"cht number mismatch\")\n\terrUselessNodes        = errors.New(\"useless nodes in merkle proof nodeset\")\n)\n\ntype LesOdrRequest interface {\n\tGetCost(*serverPeer) uint64\n\tCanSend(*serverPeer) bool\n\tRequest(uint64, *serverPeer) error\n\tValidate(ethdb.Database, *Msg) error\n}\n\nfunc LesRequest(req light.OdrRequest) LesOdrRequest {\n\tswitch r := req.(type) {\n\tcase *light.BlockRequest:\n\t\treturn (*BlockRequest)(r)\n\tcase *light.ReceiptsRequest:\n\t\treturn (*ReceiptsRequest)(r)\n\tcase *light.TrieRequest:\n\t\treturn (*TrieRequest)(r)\n\tcase *light.CodeRequest:\n\t\treturn (*CodeRequest)(r)\n\tcase *light.ChtRequest:\n\t\treturn (*ChtRequest)(r)\n\tcase *light.BloomRequest:\n\t\treturn (*BloomRequest)(r)\n\tcase *light.TxStatusRequest:\n\t\treturn (*TxStatusRequest)(r)\n\tdefault:\n\t\treturn nil\n\t}\n}\n\n// BlockRequest is the ODR request type for block bodies\ntype BlockRequest light.BlockRequest\n\n// GetCost returns the cost of the given ODR request according to the serving\n// peer's cost table (implementation of LesOdrRequest)\nfunc (r *BlockRequest) GetCost(peer *serverPeer) uint64 {\n\treturn peer.getRequestCost(GetBlockBodiesMsg, 1)\n}\n\n// CanSend tells if a certain peer is suitable for serving the given request\nfunc (r *BlockRequest) CanSend(peer *serverPeer) bool {\n\treturn peer.HasBlock(r.Hash, r.Number, false)\n}\n\n// Request sends an ODR request to the LES network (implementation of LesOdrRequest)\nfunc (r *BlockRequest) Request(reqID uint64, peer *serverPeer) error {\n\tpeer.Log().Debug(\"Requesting block body\", \"hash\", r.Hash)\n\treturn peer.requestBodies(reqID, []common.Hash{r.Hash})\n}\n\n// 
Valid processes an ODR request reply message from the LES network\n// returns true and stores results in memory if the message was a valid reply\n// to the request (implementation of LesOdrRequest)\nfunc (r *BlockRequest) Validate(db ethdb.Database, msg *Msg) error {\n\tlog.Debug(\"Validating block body\", \"hash\", r.Hash)\n\n\t// Ensure we have a correct message with a single block body\n\tif msg.MsgType != MsgBlockBodies {\n\t\treturn errInvalidMessageType\n\t}\n\tbodies := msg.Obj.([]*types.Body)\n\tif len(bodies) != 1 {\n\t\treturn errInvalidEntryCount\n\t}\n\tbody := bodies[0]\n\n\t// Retrieve our stored header and validate block content against it\n\tif r.Header == nil {\n\t\tr.Header = rawdb.ReadHeader(db, r.Hash, r.Number)\n\t}\n\tif r.Header == nil {\n\t\treturn errHeaderUnavailable\n\t}\n\tif r.Header.TxHash != types.DeriveSha(types.Transactions(body.Transactions), trie.NewStackTrie(nil)) {\n\t\treturn errTxHashMismatch\n\t}\n\tif r.Header.UncleHash != types.CalcUncleHash(body.Uncles) {\n\t\treturn errUncleHashMismatch\n\t}\n\t// Validations passed, encode and store RLP\n\tdata, err := rlp.EncodeToBytes(body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr.Rlp = data\n\treturn nil\n}\n\n// ReceiptsRequest is the ODR request type for block receipts by block hash\ntype ReceiptsRequest light.ReceiptsRequest\n\n// GetCost returns the cost of the given ODR request according to the serving\n// peer's cost table (implementation of LesOdrRequest)\nfunc (r *ReceiptsRequest) GetCost(peer *serverPeer) uint64 {\n\treturn peer.getRequestCost(GetReceiptsMsg, 1)\n}\n\n// CanSend tells if a certain peer is suitable for serving the given request\nfunc (r *ReceiptsRequest) CanSend(peer *serverPeer) bool {\n\treturn peer.HasBlock(r.Hash, r.Number, false)\n}\n\n// Request sends an ODR request to the LES network (implementation of LesOdrRequest)\nfunc (r *ReceiptsRequest) Request(reqID uint64, peer *serverPeer) error {\n\tpeer.Log().Debug(\"Requesting block receipts\", \"hash\", r.Hash)\n\treturn peer.requestReceipts(reqID, []common.Hash{r.Hash})\n}\n\n// Valid processes an ODR request reply message from the LES network\n// returns true and stores results in memory if the message was a valid reply\n// to the request (implementation of LesOdrRequest)\nfunc (r *ReceiptsRequest) Validate(db ethdb.Database, msg *Msg) error {\n\tlog.Debug(\"Validating block receipts\", \"hash\", r.Hash)\n\n\t// Ensure we have a correct message with a single block receipt\n\tif msg.MsgType != MsgReceipts {\n\t\treturn errInvalidMessageType\n\t}\n\treceipts := msg.Obj.([]types.Receipts)\n\tif len(receipts) != 1 {\n\t\treturn errInvalidEntryCount\n\t}\n\treceipt := receipts[0]\n\n\t// Retrieve our stored header and validate receipt content against it\n\tif r.Header == nil {\n\t\tr.Header = rawdb.ReadHeader(db, r.Hash, r.Number)\n\t}\n\tif r.Header == nil {\n\t\treturn errHeaderUnavailable\n\t}\n\tif r.Header.ReceiptHash != types.DeriveSha(receipt, trie.NewStackTrie(nil)) {\n\t\treturn errReceiptHashMismatch\n\t}\n\t// Validations passed, store and return\n\tr.Receipts = receipt\n\treturn nil\n}\n\ntype ProofReq struct {\n\tBHash       common.Hash\n\tAccKey, Key []byte\n\tFromLevel   uint\n}\n\n// ODR request type for state/storage trie entries, see LesOdrRequest interface\ntype TrieRequest light.TrieRequest\n\n// GetCost returns the cost of the given ODR request according to the serving\n// peer's cost table (implementation of LesOdrRequest)\nfunc (r *TrieRequest) GetCost(peer *serverPeer) uint64 {\n\treturn 
peer.getRequestCost(GetProofsV2Msg, 1)\n}\n\n// CanSend tells if a certain peer is suitable for serving the given request\nfunc (r *TrieRequest) CanSend(peer *serverPeer) bool {\n\treturn peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true)\n}\n\n// Request sends an ODR request to the LES network (implementation of LesOdrRequest)\nfunc (r *TrieRequest) Request(reqID uint64, peer *serverPeer) error {\n\tpeer.Log().Debug(\"Requesting trie proof\", \"root\", r.Id.Root, \"key\", r.Key)\n\treq := ProofReq{\n\t\tBHash:  r.Id.BlockHash,\n\t\tAccKey: r.Id.AccKey,\n\t\tKey:    r.Key,\n\t}\n\treturn peer.requestProofs(reqID, []ProofReq{req})\n}\n\n// Valid processes an ODR request reply message from the LES network\n// returns true and stores results in memory if the message was a valid reply\n// to the request (implementation of LesOdrRequest)\nfunc (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error {\n\tlog.Debug(\"Validating trie proof\", \"root\", r.Id.Root, \"key\", r.Key)\n\n\tif msg.MsgType != MsgProofsV2 {\n\t\treturn errInvalidMessageType\n\t}\n\tproofs := msg.Obj.(light.NodeList)\n\t// Verify the proof and store if checks out\n\tnodeSet := proofs.NodeSet()\n\treads := &readTraceDB{db: nodeSet}\n\tif _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {\n\t\treturn fmt.Errorf(\"merkle proof verification failed: %v\", err)\n\t}\n\t// check if all nodes have been read by VerifyProof\n\tif len(reads.reads) != nodeSet.KeyCount() {\n\t\treturn errUselessNodes\n\t}\n\tr.Proof = nodeSet\n\treturn nil\n}\n\ntype CodeReq struct {\n\tBHash  common.Hash\n\tAccKey []byte\n}\n\n// ODR request type for node data (used for retrieving contract code), see LesOdrRequest interface\ntype CodeRequest light.CodeRequest\n\n// GetCost returns the cost of the given ODR request according to the serving\n// peer's cost table (implementation of LesOdrRequest)\nfunc (r *CodeRequest) GetCost(peer *serverPeer) uint64 {\n\treturn peer.getRequestCost(GetCodeMsg, 1)\n}\n\n// CanSend tells if a certain peer is suitable for serving the given request\nfunc (r *CodeRequest) CanSend(peer *serverPeer) bool {\n\treturn peer.HasBlock(r.Id.BlockHash, r.Id.BlockNumber, true)\n}\n\n// Request sends an ODR request to the LES network (implementation of LesOdrRequest)\nfunc (r *CodeRequest) Request(reqID uint64, peer *serverPeer) error {\n\tpeer.Log().Debug(\"Requesting code data\", \"hash\", r.Hash)\n\treq := CodeReq{\n\t\tBHash:  r.Id.BlockHash,\n\t\tAccKey: r.Id.AccKey,\n\t}\n\treturn peer.requestCode(reqID, []CodeReq{req})\n}\n\n// Valid processes an ODR request reply message from the LES network\n// returns true and stores results in memory if the message was a valid reply\n// to the request (implementation of LesOdrRequest)\nfunc (r *CodeRequest) Validate(db ethdb.Database, msg *Msg) error {\n\tlog.Debug(\"Validating code data\", \"hash\", r.Hash)\n\n\t// Ensure we have a correct message with a single code element\n\tif msg.MsgType != MsgCode {\n\t\treturn errInvalidMessageType\n\t}\n\treply := msg.Obj.([][]byte)\n\tif len(reply) != 1 {\n\t\treturn errInvalidEntryCount\n\t}\n\tdata := reply[0]\n\n\t// Verify the data and store if checks out\n\tif hash := crypto.Keccak256Hash(data); r.Hash != hash {\n\t\treturn errDataHashMismatch\n\t}\n\tr.Data = data\n\treturn nil\n}\n\nconst (\n\t// helper trie type constants\n\thtCanonical = iota // Canonical hash trie\n\thtBloomBits        // BloomBits trie\n\n\t// helper trie auxiliary types\n\t// htAuxNone = 1 ; deprecated number, used in les2/3 
previously.\n\thtAuxHeader = 2 // applicable for htCanonical, requests for relevant headers\n)\n\ntype HelperTrieReq struct {\n\tType              uint\n\tTrieIdx           uint64\n\tKey               []byte\n\tFromLevel, AuxReq uint\n}\n\ntype HelperTrieResps struct { // describes all responses, not just a single one\n\tProofs  light.NodeList\n\tAuxData [][]byte\n}\n\n// ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface\ntype ChtRequest light.ChtRequest\n\n// GetCost returns the cost of the given ODR request according to the serving\n// peer's cost table (implementation of LesOdrRequest)\nfunc (r *ChtRequest) GetCost(peer *serverPeer) uint64 {\n\treturn peer.getRequestCost(GetHelperTrieProofsMsg, 1)\n}\n\n// CanSend tells if a certain peer is suitable for serving the given request\nfunc (r *ChtRequest) CanSend(peer *serverPeer) bool {\n\tpeer.lock.RLock()\n\tdefer peer.lock.RUnlock()\n\n\treturn peer.headInfo.Number >= r.Config.ChtConfirms && r.ChtNum <= (peer.headInfo.Number-r.Config.ChtConfirms)/r.Config.ChtSize\n}\n\n// Request sends an ODR request to the LES network (implementation of LesOdrRequest)\nfunc (r *ChtRequest) Request(reqID uint64, peer *serverPeer) error {\n\tpeer.Log().Debug(\"Requesting CHT\", \"cht\", r.ChtNum, \"block\", r.BlockNum)\n\tvar encNum [8]byte\n\tbinary.BigEndian.PutUint64(encNum[:], r.BlockNum)\n\treq := HelperTrieReq{\n\t\tType:    htCanonical,\n\t\tTrieIdx: r.ChtNum,\n\t\tKey:     encNum[:],\n\t\tAuxReq:  htAuxHeader,\n\t}\n\treturn peer.requestHelperTrieProofs(reqID, []HelperTrieReq{req})\n}\n\n// Valid processes an ODR request reply message from the LES network\n// returns true and stores results in memory if the message was a valid reply\n// to the request (implementation of LesOdrRequest)\nfunc (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {\n\tlog.Debug(\"Validating CHT\", \"cht\", r.ChtNum, \"block\", r.BlockNum)\n\n\tif msg.MsgType != MsgHelperTrieProofs {\n\t\treturn errInvalidMessageType\n\t}\n\tresp := msg.Obj.(HelperTrieResps)\n\tif len(resp.AuxData) != 1 {\n\t\treturn errInvalidEntryCount\n\t}\n\tnodeSet := resp.Proofs.NodeSet()\n\theaderEnc := resp.AuxData[0]\n\tif len(headerEnc) == 0 {\n\t\treturn errHeaderUnavailable\n\t}\n\theader := new(types.Header)\n\tif err := rlp.DecodeBytes(headerEnc, header); err != nil {\n\t\treturn errHeaderUnavailable\n\t}\n\t// Verify the CHT\n\tvar (\n\t\tnode      light.ChtNode\n\t\tencNumber [8]byte\n\t)\n\tbinary.BigEndian.PutUint64(encNumber[:], r.BlockNum)\n\n\treads := &readTraceDB{db: nodeSet}\n\tvalue, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"merkle proof verification failed: %v\", err)\n\t}\n\tif len(reads.reads) != nodeSet.KeyCount() {\n\t\treturn errUselessNodes\n\t}\n\tif err := rlp.DecodeBytes(value, &node); err != nil {\n\t\treturn err\n\t}\n\tif node.Hash != header.Hash() {\n\t\treturn errCHTHashMismatch\n\t}\n\tif r.BlockNum != header.Number.Uint64() {\n\t\treturn errCHTNumberMismatch\n\t}\n\t// Verifications passed, store and return\n\tr.Header = header\n\tr.Proof = nodeSet\n\tr.Td = node.Td\n\treturn nil\n}\n\ntype BloomReq struct {\n\tBloomTrieNum, BitIdx, SectionIndex, FromLevel uint64\n}\n\n// ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface\ntype BloomRequest light.BloomRequest\n\n// GetCost returns the cost of the given ODR request according to the serving\n// peer's cost table (implementation of LesOdrRequest)\nfunc (r 
*BloomRequest) GetCost(peer *serverPeer) uint64 {\n\treturn peer.getRequestCost(GetHelperTrieProofsMsg, len(r.SectionIndexList))\n}\n\n// CanSend tells if a certain peer is suitable for serving the given request\nfunc (r *BloomRequest) CanSend(peer *serverPeer) bool {\n\tpeer.lock.RLock()\n\tdefer peer.lock.RUnlock()\n\n\tif peer.version < lpv2 {\n\t\treturn false\n\t}\n\treturn peer.headInfo.Number >= r.Config.BloomTrieConfirms && r.BloomTrieNum <= (peer.headInfo.Number-r.Config.BloomTrieConfirms)/r.Config.BloomTrieSize\n}\n\n// Request sends an ODR request to the LES network (implementation of LesOdrRequest)\nfunc (r *BloomRequest) Request(reqID uint64, peer *serverPeer) error {\n\tpeer.Log().Debug(\"Requesting BloomBits\", \"bloomTrie\", r.BloomTrieNum, \"bitIdx\", r.BitIdx, \"sections\", r.SectionIndexList)\n\treqs := make([]HelperTrieReq, len(r.SectionIndexList))\n\n\tvar encNumber [10]byte\n\tbinary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))\n\n\tfor i, sectionIdx := range r.SectionIndexList {\n\t\tbinary.BigEndian.PutUint64(encNumber[2:], sectionIdx)\n\t\treqs[i] = HelperTrieReq{\n\t\t\tType:    htBloomBits,\n\t\t\tTrieIdx: r.BloomTrieNum,\n\t\t\tKey:     common.CopyBytes(encNumber[:]),\n\t\t}\n\t}\n\treturn peer.requestHelperTrieProofs(reqID, reqs)\n}\n\n// Valid processes an ODR request reply message from the LES network\n// returns true and stores results in memory if the message was a valid reply\n// to the request (implementation of LesOdrRequest)\nfunc (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error {\n\tlog.Debug(\"Validating BloomBits\", \"bloomTrie\", r.BloomTrieNum, \"bitIdx\", r.BitIdx, \"sections\", r.SectionIndexList)\n\n\t// Ensure we have a correct message with a single proof element\n\tif msg.MsgType != MsgHelperTrieProofs {\n\t\treturn errInvalidMessageType\n\t}\n\tresps := msg.Obj.(HelperTrieResps)\n\tproofs := resps.Proofs\n\tnodeSet := proofs.NodeSet()\n\treads := &readTraceDB{db: nodeSet}\n\n\tr.BloomBits = make([][]byte, len(r.SectionIndexList))\n\n\t// Verify the proofs\n\tvar encNumber [10]byte\n\tbinary.BigEndian.PutUint16(encNumber[:2], uint16(r.BitIdx))\n\n\tfor i, idx := range r.SectionIndexList {\n\t\tbinary.BigEndian.PutUint64(encNumber[2:], idx)\n\t\tvalue, err := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tr.BloomBits[i] = value\n\t}\n\n\tif len(reads.reads) != nodeSet.KeyCount() {\n\t\treturn errUselessNodes\n\t}\n\tr.Proofs = nodeSet\n\treturn nil\n}\n\n// TxStatusRequest is the ODR request type for transaction status\ntype TxStatusRequest light.TxStatusRequest\n\n// GetCost returns the cost of the given ODR request according to the serving\n// peer's cost table (implementation of LesOdrRequest)\nfunc (r *TxStatusRequest) GetCost(peer *serverPeer) uint64 {\n\treturn peer.getRequestCost(GetTxStatusMsg, len(r.Hashes))\n}\n\n// CanSend tells if a certain peer is suitable for serving the given request\nfunc (r *TxStatusRequest) CanSend(peer *serverPeer) bool {\n\treturn peer.txHistory != txIndexDisabled\n}\n\n// Request sends an ODR request to the LES network (implementation of LesOdrRequest)\nfunc (r *TxStatusRequest) Request(reqID uint64, peer *serverPeer) error {\n\tpeer.Log().Debug(\"Requesting transaction status\", \"count\", len(r.Hashes))\n\treturn peer.requestTxStatus(reqID, r.Hashes)\n}\n\n// Validate processes an ODR request reply message from the LES network\n// returns true and stores results in memory if the message was a valid reply\n// to the request 
(implementation of LesOdrRequest)\nfunc (r *TxStatusRequest) Validate(db ethdb.Database, msg *Msg) error {\n\tlog.Debug(\"Validating transaction status\", \"count\", len(r.Hashes))\n\n\tif msg.MsgType != MsgTxStatus {\n\t\treturn errInvalidMessageType\n\t}\n\tstatus := msg.Obj.([]light.TxStatus)\n\tif len(status) != len(r.Hashes) {\n\t\treturn errInvalidEntryCount\n\t}\n\tr.Status = status\n\treturn nil\n}\n\n// readTraceDB stores the keys of database reads. We use this to check that received node\n// sets contain only the trie nodes necessary to make proofs pass.\ntype readTraceDB struct {\n\tdb    ethdb.KeyValueReader\n\treads map[string]struct{}\n}\n\n// Get returns a stored node and records the key as read\nfunc (db *readTraceDB) Get(k []byte) ([]byte, error) {\n\tif db.reads == nil {\n\t\tdb.reads = make(map[string]struct{})\n\t}\n\tdb.reads[string(k)] = struct{}{}\n\treturn db.db.Get(k)\n}\n\n// Has returns true if the node set contains the given key\nfunc (db *readTraceDB) Has(key []byte) (bool, error) {\n\t_, err := db.Get(key)\n\treturn err == nil, nil\n}\n"
  },
  {
    "path": "les/odr_test.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/rand\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/math\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\ntype odrTestFn func(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte\n\nfunc TestOdrGetBlockLes2(t *testing.T) { testOdr(t, 2, 1, true, odrGetBlock) }\nfunc TestOdrGetBlockLes3(t *testing.T) { testOdr(t, 3, 1, true, odrGetBlock) }\nfunc TestOdrGetBlockLes4(t *testing.T) { testOdr(t, 4, 1, true, odrGetBlock) }\n\nfunc odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {\n\tvar block *types.Block\n\tif bc != nil {\n\t\tblock = bc.GetBlockByHash(bhash)\n\t} else {\n\t\tblock, _ = lc.GetBlockByHash(ctx, bhash)\n\t}\n\tif block == nil {\n\t\treturn nil\n\t}\n\trlp, _ := rlp.EncodeToBytes(block)\n\treturn rlp\n}\n\nfunc TestOdrGetReceiptsLes2(t *testing.T) { testOdr(t, 2, 1, true, odrGetReceipts) }\nfunc TestOdrGetReceiptsLes3(t *testing.T) { testOdr(t, 3, 1, true, odrGetReceipts) }\nfunc TestOdrGetReceiptsLes4(t *testing.T) { testOdr(t, 4, 1, true, odrGetReceipts) }\n\nfunc odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {\n\tvar receipts types.Receipts\n\tif bc != nil {\n\t\tif number := rawdb.ReadHeaderNumber(db, bhash); number != nil {\n\t\t\treceipts = rawdb.ReadReceipts(db, bhash, *number, config)\n\t\t}\n\t} else {\n\t\tif number := rawdb.ReadHeaderNumber(db, bhash); number != nil {\n\t\t\treceipts, _ = light.GetBlockReceipts(ctx, lc.Odr(), bhash, *number)\n\t\t}\n\t}\n\tif receipts == nil {\n\t\treturn nil\n\t}\n\trlp, _ := rlp.EncodeToBytes(receipts)\n\treturn rlp\n}\n\nfunc TestOdrAccountsLes2(t *testing.T) { testOdr(t, 2, 1, true, odrAccounts) }\nfunc TestOdrAccountsLes3(t *testing.T) { testOdr(t, 3, 1, true, odrAccounts) }\nfunc TestOdrAccountsLes4(t *testing.T) { testOdr(t, 4, 1, true, odrAccounts) }\n\nfunc odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash 
common.Hash) []byte {\n\tdummyAddr := common.HexToAddress(\"1234567812345678123456781234567812345678\")\n\tacc := []common.Address{bankAddr, userAddr1, userAddr2, dummyAddr}\n\n\tvar (\n\t\tres []byte\n\t\tst  *state.StateDB\n\t\terr error\n\t)\n\tfor _, addr := range acc {\n\t\tif bc != nil {\n\t\t\theader := bc.GetHeaderByHash(bhash)\n\t\t\tst, err = state.New(header.Root, state.NewDatabase(db), nil)\n\t\t} else {\n\t\t\theader := lc.GetHeaderByHash(bhash)\n\t\t\tst = light.NewState(ctx, header, lc.Odr())\n\t\t}\n\t\tif err == nil {\n\t\t\tbal := st.GetBalance(addr)\n\t\t\trlp, _ := rlp.EncodeToBytes(bal)\n\t\t\tres = append(res, rlp...)\n\t\t}\n\t}\n\treturn res\n}\n\nfunc TestOdrContractCallLes2(t *testing.T) { testOdr(t, 2, 2, true, odrContractCall) }\nfunc TestOdrContractCallLes3(t *testing.T) { testOdr(t, 3, 2, true, odrContractCall) }\nfunc TestOdrContractCallLes4(t *testing.T) { testOdr(t, 4, 2, true, odrContractCall) }\n\ntype callmsg struct {\n\ttypes.Message\n}\n\nfunc (callmsg) CheckNonce() bool { return false }\n\nfunc odrContractCall(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {\n\tdata := common.Hex2Bytes(\"60CD26850000000000000000000000000000000000000000000000000000000000000000\")\n\n\tvar res []byte\n\tfor i := 0; i < 3; i++ {\n\t\tdata[35] = byte(i)\n\t\tif bc != nil {\n\t\t\theader := bc.GetHeaderByHash(bhash)\n\t\t\tstatedb, err := state.New(header.Root, state.NewDatabase(db), nil)\n\n\t\t\tif err == nil {\n\t\t\t\tfrom := statedb.GetOrNewStateObject(bankAddr)\n\t\t\t\tfrom.SetBalance(math.MaxBig256)\n\n\t\t\t\tmsg := callmsg{types.NewMessage(from.Address(), &testContractAddr, 0, new(big.Int), 100000, new(big.Int), data, nil, false)}\n\n\t\t\t\tcontext := core.NewEVMBlockContext(header, bc, nil)\n\t\t\t\ttxContext := core.NewEVMTxContext(msg)\n\t\t\t\tvmenv := vm.NewEVM(context, txContext, statedb, config, vm.Config{})\n\n\t\t\t\t//vmenv := core.NewEnv(statedb, config, bc, msg, header, vm.Config{})\n\t\t\t\tgp := new(core.GasPool).AddGas(math.MaxUint64)\n\t\t\t\tresult, _ := core.ApplyMessage(vmenv, msg, gp)\n\t\t\t\tres = append(res, result.Return()...)\n\t\t\t}\n\t\t} else {\n\t\t\theader := lc.GetHeaderByHash(bhash)\n\t\t\tstate := light.NewState(ctx, header, lc.Odr())\n\t\t\tstate.SetBalance(bankAddr, math.MaxBig256)\n\t\t\tmsg := callmsg{types.NewMessage(bankAddr, &testContractAddr, 0, new(big.Int), 100000, new(big.Int), data, nil, false)}\n\t\t\tcontext := core.NewEVMBlockContext(header, lc, nil)\n\t\t\ttxContext := core.NewEVMTxContext(msg)\n\t\t\tvmenv := vm.NewEVM(context, txContext, state, config, vm.Config{})\n\t\t\tgp := new(core.GasPool).AddGas(math.MaxUint64)\n\t\t\tresult, _ := core.ApplyMessage(vmenv, msg, gp)\n\t\t\tif state.Error() == nil {\n\t\t\t\tres = append(res, result.Return()...)\n\t\t\t}\n\t\t}\n\t}\n\treturn res\n}\n\nfunc TestOdrTxStatusLes2(t *testing.T) { testOdr(t, 2, 1, false, odrTxStatus) }\nfunc TestOdrTxStatusLes3(t *testing.T) { testOdr(t, 3, 1, false, odrTxStatus) }\nfunc TestOdrTxStatusLes4(t *testing.T) { testOdr(t, 4, 1, false, odrTxStatus) }\n\nfunc odrTxStatus(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {\n\tvar txs types.Transactions\n\tif bc != nil {\n\t\tblock := bc.GetBlockByHash(bhash)\n\t\ttxs = block.Transactions()\n\t} else {\n\t\tif block, _ := lc.GetBlockByHash(ctx, bhash); block != nil {\n\t\t\tbtxs := block.Transactions()\n\t\t\ttxs 
= make(types.Transactions, len(btxs))\n\t\t\tfor i, tx := range btxs {\n\t\t\t\tvar err error\n\t\t\t\ttxs[i], _, _, _, err = light.GetTransaction(ctx, lc.Odr(), tx.Hash())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\trlp, _ := rlp.EncodeToBytes(txs)\n\treturn rlp\n}\n\n// testOdr tests odr requests whose validation is guaranteed by block headers.\nfunc testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn odrTestFn) {\n\t// Assemble the test environment\n\tnetconfig := testnetConfig{\n\t\tblocks:    4,\n\t\tprotocol:  protocol,\n\t\tconnect:   true,\n\t\tnopruning: true,\n\t}\n\tserver, client, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\t// Ensure the client has synced all necessary data.\n\tclientHead := client.handler.backend.blockchain.CurrentHeader()\n\tif clientHead.Number.Uint64() != 4 {\n\t\tt.Fatalf(\"Failed to sync the chain with server, head: %v\", clientHead.Number.Uint64())\n\t}\n\t// Disable the mechanism that waits for a while before sending a request\n\t// even when there is no suitable peer to send it to right now.\n\twaitForPeers = 0\n\n\ttest := func(expFail uint64) {\n\t\t// Mark this as a helper to put the failures at the correct lines\n\t\tt.Helper()\n\n\t\tfor i := uint64(0); i <= server.handler.blockchain.CurrentHeader().Number.Uint64(); i++ {\n\t\t\tbhash := rawdb.ReadCanonicalHash(server.db, i)\n\t\t\tb1 := fn(light.NoOdr, server.db, server.handler.server.chainConfig, server.handler.blockchain, nil, bhash)\n\n\t\t\t// Set the timeout to 1 second here to ensure there is enough time\n\t\t\t// for Travis to perform the retrieval.\n\t\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\t\t\tb2 := fn(ctx, client.db, client.handler.backend.chainConfig, nil, client.handler.backend.blockchain, bhash)\n\t\t\tcancel()\n\n\t\t\teq := bytes.Equal(b1, b2)\n\t\t\texp := i < expFail\n\t\t\tif exp && !eq {\n\t\t\t\tt.Fatalf(\"odr mismatch: have %x, want %x\", b2, b1)\n\t\t\t}\n\t\t\tif !exp && eq {\n\t\t\t\tt.Fatalf(\"unexpected odr match\")\n\t\t\t}\n\t\t}\n\t}\n\n\t// expect retrievals to fail (except genesis block) without a les peer\n\tclient.handler.backend.peers.lock.Lock()\n\tclient.peer.speer.hasBlockHook = func(common.Hash, uint64, bool) bool { return false }\n\tclient.handler.backend.peers.lock.Unlock()\n\ttest(expFail)\n\n\t// expect all retrievals to pass\n\tclient.handler.backend.peers.lock.Lock()\n\tclient.peer.speer.hasBlockHook = func(common.Hash, uint64, bool) bool { return true }\n\tclient.handler.backend.peers.lock.Unlock()\n\ttest(5)\n\n\t// still expect all retrievals to pass, now data should be cached locally\n\tif checkCached {\n\t\tclient.handler.backend.peers.unregister(client.peer.speer.id)\n\t\ttime.Sleep(time.Millisecond * 10) // ensure that all peerSetNotify callbacks are executed\n\t\ttest(5)\n\t}\n}\n\nfunc TestGetTxStatusFromUnindexedPeersLES4(t *testing.T) { testGetTxStatusFromUnindexedPeers(t, lpv4) }\n\nfunc testGetTxStatusFromUnindexedPeers(t *testing.T, protocol int) {\n\tvar (\n\t\tblocks    = 8\n\t\tnetconfig = testnetConfig{\n\t\t\tblocks:    blocks,\n\t\t\tprotocol:  protocol,\n\t\t\tnopruning: true,\n\t\t}\n\t)\n\tserver, client, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\t// Iterate the chain, create the tx indexes locally\n\tvar (\n\t\ttestHash   common.Hash\n\t\ttestStatus light.TxStatus\n\n\t\ttxs          = make(map[common.Hash]*types.Transaction) // Transaction objects set\n\t\tblockNumbers = make(map[common.Hash]uint64)             
// Transaction hash to block number mappings\n\t\tblockHashes  = make(map[common.Hash]common.Hash)        // Transaction hash to block hash mappings\n\t\tintraIndex   = make(map[common.Hash]uint64)             // Transaction intra-index in block\n\t)\n\tfor number := uint64(1); number < server.backend.Blockchain().CurrentBlock().NumberU64(); number++ {\n\t\tblock := server.backend.Blockchain().GetBlockByNumber(number)\n\t\tif block == nil {\n\t\t\tt.Fatalf(\"Failed to retrieve block %d\", number)\n\t\t}\n\t\tfor index, tx := range block.Transactions() {\n\t\t\ttxs[tx.Hash()] = tx\n\t\t\tblockNumbers[tx.Hash()] = number\n\t\t\tblockHashes[tx.Hash()] = block.Hash()\n\t\t\tintraIndex[tx.Hash()] = uint64(index)\n\n\t\t\tif testHash == (common.Hash{}) {\n\t\t\t\ttestHash = tx.Hash()\n\t\t\t\ttestStatus = light.TxStatus{\n\t\t\t\t\tStatus: core.TxStatusIncluded,\n\t\t\t\t\tLookup: &rawdb.LegacyTxLookupEntry{\n\t\t\t\t\t\tBlockHash:  block.Hash(),\n\t\t\t\t\t\tBlockIndex: block.NumberU64(),\n\t\t\t\t\t\tIndex:      uint64(index),\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t// serveMsg processes incoming GetTxStatusMsg and sends the response back.\n\tserveMsg := func(peer *testPeer, txLookup uint64) error {\n\t\tmsg, err := peer.app.ReadMsg()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif msg.Code != GetTxStatusMsg {\n\t\t\treturn fmt.Errorf(\"message code mismatch: got %d, expected %d\", msg.Code, GetTxStatusMsg)\n\t\t}\n\t\tvar r GetTxStatusPacket\n\t\tif err := msg.Decode(&r); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstats := make([]light.TxStatus, len(r.Hashes))\n\t\tfor i, hash := range r.Hashes {\n\t\t\tnumber, exist := blockNumbers[hash]\n\t\t\tif !exist {\n\t\t\t\tcontinue // Filter out unknown transactions\n\t\t\t}\n\t\t\tmin := uint64(blocks) - txLookup\n\t\t\tif txLookup != txIndexUnlimited && (txLookup == txIndexDisabled || number < min) {\n\t\t\t\tcontinue // Filter out unindexed transactions\n\t\t\t}\n\t\t\tstats[i].Status = core.TxStatusIncluded\n\t\t\tstats[i].Lookup = &rawdb.LegacyTxLookupEntry{\n\t\t\t\tBlockHash:  blockHashes[hash],\n\t\t\t\tBlockIndex: number,\n\t\t\t\tIndex:      intraIndex[hash],\n\t\t\t}\n\t\t}\n\t\tdata, _ := rlp.EncodeToBytes(stats)\n\t\treply := &reply{peer.app, TxStatusMsg, r.ReqID, data}\n\t\treply.send(testBufLimit)\n\t\treturn nil\n\t}\n\n\tvar testspecs = []struct {\n\t\tpeers     int\n\t\ttxLookups []uint64\n\t\ttxs       []common.Hash\n\t\tresults   []light.TxStatus\n\t}{\n\t\t// Retrieve mined transaction from the empty peerset\n\t\t{\n\t\t\tpeers:     0,\n\t\t\ttxLookups: []uint64{},\n\t\t\ttxs:       []common.Hash{testHash},\n\t\t\tresults:   []light.TxStatus{{}},\n\t\t},\n\t\t// Retrieve unknown transaction from the full peers\n\t\t{\n\t\t\tpeers:     3,\n\t\t\ttxLookups: []uint64{txIndexUnlimited, txIndexUnlimited, txIndexUnlimited},\n\t\t\ttxs:       []common.Hash{randomHash()},\n\t\t\tresults:   []light.TxStatus{{}},\n\t\t},\n\t\t// Retrieve mined transaction from the full peers\n\t\t{\n\t\t\tpeers:     3,\n\t\t\ttxLookups: []uint64{txIndexUnlimited, txIndexUnlimited, txIndexUnlimited},\n\t\t\ttxs:       []common.Hash{testHash},\n\t\t\tresults:   []light.TxStatus{testStatus},\n\t\t},\n\t\t// Retrieve mixed transactions from the full peers\n\t\t{\n\t\t\tpeers:     3,\n\t\t\ttxLookups: []uint64{txIndexUnlimited, txIndexUnlimited, txIndexUnlimited},\n\t\t\ttxs:       []common.Hash{randomHash(), testHash},\n\t\t\tresults:   []light.TxStatus{{}, testStatus},\n\t\t},\n\t\t// Retrieve mixed transactions from unindexed peer(but the 
target is still available)\n\t\t{\n\t\t\tpeers:     3,\n\t\t\ttxLookups: []uint64{uint64(blocks) - testStatus.Lookup.BlockIndex, uint64(blocks) - testStatus.Lookup.BlockIndex - 1, uint64(blocks) - testStatus.Lookup.BlockIndex - 2},\n\t\t\ttxs:       []common.Hash{randomHash(), testHash},\n\t\t\tresults:   []light.TxStatus{{}, testStatus},\n\t\t},\n\t\t// Retrieve mixed transactions from unindexed peer (but the target is not available)\n\t\t{\n\t\t\tpeers:     3,\n\t\t\ttxLookups: []uint64{uint64(blocks) - testStatus.Lookup.BlockIndex - 1, uint64(blocks) - testStatus.Lookup.BlockIndex - 1, uint64(blocks) - testStatus.Lookup.BlockIndex - 2},\n\t\t\ttxs:       []common.Hash{randomHash(), testHash},\n\t\t\tresults:   []light.TxStatus{{}, {}},\n\t\t},\n\t}\n\tfor _, testspec := range testspecs {\n\t\t// Create a bunch of server peers with different tx history\n\t\tvar (\n\t\t\tserverPeers []*testPeer\n\t\t\tcloseFns    []func()\n\t\t)\n\t\tfor i := 0; i < testspec.peers; i++ {\n\t\t\tpeer, closePeer, _ := client.newRawPeer(t, fmt.Sprintf(\"server-%d\", i), protocol, testspec.txLookups[i])\n\t\t\tserverPeers = append(serverPeers, peer)\n\t\t\tcloseFns = append(closeFns, closePeer)\n\n\t\t\t// Create a one-time routine for serving the message\n\t\t\tgo func(i int, peer *testPeer) {\n\t\t\t\tserveMsg(peer, testspec.txLookups[i])\n\t\t\t}(i, peer)\n\t\t}\n\n\t\t// Send out the GetTxStatus requests, compare the result with the\n\t\t// expected value.\n\t\tr := &light.TxStatusRequest{Hashes: testspec.txs}\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\t\tdefer cancel()\n\n\t\terr := client.handler.backend.odr.RetrieveTxStatus(ctx, r)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to retrieve tx status: %v\", err)\n\t\t} else {\n\t\t\tif !reflect.DeepEqual(testspec.results, r.Status) {\n\t\t\t\tt.Errorf(\"Result mismatch, want %v, got %v\", testspec.results, r.Status)\n\t\t\t}\n\t\t}\n\n\t\t// Close all connected peers and start the next round\n\t\tfor _, closeFn := range closeFns {\n\t\t\tcloseFn()\n\t\t}\n\t}\n}\n\n// randomHash generates a random blob of data and returns it as a hash.\nfunc randomHash() common.Hash {\n\tvar hash common.Hash\n\tif n, err := rand.Read(hash[:]); n != common.HashLength || err != nil {\n\t\tpanic(err)\n\t}\n\treturn hash\n}\n"
  },
  {
    "path": "les/peer.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"math/rand\"\n\t\"net\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/les/flowcontrol\"\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\tvfc \"github.com/ethereum/go-ethereum/les/vflux/client\"\n\tvfs \"github.com/ethereum/go-ethereum/les/vflux/server\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\nvar (\n\terrClosed            = errors.New(\"peer set is closed\")\n\terrAlreadyRegistered = errors.New(\"peer is already registered\")\n\terrNotRegistered     = errors.New(\"peer is not registered\")\n)\n\nconst (\n\tmaxRequestErrors  = 20 // number of invalid requests tolerated (makes the protocol less brittle but still avoids spam)\n\tmaxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)\n\n\tallowedUpdateBytes = 100000                // initial/maximum allowed update size\n\tallowedUpdateRate  = time.Millisecond * 10 // time constant for recharging one byte of allowance\n\n\tfreezeTimeBase    = time.Millisecond * 700 // fixed component of client freeze time\n\tfreezeTimeRandom  = time.Millisecond * 600 // random component of client freeze time\n\tfreezeCheckPeriod = time.Millisecond * 100 // buffer value recheck period after initial freeze time has elapsed\n\n\t// If the total encoded size of a sent transaction batch is over txSizeCostLimit\n\t// per transaction then the request cost is calculated as proportional to the\n\t// encoded size instead of the transaction count\n\ttxSizeCostLimit = 0x4000\n\n\t// handshakeTimeout is the timeout LES handshake will be treated as failed.\n\thandshakeTimeout = 5 * time.Second\n)\n\nconst (\n\tannounceTypeNone = iota\n\tannounceTypeSimple\n\tannounceTypeSigned\n)\n\ntype keyValueEntry struct {\n\tKey   string\n\tValue rlp.RawValue\n}\n\ntype keyValueList []keyValueEntry\ntype keyValueMap map[string]rlp.RawValue\n\nfunc (l keyValueList) add(key string, val interface{}) keyValueList {\n\tvar entry keyValueEntry\n\tentry.Key = key\n\tif val == nil {\n\t\tval = uint64(0)\n\t}\n\tenc, err := rlp.EncodeToBytes(val)\n\tif err == nil {\n\t\tentry.Value = enc\n\t}\n\treturn append(l, entry)\n}\n\nfunc (l keyValueList) decode() (keyValueMap, uint64) {\n\tm := make(keyValueMap)\n\tvar size uint64\n\tfor _, 
entry := range l {\n\t\tm[entry.Key] = entry.Value\n\t\tsize += uint64(len(entry.Key)) + uint64(len(entry.Value)) + 8\n\t}\n\treturn m, size\n}\n\nfunc (m keyValueMap) get(key string, val interface{}) error {\n\tenc, ok := m[key]\n\tif !ok {\n\t\treturn errResp(ErrMissingKey, \"%s\", key)\n\t}\n\tif val == nil {\n\t\treturn nil\n\t}\n\treturn rlp.DecodeBytes(enc, val)\n}\n\n// peerCommons contains fields needed by both server peer and client peer.\ntype peerCommons struct {\n\t*p2p.Peer\n\trw p2p.MsgReadWriter\n\n\tid           string    // Peer identity.\n\tversion      int       // Protocol version negotiated.\n\tnetwork      uint64    // Network ID the peer is on.\n\tfrozen       uint32    // Flag whether the peer is frozen.\n\tannounceType uint64    // New block announcement type.\n\tserving      uint32    // Whether the peer is being served.\n\theadInfo     blockInfo // Last announced block information.\n\n\t// Background task queue for caching peer tasks and executing in order.\n\tsendQueue *utils.ExecQueue\n\n\t// Flow control agreement.\n\tfcParams flowcontrol.ServerParams // The config for token bucket.\n\tfcCosts  requestCostTable         // The maximum request cost table.\n\n\tcloseCh chan struct{}\n\tlock    sync.RWMutex // Lock used to protect all thread-sensitive fields.\n}\n\n// isFrozen returns true if the client is frozen or the server has put our\n// client in frozen state\nfunc (p *peerCommons) isFrozen() bool {\n\treturn atomic.LoadUint32(&p.frozen) != 0\n}\n\n// canQueue returns an indicator whether the peer can queue an operation.\nfunc (p *peerCommons) canQueue() bool {\n\treturn p.sendQueue.CanQueue() && !p.isFrozen()\n}\n\n// queueSend caches a peer operation in the background task queue.\n// Please make sure to check `canQueue` before calling this function\nfunc (p *peerCommons) queueSend(f func()) bool {\n\treturn p.sendQueue.Queue(f)\n}\n\n// String implements fmt.Stringer.\nfunc (p *peerCommons) String() string {\n\treturn fmt.Sprintf(\"Peer %s [%s]\", p.id, fmt.Sprintf(\"les/%d\", p.version))\n}\n\n// PeerInfo represents a short summary of the `eth` sub-protocol metadata known\n// about a connected peer.\ntype PeerInfo struct {\n\tVersion    int      `json:\"version\"`    // Ethereum protocol version negotiated\n\tDifficulty *big.Int `json:\"difficulty\"` // Total difficulty of the peer's blockchain\n\tHead       string   `json:\"head\"`       // SHA3 hash of the peer's best owned block\n}\n\n// Info gathers and returns a collection of metadata known about a peer.\nfunc (p *peerCommons) Info() *PeerInfo {\n\treturn &PeerInfo{\n\t\tVersion:    p.version,\n\t\tDifficulty: p.Td(),\n\t\tHead:       fmt.Sprintf(\"%x\", p.Head()),\n\t}\n}\n\n// Head retrieves a copy of the current head (most recent) hash of the peer.\nfunc (p *peerCommons) Head() (hash common.Hash) {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\treturn p.headInfo.Hash\n}\n\n// Td retrieves the current total difficulty of a peer.\nfunc (p *peerCommons) Td() *big.Int {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\treturn new(big.Int).Set(p.headInfo.Td)\n}\n\n// HeadAndTd retrieves the current head hash and total difficulty of a peer.\nfunc (p *peerCommons) HeadAndTd() (hash common.Hash, td *big.Int) {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\treturn p.headInfo.Hash, new(big.Int).Set(p.headInfo.Td)\n}\n\n
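// The handshake payload is an ordered list of (key, RLP-encoded value) pairs\n// rather than a fixed struct, so either side can ignore unknown keys. The\n// function below is an illustrative sketch added for documentation; it is not\n// part of the original code and nothing in this package calls it.\nfunc exampleHandshakeList() (uint64, error) {\n\tvar send keyValueList\n\tsend = send.add(\"protocolVersion\", uint64(2))\n\tm, size := send.decode()\n\t_ = size // size counts key bytes + value bytes + 8 per entry\n\tvar version uint64\n\terr := m.get(\"protocolVersion\", &version) // version == 2 on success\n\treturn version, err\n}\n\n// sendReceiveHandshake exchanges handshake packet with remote peer and returns any error\n// if failed to send or receive packet.\nfunc (p *peerCommons) sendReceiveHandshake(sendList 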
keyValueList) (keyValueList, error) {\n\tvar (\n\t\terrc     = make(chan error, 2)\n\t\trecvList keyValueList\n\t)\n\t// Send out own handshake in a new thread\n\tgo func() {\n\t\terrc <- p2p.Send(p.rw, StatusMsg, sendList)\n\t}()\n\tgo func() {\n\t\t// In the mean time retrieve the remote status message\n\t\tmsg, err := p.rw.ReadMsg()\n\t\tif err != nil {\n\t\t\terrc <- err\n\t\t\treturn\n\t\t}\n\t\tif msg.Code != StatusMsg {\n\t\t\terrc <- errResp(ErrNoStatusMsg, \"first msg has code %x (!= %x)\", msg.Code, StatusMsg)\n\t\t\treturn\n\t\t}\n\t\tif msg.Size > ProtocolMaxMsgSize {\n\t\t\terrc <- errResp(ErrMsgTooLarge, \"%v > %v\", msg.Size, ProtocolMaxMsgSize)\n\t\t\treturn\n\t\t}\n\t\t// Decode the handshake\n\t\tif err := msg.Decode(&recvList); err != nil {\n\t\t\terrc <- errResp(ErrDecode, \"msg %v: %v\", msg, err)\n\t\t\treturn\n\t\t}\n\t\terrc <- nil\n\t}()\n\ttimeout := time.NewTimer(handshakeTimeout)\n\tdefer timeout.Stop()\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase err := <-errc:\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase <-timeout.C:\n\t\t\treturn nil, p2p.DiscReadTimeout\n\t\t}\n\t}\n\treturn recvList, nil\n}\n\n// handshake executes the les protocol handshake, negotiating version number,\n// network IDs, difficulties, head and genesis blocks. Besides the basic handshake\n// fields, server and client can exchange and resolve some specified fields through\n// two callback functions.\nfunc (p *peerCommons) handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter, sendCallback func(*keyValueList), recvCallback func(keyValueMap) error) error {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\tvar send keyValueList\n\n\t// Add some basic handshake fields\n\tsend = send.add(\"protocolVersion\", uint64(p.version))\n\tsend = send.add(\"networkId\", p.network)\n\t// Note: the head info announced at handshake is only used in case of server peers\n\t// but dummy values are still announced by clients for compatibility with older servers\n\tsend = send.add(\"headTd\", td)\n\tsend = send.add(\"headHash\", head)\n\tsend = send.add(\"headNum\", headNum)\n\tsend = send.add(\"genesisHash\", genesis)\n\n\t// If the protocol version is beyond les4, then pass the forkID\n\t// as well. 
Check http://eips.ethereum.org/EIPS/eip-2124 for more\n\t// spec detail.\n\tif p.version >= lpv4 {\n\t\tsend = send.add(\"forkID\", forkID)\n\t}\n\t// Add client-specified or server-specified fields\n\tif sendCallback != nil {\n\t\tsendCallback(&send)\n\t}\n\t// Exchange the handshake packet and resolve the received one.\n\trecvList, err := p.sendReceiveHandshake(send)\n\tif err != nil {\n\t\treturn err\n\t}\n\trecv, size := recvList.decode()\n\tif size > allowedUpdateBytes {\n\t\treturn errResp(ErrRequestRejected, \"\")\n\t}\n\tvar rGenesis common.Hash\n\tvar rVersion, rNetwork uint64\n\tif err := recv.get(\"protocolVersion\", &rVersion); err != nil {\n\t\treturn err\n\t}\n\tif err := recv.get(\"networkId\", &rNetwork); err != nil {\n\t\treturn err\n\t}\n\tif err := recv.get(\"genesisHash\", &rGenesis); err != nil {\n\t\treturn err\n\t}\n\tif rGenesis != genesis {\n\t\treturn errResp(ErrGenesisBlockMismatch, \"%x (!= %x)\", rGenesis[:8], genesis[:8])\n\t}\n\tif rNetwork != p.network {\n\t\treturn errResp(ErrNetworkIdMismatch, \"%d (!= %d)\", rNetwork, p.network)\n\t}\n\tif int(rVersion) != p.version {\n\t\treturn errResp(ErrProtocolVersionMismatch, \"%d (!= %d)\", rVersion, p.version)\n\t}\n\t// Check forkID if the protocol version is at least lpv4\n\tif p.version >= lpv4 {\n\t\tvar forkID forkid.ID\n\t\tif err := recv.get(\"forkID\", &forkID); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := forkFilter(forkID); err != nil {\n\t\t\treturn errResp(ErrForkIDRejected, \"%v\", err)\n\t\t}\n\t}\n\tif recvCallback != nil {\n\t\treturn recvCallback(recv)\n\t}\n\treturn nil\n}\n\n// close closes the channel and notifies all background routines to exit.\nfunc (p *peerCommons) close() {\n\tclose(p.closeCh)\n\tp.sendQueue.Quit()\n}\n\n// serverPeer represents each node to which the client is connected.\n// The node here refers to the les server.\ntype serverPeer struct {\n\tpeerCommons\n\n\t// Status fields\n\ttrusted                 bool   // Whether the server is selected as a trusted server.\n\tonlyAnnounce            bool   // Whether the server sends announcements only.\n\tchainSince, chainRecent uint64 // The range of chain the server peer can serve.\n\tstateSince, stateRecent uint64 // The range of state the server peer can serve.\n\ttxHistory               uint64 // The length of available tx history, 0 means all, 1 means disabled\n\n\t// Advertised checkpoint fields\n\tcheckpointNumber uint64                   // The block height at which the checkpoint is registered.\n\tcheckpoint       params.TrustedCheckpoint // The advertised checkpoint sent by server.\n\n\tfcServer         *flowcontrol.ServerNode // Client side mirror token bucket.\n\tvtLock           sync.Mutex\n\tnodeValueTracker *vfc.NodeValueTracker\n\tsentReqs         map[uint64]sentReqEntry\n\n\t// Statistics\n\terrCount    utils.LinearExpiredValue // Counts the invalid responses the server has replied with\n\tupdateCount uint64\n\tupdateTime  mclock.AbsTime\n\n\t// Test callback hooks\n\thasBlockHook func(common.Hash, uint64, bool) bool // Used to determine whether the server has the specified block.\n}\n\nfunc newServerPeer(version int, network uint64, trusted bool, p *p2p.Peer, rw p2p.MsgReadWriter) *serverPeer {\n\treturn &serverPeer{\n\t\tpeerCommons: peerCommons{\n\t\t\tPeer:      p,\n\t\t\trw:        rw,\n\t\t\tid:        p.ID().String(),\n\t\t\tversion:   version,\n\t\t\tnetwork:   network,\n\t\t\tsendQueue: utils.NewExecQueue(100),\n\t\t\tcloseCh:   make(chan struct{}),\n\t\t},\n\t\ttrusted:  trusted,\n\t\terrCount: 
utils.LinearExpiredValue{Rate: mclock.AbsTime(time.Hour)},\n\t}\n}\n\n// rejectUpdate returns true if a parameter update has to be rejected because\n// the size and/or rate of updates exceed the capacity limitation\nfunc (p *serverPeer) rejectUpdate(size uint64) bool {\n\tnow := mclock.Now()\n\tif p.updateCount == 0 {\n\t\tp.updateTime = now\n\t} else {\n\t\tdt := now - p.updateTime\n\t\tp.updateTime = now\n\n\t\tr := uint64(dt / mclock.AbsTime(allowedUpdateRate))\n\t\tif p.updateCount > r {\n\t\t\tp.updateCount -= r\n\t\t} else {\n\t\t\tp.updateCount = 0\n\t\t}\n\t}\n\tp.updateCount += size\n\treturn p.updateCount > allowedUpdateBytes\n}\n\n// freeze processes Stop messages from the given server and sets the status to\n// frozen.\nfunc (p *serverPeer) freeze() {\n\tif atomic.CompareAndSwapUint32(&p.frozen, 0, 1) {\n\t\tp.sendQueue.Clear()\n\t}\n}\n\n// unfreeze processes Resume messages from the given server and sets the status\n// to unfrozen.\nfunc (p *serverPeer) unfreeze() {\n\tatomic.StoreUint32(&p.frozen, 0)\n}\n\n// sendRequest sends a request to the server based on the given message type\n// and content.\nfunc sendRequest(w p2p.MsgWriter, msgcode, reqID uint64, data interface{}) error {\n\ttype req struct {\n\t\tReqID uint64\n\t\tData  interface{}\n\t}\n\treturn p2p.Send(w, msgcode, req{reqID, data})\n}\n\nfunc (p *serverPeer) sendRequest(msgcode, reqID uint64, data interface{}, amount int) error {\n\tp.sentRequest(reqID, uint32(msgcode), uint32(amount))\n\treturn sendRequest(p.rw, msgcode, reqID, data)\n}\n\n// requestHeadersByHash fetches a batch of blocks' headers corresponding to the\n// specified header query, based on the hash of an origin block.\nfunc (p *serverPeer) requestHeadersByHash(reqID uint64, origin common.Hash, amount int, skip int, reverse bool) error {\n\tp.Log().Debug(\"Fetching batch of headers\", \"count\", amount, \"fromhash\", origin, \"skip\", skip, \"reverse\", reverse)\n\treturn p.sendRequest(GetBlockHeadersMsg, reqID, &GetBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}, amount)\n}\n\n// requestHeadersByNumber fetches a batch of blocks' headers corresponding to the\n// specified header query, based on the number of an origin block.\nfunc (p *serverPeer) requestHeadersByNumber(reqID, origin uint64, amount int, skip int, reverse bool) error {\n\tp.Log().Debug(\"Fetching batch of headers\", \"count\", amount, \"fromnum\", origin, \"skip\", skip, \"reverse\", reverse)\n\treturn p.sendRequest(GetBlockHeadersMsg, reqID, &GetBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}, amount)\n}\n\n// requestBodies fetches a batch of blocks' bodies corresponding to the hashes\n// specified.\nfunc (p *serverPeer) requestBodies(reqID uint64, hashes []common.Hash) error {\n\tp.Log().Debug(\"Fetching batch of block bodies\", \"count\", len(hashes))\n\treturn p.sendRequest(GetBlockBodiesMsg, reqID, hashes, len(hashes))\n}\n\n// requestCode fetches a batch of arbitrary data from a node's known state\n// data, corresponding to the specified hashes.\nfunc (p *serverPeer) requestCode(reqID uint64, reqs []CodeReq) error {\n\tp.Log().Debug(\"Fetching batch of codes\", \"count\", len(reqs))\n\treturn p.sendRequest(GetCodeMsg, reqID, reqs, len(reqs))\n}\n\n// requestReceipts fetches a batch of transaction receipts from a remote node.\nfunc (p *serverPeer) requestReceipts(reqID uint64, hashes []common.Hash) error {\n\tp.Log().Debug(\"Fetching batch of 
receipts\", \"count\", len(hashes))\n\treturn p.sendRequest(GetReceiptsMsg, reqID, hashes, len(hashes))\n}\n\n// requestProofs fetches a batch of merkle proofs from a remote node.\nfunc (p *serverPeer) requestProofs(reqID uint64, reqs []ProofReq) error {\n\tp.Log().Debug(\"Fetching batch of proofs\", \"count\", len(reqs))\n\treturn p.sendRequest(GetProofsV2Msg, reqID, reqs, len(reqs))\n}\n\n// requestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.\nfunc (p *serverPeer) requestHelperTrieProofs(reqID uint64, reqs []HelperTrieReq) error {\n\tp.Log().Debug(\"Fetching batch of HelperTrie proofs\", \"count\", len(reqs))\n\treturn p.sendRequest(GetHelperTrieProofsMsg, reqID, reqs, len(reqs))\n}\n\n// requestTxStatus fetches a batch of transaction status records from a remote node.\nfunc (p *serverPeer) requestTxStatus(reqID uint64, txHashes []common.Hash) error {\n\tp.Log().Debug(\"Requesting transaction status\", \"count\", len(txHashes))\n\treturn p.sendRequest(GetTxStatusMsg, reqID, txHashes, len(txHashes))\n}\n\n// sendTxs creates a reply with a batch of transactions to be added to the remote transaction pool.\nfunc (p *serverPeer) sendTxs(reqID uint64, amount int, txs rlp.RawValue) error {\n\tp.Log().Debug(\"Sending batch of transactions\", \"amount\", amount, \"size\", len(txs))\n\tsizeFactor := (len(txs) + txSizeCostLimit/2) / txSizeCostLimit\n\tif sizeFactor > amount {\n\t\tamount = sizeFactor\n\t}\n\treturn p.sendRequest(SendTxV2Msg, reqID, txs, amount)\n}\n\n// waitBefore implements distPeer interface\nfunc (p *serverPeer) waitBefore(maxCost uint64) (time.Duration, float64) {\n\treturn p.fcServer.CanSend(maxCost)\n}\n\n// getRequestCost returns an estimated request cost according to the flow control\n// rules negotiated between the server and the client.\nfunc (p *serverPeer) getRequestCost(msgcode uint64, amount int) uint64 {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\tcosts := p.fcCosts[msgcode]\n\tif costs == nil {\n\t\treturn 0\n\t}\n\tcost := costs.baseCost + costs.reqCost*uint64(amount)\n\tif cost > p.fcParams.BufLimit {\n\t\tcost = p.fcParams.BufLimit\n\t}\n\treturn cost\n}\n\n// getTxRelayCost returns an estimated relay cost according to the flow control\n// rules negotiated between the server and the client.\nfunc (p *serverPeer) getTxRelayCost(amount, size int) uint64 {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\tcosts := p.fcCosts[SendTxV2Msg]\n\tif costs == nil {\n\t\treturn 0\n\t}\n\tcost := costs.baseCost + costs.reqCost*uint64(amount)\n\tsizeCost := costs.baseCost + costs.reqCost*uint64(size)/txSizeCostLimit\n\tif sizeCost > cost {\n\t\tcost = sizeCost\n\t}\n\tif cost > p.fcParams.BufLimit {\n\t\tcost = p.fcParams.BufLimit\n\t}\n\treturn cost\n}\n\n// HasBlock checks if the peer has a given block\nfunc (p *serverPeer) HasBlock(hash common.Hash, number uint64, hasState bool) bool {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\tif p.hasBlockHook != nil {\n\t\treturn p.hasBlockHook(hash, number, hasState)\n\t}\n\thead := p.headInfo.Number\n\tvar since, recent uint64\n\tif hasState {\n\t\tsince = p.stateSince\n\t\trecent = p.stateRecent\n\t} else {\n\t\tsince = p.chainSince\n\t\trecent = p.chainRecent\n\t}\n\treturn head >= number && number >= since && (recent == 0 || number+recent+4 > head)\n}\n\n// updateFlowControl updates the flow control parameters belonging to the server\n// node if the announced key/value set contains relevant fields\nfunc (p *serverPeer) updateFlowControl(update keyValueMap) {\n\tp.lock.Lock()\n\tdefer 
p.lock.Unlock()\n\n\t// If any of the flow control params is missing, refuse to update.\n\tvar params flowcontrol.ServerParams\n\tif update.get(\"flowControl/BL\", &params.BufLimit) == nil && update.get(\"flowControl/MRR\", &params.MinRecharge) == nil {\n\t\t// todo can light client set a minimal acceptable flow control params?\n\t\tp.fcParams = params\n\t\tp.fcServer.UpdateParams(params)\n\t}\n\tvar MRC RequestCostList\n\tif update.get(\"flowControl/MRC\", &MRC) == nil {\n\t\tcostUpdate := MRC.decode(ProtocolLengths[uint(p.version)])\n\t\tfor code, cost := range costUpdate {\n\t\t\tp.fcCosts[code] = cost\n\t\t}\n\t}\n}\n\n// updateHead updates the head information based on the announcement from\n// the peer.\nfunc (p *serverPeer) updateHead(hash common.Hash, number uint64, td *big.Int) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\tp.headInfo = blockInfo{Hash: hash, Number: number, Td: td}\n}\n\n// Handshake executes the les protocol handshake, negotiating version number,\n// network IDs and genesis blocks.\nfunc (p *serverPeer) Handshake(genesis common.Hash, forkid forkid.ID, forkFilter forkid.Filter) error {\n\t// Note: there is no need to share local head with a server but older servers still\n\t// require these fields so we announce zero values.\n\treturn p.handshake(common.Big0, common.Hash{}, 0, genesis, forkid, forkFilter, func(lists *keyValueList) {\n\t\t// Add some client-specific handshake fields\n\t\t//\n\t\t// Request simple announcements by default and signed announcements\n\t\t// from trusted servers.\n\t\tp.announceType = announceTypeSimple\n\t\tif p.trusted {\n\t\t\tp.announceType = announceTypeSigned\n\t\t}\n\t\t*lists = (*lists).add(\"announceType\", p.announceType)\n\t}, func(recv keyValueMap) error {\n\t\tvar (\n\t\t\trHash common.Hash\n\t\t\trNum  uint64\n\t\t\trTd   *big.Int\n\t\t)\n\t\tif err := recv.get(\"headTd\", &rTd); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := recv.get(\"headHash\", &rHash); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := recv.get(\"headNum\", &rNum); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.headInfo = blockInfo{Hash: rHash, Number: rNum, Td: rTd}\n\t\tif recv.get(\"serveChainSince\", &p.chainSince) != nil {\n\t\t\tp.onlyAnnounce = true\n\t\t}\n\t\tif recv.get(\"serveRecentChain\", &p.chainRecent) != nil {\n\t\t\tp.chainRecent = 0\n\t\t}\n\t\tif recv.get(\"serveStateSince\", &p.stateSince) != nil {\n\t\t\tp.onlyAnnounce = true\n\t\t}\n\t\tif recv.get(\"serveRecentState\", &p.stateRecent) != nil {\n\t\t\tp.stateRecent = 0\n\t\t}\n\t\tif recv.get(\"txRelay\", nil) != nil {\n\t\t\tp.onlyAnnounce = true\n\t\t}\n\t\tif p.version >= lpv4 {\n\t\t\tvar recentTx uint\n\t\t\tif err := recv.get(\"recentTxLookup\", &recentTx); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tp.txHistory = uint64(recentTx)\n\t\t} else {\n\t\t\t// The weak assumption held here is that a legacy les server (les2/3)\n\t\t\t// has unlimited transaction history. 
Serving in these legacy\n\t\t\t// versions is disabled if the transaction is unindexed.\n\t\t\tp.txHistory = txIndexUnlimited\n\t\t}\n\t\tif p.onlyAnnounce && !p.trusted {\n\t\t\treturn errResp(ErrUselessPeer, \"peer cannot serve requests\")\n\t\t}\n\t\t// Parse flow control handshake packet.\n\t\tvar sParams flowcontrol.ServerParams\n\t\tif err := recv.get(\"flowControl/BL\", &sParams.BufLimit); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := recv.get(\"flowControl/MRR\", &sParams.MinRecharge); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar MRC RequestCostList\n\t\tif err := recv.get(\"flowControl/MRC\", &MRC); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.fcParams = sParams\n\t\tp.fcServer = flowcontrol.NewServerNode(sParams, &mclock.System{})\n\t\tp.fcCosts = MRC.decode(ProtocolLengths[uint(p.version)])\n\n\t\trecv.get(\"checkpoint/value\", &p.checkpoint)\n\t\trecv.get(\"checkpoint/registerHeight\", &p.checkpointNumber)\n\n\t\tif !p.onlyAnnounce {\n\t\t\tfor msgCode := range reqAvgTimeCost {\n\t\t\t\tif p.fcCosts[msgCode] == nil {\n\t\t\t\t\treturn errResp(ErrUselessPeer, \"peer does not support message %d\", msgCode)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}\n\n// setValueTracker sets the value tracker references for connected servers. Note that the\n// references should be removed upon disconnection by setValueTracker(nil).\nfunc (p *serverPeer) setValueTracker(nvt *vfc.NodeValueTracker) {\n\tp.vtLock.Lock()\n\tp.nodeValueTracker = nvt\n\tif nvt != nil {\n\t\tp.sentReqs = make(map[uint64]sentReqEntry)\n\t} else {\n\t\tp.sentReqs = nil\n\t}\n\tp.vtLock.Unlock()\n}\n\n// updateVtParams updates the server's price table in the value tracker.\nfunc (p *serverPeer) updateVtParams() {\n\tp.vtLock.Lock()\n\tdefer p.vtLock.Unlock()\n\n\tif p.nodeValueTracker == nil {\n\t\treturn\n\t}\n\treqCosts := make([]uint64, len(requestList))\n\tfor code, costs := range p.fcCosts {\n\t\tif m, ok := requestMapping[uint32(code)]; ok {\n\t\t\treqCosts[m.first] = costs.baseCost + costs.reqCost\n\t\t\tif m.rest != -1 {\n\t\t\t\treqCosts[m.rest] = costs.reqCost\n\t\t\t}\n\t\t}\n\t}\n\tp.nodeValueTracker.UpdateCosts(reqCosts)\n}\n\n// sentReqEntry remembers sent requests and their sending times\ntype sentReqEntry struct {\n\treqType, amount uint32\n\tat              mclock.AbsTime\n}\n\n// sentRequest marks a request sent at the current moment to this server.\nfunc (p *serverPeer) sentRequest(id uint64, reqType, amount uint32) {\n\tp.vtLock.Lock()\n\tif p.sentReqs != nil {\n\t\tp.sentReqs[id] = sentReqEntry{reqType, amount, mclock.Now()}\n\t}\n\tp.vtLock.Unlock()\n}\n\n// answeredRequest marks a request answered at the current moment by this server.\nfunc (p *serverPeer) answeredRequest(id uint64) {\n\tp.vtLock.Lock()\n\tif p.sentReqs == nil {\n\t\tp.vtLock.Unlock()\n\t\treturn\n\t}\n\te, ok := p.sentReqs[id]\n\tdelete(p.sentReqs, id)\n\tnvt := p.nodeValueTracker\n\tp.vtLock.Unlock()\n\tif !ok {\n\t\treturn\n\t}\n\tvar (\n\t\tvtReqs   [2]vfc.ServedRequest\n\t\treqCount int\n\t)\n\tm := requestMapping[e.reqType]\n\tif m.rest == -1 || e.amount <= 1 {\n\t\treqCount = 1\n\t\tvtReqs[0] = vfc.ServedRequest{ReqType: uint32(m.first), Amount: e.amount}\n\t} else {\n\t\treqCount = 2\n\t\tvtReqs[0] = vfc.ServedRequest{ReqType: uint32(m.first), Amount: 1}\n\t\tvtReqs[1] = vfc.ServedRequest{ReqType: uint32(m.rest), Amount: e.amount - 1}\n\t}\n\tdt := time.Duration(mclock.Now() - e.at)\n\tnvt.Served(vtReqs[:reqCount], dt)\n}\n\n// clientPeer represents each node to which the les server is 
connected.\n// The node here refers to the light client.\ntype clientPeer struct {\n\tpeerCommons\n\n\t// responseLock ensures that responses are queued in the same order as\n\t// RequestProcessed is called\n\tresponseLock  sync.Mutex\n\tresponseCount uint64 // Counter to generate a unique id for request processing.\n\n\tbalance *vfs.NodeBalance\n\n\t// invalidLock is used for protecting invalidCount.\n\tinvalidLock  sync.RWMutex\n\tinvalidCount utils.LinearExpiredValue // Counts the invalid requests the client peer has made.\n\n\tserver   bool\n\terrCh    chan error\n\tfcClient *flowcontrol.ClientNode // Server side mirror token bucket.\n}\n\nfunc newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *clientPeer {\n\treturn &clientPeer{\n\t\tpeerCommons: peerCommons{\n\t\t\tPeer:      p,\n\t\t\trw:        rw,\n\t\t\tid:        p.ID().String(),\n\t\t\tversion:   version,\n\t\t\tnetwork:   network,\n\t\t\tsendQueue: utils.NewExecQueue(100),\n\t\t\tcloseCh:   make(chan struct{}),\n\t\t},\n\t\tinvalidCount: utils.LinearExpiredValue{Rate: mclock.AbsTime(time.Hour)},\n\t\terrCh:        make(chan error, 1),\n\t}\n}\n\n// freeClientId returns a string identifier for the peer. Multiple peers with\n// the same identifier cannot be connected in free mode simultaneously.\nfunc (p *clientPeer) freeClientId() string {\n\tif addr, ok := p.RemoteAddr().(*net.TCPAddr); ok {\n\t\tif addr.IP.IsLoopback() {\n\t\t\t// using peer id instead of loopback ip address allows multiple free\n\t\t\t// connections from local machine to own server\n\t\t\treturn p.id\n\t\t} else {\n\t\t\treturn addr.IP.String()\n\t\t}\n\t}\n\treturn p.id\n}\n\n// sendStop notifies the client about being in frozen state\nfunc (p *clientPeer) sendStop() error {\n\treturn p2p.Send(p.rw, StopMsg, struct{}{})\n}\n\n// sendResume notifies the client about getting out of frozen state\nfunc (p *clientPeer) sendResume(bv uint64) error {\n\treturn p2p.Send(p.rw, ResumeMsg, bv)\n}\n\n// freeze temporarily puts the client in a frozen state which means all unprocessed\n// and subsequent requests are dropped. Unfreezing happens automatically after a short\n// time if the client's buffer value is at least in the slightly positive region.\n// The client is also notified about being frozen/unfrozen with a Stop/Resume message.\nfunc (p *clientPeer) freeze() {\n\tif p.version < lpv3 {\n\t\t// if Stop/Resume is not supported then just drop the peer after setting\n\t\t// its frozen status permanently\n\t\tatomic.StoreUint32(&p.frozen, 1)\n\t\tp.Peer.Disconnect(p2p.DiscUselessPeer)\n\t\treturn\n\t}\n\tif atomic.SwapUint32(&p.frozen, 1) == 0 {\n\t\tgo func() {\n\t\t\tp.sendStop()\n\t\t\ttime.Sleep(freezeTimeBase + time.Duration(rand.Int63n(int64(freezeTimeRandom))))\n\t\t\tfor {\n\t\t\t\tbufValue, bufLimit := p.fcClient.BufferStatus()\n\t\t\t\tif bufLimit == 0 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif bufValue <= bufLimit/8 {\n\t\t\t\t\ttime.Sleep(freezeCheckPeriod)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tatomic.StoreUint32(&p.frozen, 0)\n\t\t\t\tp.sendResume(bufValue)\n\t\t\t\treturn\n\t\t\t}\n\t\t}()\n\t}\n}\n\n
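// Worked example for the freeze cycle above (illustrative numbers added for\n// documentation; not part of the original code): a frozen client stays frozen\n// for freezeTimeBase = 700ms plus a random 0-600ms, after which its buffer is\n// rechecked every freezeCheckPeriod = 100ms; with a BufLimit of 80000 the\n// client is resumed once its buffer value recovers above 80000/8 = 10000.\n\n// reply struct represents a reply with the actual data already RLP encoded and\n// only the bv (buffer value) missing. 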
This allows the serving mechanism to\n// calculate the bv value which depends on the data size before sending the reply.\ntype reply struct {\n\tw              p2p.MsgWriter\n\tmsgcode, reqID uint64\n\tdata           rlp.RawValue\n}\n\n// send sends the reply with the calculated buffer value\nfunc (r *reply) send(bv uint64) error {\n\ttype resp struct {\n\t\tReqID, BV uint64\n\t\tData      rlp.RawValue\n\t}\n\treturn p2p.Send(r.w, r.msgcode, resp{r.reqID, bv, r.data})\n}\n\n// size returns the RLP encoded size of the message data\nfunc (r *reply) size() uint32 {\n\treturn uint32(len(r.data))\n}\n\n// replyBlockHeaders creates a reply with a batch of block headers\nfunc (p *clientPeer) replyBlockHeaders(reqID uint64, headers []*types.Header) *reply {\n\tdata, _ := rlp.EncodeToBytes(headers)\n\treturn &reply{p.rw, BlockHeadersMsg, reqID, data}\n}\n\n// replyBlockBodiesRLP creates a reply with a batch of block contents from\n// an already RLP encoded format.\nfunc (p *clientPeer) replyBlockBodiesRLP(reqID uint64, bodies []rlp.RawValue) *reply {\n\tdata, _ := rlp.EncodeToBytes(bodies)\n\treturn &reply{p.rw, BlockBodiesMsg, reqID, data}\n}\n\n// replyCode creates a reply with a batch of arbitrary internal data, corresponding to the\n// hashes requested.\nfunc (p *clientPeer) replyCode(reqID uint64, codes [][]byte) *reply {\n\tdata, _ := rlp.EncodeToBytes(codes)\n\treturn &reply{p.rw, CodeMsg, reqID, data}\n}\n\n// replyReceiptsRLP creates a reply with a batch of transaction receipts, corresponding to the\n// ones requested from an already RLP encoded format.\nfunc (p *clientPeer) replyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *reply {\n\tdata, _ := rlp.EncodeToBytes(receipts)\n\treturn &reply{p.rw, ReceiptsMsg, reqID, data}\n}\n\n// replyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested.\nfunc (p *clientPeer) replyProofsV2(reqID uint64, proofs light.NodeList) *reply {\n\tdata, _ := rlp.EncodeToBytes(proofs)\n\treturn &reply{p.rw, ProofsV2Msg, reqID, data}\n}\n\n// replyHelperTrieProofs creates a reply with a batch of HelperTrie proofs, corresponding to the ones requested.\nfunc (p *clientPeer) replyHelperTrieProofs(reqID uint64, resp HelperTrieResps) *reply {\n\tdata, _ := rlp.EncodeToBytes(resp)\n\treturn &reply{p.rw, HelperTrieProofsMsg, reqID, data}\n}\n\n// replyTxStatus creates a reply with a batch of transaction status records, corresponding to the ones requested.\nfunc (p *clientPeer) replyTxStatus(reqID uint64, stats []light.TxStatus) *reply {\n\tdata, _ := rlp.EncodeToBytes(stats)\n\treturn &reply{p.rw, TxStatusMsg, reqID, data}\n}\n\n// sendAnnounce announces the availability of a number of blocks through\n// a hash notification.\nfunc (p *clientPeer) sendAnnounce(request announceData) error {\n\treturn p2p.Send(p.rw, AnnounceMsg, request)\n}\n\n// allowInactive implements clientPoolPeer\nfunc (p *clientPeer) allowInactive() bool {\n\treturn false\n}\n\n// updateCapacity updates the request serving capacity assigned to a given client\n// and also sends an announcement about the updated flow control parameters\nfunc (p *clientPeer) updateCapacity(cap uint64) {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\tif cap != p.fcParams.MinRecharge {\n\t\tp.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio}\n\t\tp.fcClient.UpdateParams(p.fcParams)\n\t\tvar kvList keyValueList\n\t\tkvList = kvList.add(\"flowControl/MRR\", cap)\n\t\tkvList = kvList.add(\"flowControl/BL\", cap*bufLimitRatio)\n\t\tp.queueSend(func() 
{ p.sendAnnounce(announceData{Update: kvList}) })\n\t}\n}\n\n// freezeClient temporarily puts the client in a frozen state which means all\n// unprocessed and subsequent requests are dropped. Unfreezing happens automatically\n// after a short time if the client's buffer value is at least in the slightly positive\n// region. The client is also notified about being frozen/unfrozen with a Stop/Resume\n// message.\nfunc (p *clientPeer) freezeClient() {\n\tif p.version < lpv3 {\n\t\t// if Stop/Resume is not supported then just drop the peer after setting\n\t\t// its frozen status permanently\n\t\tatomic.StoreUint32(&p.frozen, 1)\n\t\tp.Peer.Disconnect(p2p.DiscUselessPeer)\n\t\treturn\n\t}\n\tif atomic.SwapUint32(&p.frozen, 1) == 0 {\n\t\tgo func() {\n\t\t\tp.sendStop()\n\t\t\ttime.Sleep(freezeTimeBase + time.Duration(rand.Int63n(int64(freezeTimeRandom))))\n\t\t\tfor {\n\t\t\t\tbufValue, bufLimit := p.fcClient.BufferStatus()\n\t\t\t\tif bufLimit == 0 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif bufValue <= bufLimit/8 {\n\t\t\t\t\ttime.Sleep(freezeCheckPeriod)\n\t\t\t\t} else {\n\t\t\t\t\tatomic.StoreUint32(&p.frozen, 0)\n\t\t\t\t\tp.sendResume(bufValue)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n\n// Handshake executes the les protocol handshake, negotiating version number,\n// network IDs, difficulties, head and genesis blocks.\nfunc (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter, server *LesServer) error {\n\trecentTx := server.handler.blockchain.TxLookupLimit()\n\tif recentTx != txIndexUnlimited {\n\t\tif recentTx < blockSafetyMargin {\n\t\t\trecentTx = txIndexDisabled\n\t\t} else {\n\t\t\trecentTx -= blockSafetyMargin - txIndexRecentOffset\n\t\t}\n\t}\n\tif server.config.UltraLightOnlyAnnounce {\n\t\trecentTx = txIndexDisabled\n\t}\n\tif recentTx != txIndexUnlimited && p.version < lpv4 {\n\t\treturn errors.New(\"cannot serve old clients without a complete tx index\")\n\t}\n\t// Note: clientPeer.headInfo should contain the last head announced to the client by us.\n\t// The values announced in the handshake are dummy values for compatibility reasons and should be ignored.\n\tp.headInfo = blockInfo{Hash: head, Number: headNum, Td: td}\n\treturn p.handshake(td, head, headNum, genesis, forkID, forkFilter, func(lists *keyValueList) {\n\t\t// Add some information about which services the server can offer.\n\t\tif !server.config.UltraLightOnlyAnnounce {\n\t\t\t*lists = (*lists).add(\"serveHeaders\", nil)\n\t\t\t*lists = (*lists).add(\"serveChainSince\", uint64(0))\n\t\t\t*lists = (*lists).add(\"serveStateSince\", uint64(0))\n\n\t\t\t// If the local ethereum node is running in archive mode, advertise that we\n\t\t\t// have state data for all blocks. 
Otherwise only recent state is available.\n\t\t\tstateRecent := uint64(core.TriesInMemory - blockSafetyMargin)\n\t\t\tif server.archiveMode {\n\t\t\t\tstateRecent = 0\n\t\t\t}\n\t\t\t*lists = (*lists).add(\"serveRecentState\", stateRecent)\n\t\t\t*lists = (*lists).add(\"txRelay\", nil)\n\t\t}\n\t\tif p.version >= lpv4 {\n\t\t\t*lists = (*lists).add(\"recentTxLookup\", recentTx)\n\t\t}\n\t\t*lists = (*lists).add(\"flowControl/BL\", server.defParams.BufLimit)\n\t\t*lists = (*lists).add(\"flowControl/MRR\", server.defParams.MinRecharge)\n\n\t\tvar costList RequestCostList\n\t\tif server.costTracker.testCostList != nil {\n\t\t\tcostList = server.costTracker.testCostList\n\t\t} else {\n\t\t\tcostList = server.costTracker.makeCostList(server.costTracker.globalFactor())\n\t\t}\n\t\t*lists = (*lists).add(\"flowControl/MRC\", costList)\n\t\tp.fcCosts = costList.decode(ProtocolLengths[uint(p.version)])\n\t\tp.fcParams = server.defParams\n\n\t\t// Add the advertised checkpoint and the block height at which it was\n\t\t// registered, so the client can verify the checkpoint validity.\n\t\tif server.oracle != nil && server.oracle.IsRunning() {\n\t\t\tcp, height := server.oracle.StableCheckpoint()\n\t\t\tif cp != nil {\n\t\t\t\t*lists = (*lists).add(\"checkpoint/value\", cp)\n\t\t\t\t*lists = (*lists).add(\"checkpoint/registerHeight\", height)\n\t\t\t}\n\t\t}\n\t}, func(recv keyValueMap) error {\n\t\tp.server = recv.get(\"flowControl/MRR\", nil) == nil\n\t\tif p.server {\n\t\t\tp.announceType = announceTypeNone // connected to another server, send no messages\n\t\t} else {\n\t\t\tif recv.get(\"announceType\", &p.announceType) != nil {\n\t\t\t\t// set default announceType on server side\n\t\t\t\tp.announceType = announceTypeSimple\n\t\t\t}\n\t\t\tp.fcClient = flowcontrol.NewClientNode(server.fcManager, p.fcParams)\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (p *clientPeer) bumpInvalid() {\n\tp.invalidLock.Lock()\n\tp.invalidCount.Add(1, mclock.Now())\n\tp.invalidLock.Unlock()\n}\n\nfunc (p *clientPeer) getInvalid() uint64 {\n\tp.invalidLock.RLock()\n\tdefer p.invalidLock.RUnlock()\n\treturn p.invalidCount.Value(mclock.Now())\n}\n\n// serverPeerSubscriber is an interface to notify services about added or\n// removed server peers\ntype serverPeerSubscriber interface {\n\tregisterPeer(*serverPeer)\n\tunregisterPeer(*serverPeer)\n}\n\n// serverPeerSet represents the set of active server peers currently\n// participating in the Light Ethereum sub-protocol.\ntype serverPeerSet struct {\n\tpeers map[string]*serverPeer\n\t// subscribers is a batch of subscribers and peerset will notify\n\t// these subscribers when the peerset changes (new server peer is\n\t// added or removed)\n\tsubscribers []serverPeerSubscriber\n\tclosed      bool\n\tlock        sync.RWMutex\n}\n\n// newServerPeerSet creates a new peer set to track the active server peers.\nfunc newServerPeerSet() *serverPeerSet {\n\treturn &serverPeerSet{peers: make(map[string]*serverPeer)}\n}\n\n// subscribe adds a service to be notified about added or removed\n// peers and also registers all active peers with the given service.\nfunc (ps *serverPeerSet) subscribe(sub serverPeerSubscriber) {\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\n\tps.subscribers = append(ps.subscribers, sub)\n\tfor _, p := range ps.peers {\n\t\tsub.registerPeer(p)\n\t}\n}\n\n// unSubscribe removes the specified service from the subscriber pool.\nfunc (ps *serverPeerSet) unSubscribe(sub serverPeerSubscriber) {\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\n\tfor i, s := range ps.subscribers {\n\t\tif s == sub 
{\n\t\t\tps.subscribers = append(ps.subscribers[:i], ps.subscribers[i+1:]...)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// register adds a new server peer into the set, or returns an error if the\n// peer is already known.\nfunc (ps *serverPeerSet) register(peer *serverPeer) error {\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\n\tif ps.closed {\n\t\treturn errClosed\n\t}\n\tif _, exist := ps.peers[peer.id]; exist {\n\t\treturn errAlreadyRegistered\n\t}\n\tps.peers[peer.id] = peer\n\tfor _, sub := range ps.subscribers {\n\t\tsub.registerPeer(peer)\n\t}\n\treturn nil\n}\n\n// unregister removes a remote peer from the active set, disabling any further\n// actions to/from that particular entity. It also initiates disconnection at\n// the networking layer.\nfunc (ps *serverPeerSet) unregister(id string) error {\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\n\tp, ok := ps.peers[id]\n\tif !ok {\n\t\treturn errNotRegistered\n\t}\n\tdelete(ps.peers, id)\n\tfor _, sub := range ps.subscribers {\n\t\tsub.unregisterPeer(p)\n\t}\n\tp.Peer.Disconnect(p2p.DiscRequested)\n\treturn nil\n}\n\n// ids returns a list of all registered peer IDs.\nfunc (ps *serverPeerSet) ids() []string {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\tvar ids []string\n\tfor id := range ps.peers {\n\t\tids = append(ids, id)\n\t}\n\treturn ids\n}\n\n// peer retrieves the registered peer with the given id.\nfunc (ps *serverPeerSet) peer(id string) *serverPeer {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\treturn ps.peers[id]\n}\n\n// len returns the current number of peers in the set.\nfunc (ps *serverPeerSet) len() int {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\treturn len(ps.peers)\n}\n\n// bestPeer retrieves the known peer with the currently highest total difficulty.\n// If the peerset is a \"client peer set\", nothing meaningful is returned, because\n// client peers never send their latest status back to the server.\nfunc (ps *serverPeerSet) bestPeer() *serverPeer {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\tvar (\n\t\tbestPeer *serverPeer\n\t\tbestTd   *big.Int\n\t)\n\tfor _, p := range ps.peers {\n\t\tif td := p.Td(); bestTd == nil || td.Cmp(bestTd) > 0 {\n\t\t\tbestPeer, bestTd = p, td\n\t\t}\n\t}\n\treturn bestPeer\n}\n\n// allPeers returns all server peers in a list.\nfunc (ps *serverPeerSet) allPeers() []*serverPeer {\n\tps.lock.RLock()\n\tdefer ps.lock.RUnlock()\n\n\tlist := make([]*serverPeer, 0, len(ps.peers))\n\tfor _, p := range ps.peers {\n\t\tlist = append(list, p)\n\t}\n\treturn list\n}\n\n// close disconnects all peers. No new peers can be registered\n// after close has returned.\nfunc (ps *serverPeerSet) close() {\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\n\tfor _, p := range ps.peers {\n\t\tp.Disconnect(p2p.DiscQuitting)\n\t}\n\tps.closed = true\n}\n"
  },
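  {
    "path": "les/sketch_peerset_subscription.go",
    "content": "// Illustrative sketch, not part of the go-ethereum source tree: this file\n// name and every identifier in it are hypothetical. It distills the\n// subscribe/register pattern of serverPeerSet in les/peer.go: a subscriber\n// added late is replayed every already-registered peer, and every later\n// registration is fanned out to all subscribers while the set's lock is held.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n)\n\ntype subscriber interface {\n\tregisterPeer(id string)\n}\n\ntype peerSet struct {\n\tlock  sync.RWMutex\n\tpeers map[string]struct{}\n\tsubs  []subscriber\n}\n\nfunc newPeerSet() *peerSet {\n\treturn &peerSet{peers: make(map[string]struct{})}\n}\n\n// subscribe adds a subscriber and replays all currently known peers to it,\n// so late subscribers never miss earlier registrations.\nfunc (ps *peerSet) subscribe(s subscriber) {\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\n\tps.subs = append(ps.subs, s)\n\tfor id := range ps.peers {\n\t\ts.registerPeer(id)\n\t}\n}\n\n// register adds a peer and notifies every subscriber about it.\nfunc (ps *peerSet) register(id string) {\n\tps.lock.Lock()\n\tdefer ps.lock.Unlock()\n\n\tps.peers[id] = struct{}{}\n\tfor _, s := range ps.subs {\n\t\ts.registerPeer(id)\n\t}\n}\n\ntype printer struct{ name string }\n\nfunc (p printer) registerPeer(id string) { fmt.Println(p.name, \"saw peer\", id) }\n\nfunc main() {\n\tps := newPeerSet()\n\tps.register(\"peer-1\")          // registered before anyone subscribed\n\tps.subscribe(printer{\"early\"}) // still observes peer-1 via the replay\n\tps.register(\"peer-2\")          // fanned out to the live subscriber\n}\n"
  },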
  {
    "path": "les/peer_test.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"crypto/rand\"\n\t\"errors\"\n\t\"math/big\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\ntype testServerPeerSub struct {\n\tregCh   chan *serverPeer\n\tunregCh chan *serverPeer\n}\n\nfunc newTestServerPeerSub() *testServerPeerSub {\n\treturn &testServerPeerSub{\n\t\tregCh:   make(chan *serverPeer, 1),\n\t\tunregCh: make(chan *serverPeer, 1),\n\t}\n}\n\nfunc (t *testServerPeerSub) registerPeer(p *serverPeer)   { t.regCh <- p }\nfunc (t *testServerPeerSub) unregisterPeer(p *serverPeer) { t.unregCh <- p }\n\nfunc TestPeerSubscription(t *testing.T) {\n\tpeers := newServerPeerSet()\n\tdefer peers.close()\n\n\tcheckIds := func(expect []string) {\n\t\tgiven := peers.ids()\n\t\tif len(given) == 0 && len(expect) == 0 {\n\t\t\treturn\n\t\t}\n\t\tsort.Strings(given)\n\t\tsort.Strings(expect)\n\t\tif !reflect.DeepEqual(given, expect) {\n\t\t\tt.Fatalf(\"all peer ids mismatch, want %v, given %v\", expect, given)\n\t\t}\n\t}\n\tcheckPeers := func(peerCh chan *serverPeer) {\n\t\tselect {\n\t\tcase <-peerCh:\n\t\tcase <-time.NewTimer(100 * time.Millisecond).C:\n\t\t\tt.Fatalf(\"timeout, no event received\")\n\t\t}\n\t\tselect {\n\t\tcase <-peerCh:\n\t\t\tt.Fatalf(\"unexpected event received\")\n\t\tcase <-time.NewTimer(10 * time.Millisecond).C:\n\t\t}\n\t}\n\tcheckIds([]string{})\n\n\tsub := newTestServerPeerSub()\n\tpeers.subscribe(sub)\n\n\t// Generate a random id and create the peer\n\tvar id enode.ID\n\trand.Read(id[:])\n\tpeer := newServerPeer(2, NetworkId, false, p2p.NewPeer(id, \"name\", nil), nil)\n\tpeers.register(peer)\n\n\tcheckIds([]string{peer.id})\n\tcheckPeers(sub.regCh)\n\n\tpeers.unregister(peer.id)\n\tcheckIds([]string{})\n\tcheckPeers(sub.unregCh)\n}\n\ntype fakeChain struct{}\n\nfunc (f *fakeChain) Config() *params.ChainConfig { return params.MainnetChainConfig }\nfunc (f *fakeChain) Genesis() *types.Block {\n\treturn core.DefaultGenesisBlock().ToBlock(rawdb.NewMemoryDatabase())\n}\nfunc (f *fakeChain) CurrentHeader() *types.Header { return &types.Header{Number: big.NewInt(10000000)} }\n\nfunc TestHandshake(t *testing.T) {\n\t// Create a message pipe to communicate through\n\tapp, net := p2p.MsgPipe()\n\n\t// Generate a random id and create the peer\n\tvar id enode.ID\n\trand.Read(id[:])\n\n\tpeer1 := newClientPeer(2, NetworkId, p2p.NewPeer(id, \"name\", nil), 
net)\n\tpeer2 := newServerPeer(2, NetworkId, true, p2p.NewPeer(id, \"name\", nil), app)\n\n\tvar (\n\t\terrCh1 = make(chan error, 1)\n\t\terrCh2 = make(chan error, 1)\n\n\t\ttd      = big.NewInt(100)\n\t\thead    = common.HexToHash(\"deadbeef\")\n\t\theadNum = uint64(10)\n\t\tgenesis = common.HexToHash(\"cafebabe\")\n\n\t\tchain1, chain2   = &fakeChain{}, &fakeChain{}\n\t\tforkID1          = forkid.NewID(chain1.Config(), chain1.Genesis().Hash(), chain1.CurrentHeader().Number.Uint64())\n\t\tforkID2          = forkid.NewID(chain2.Config(), chain2.Genesis().Hash(), chain2.CurrentHeader().Number.Uint64())\n\t\tfilter1, filter2 = forkid.NewFilter(chain1), forkid.NewFilter(chain2)\n\t)\n\n\tgo func() {\n\t\terrCh1 <- peer1.handshake(td, head, headNum, genesis, forkID1, filter1, func(list *keyValueList) {\n\t\t\tvar announceType uint64 = announceTypeSigned\n\t\t\t*list = (*list).add(\"announceType\", announceType)\n\t\t}, nil)\n\t}()\n\tgo func() {\n\t\terrCh2 <- peer2.handshake(td, head, headNum, genesis, forkID2, filter2, nil, func(recv keyValueMap) error {\n\t\t\tvar reqType uint64\n\t\t\terr := recv.get(\"announceType\", &reqType)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif reqType != announceTypeSigned {\n\t\t\t\treturn errors.New(\"Expected announceTypeSigned\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}()\n\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase err := <-errCh1:\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"handshake failed, %v\", err)\n\t\t\t}\n\t\tcase err := <-errCh2:\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"handshake failed, %v\", err)\n\t\t\t}\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Fatalf(\"timeout\")\n\t\t}\n\t}\n}\n"
  },
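  {
    "path": "les/sketch_expect_one_event_test.go",
    "content": "// Illustrative sketch, not part of the go-ethereum source tree: this file\n// name and every identifier in it are hypothetical. It isolates the\n// two-select idiom used by checkPeers in TestPeerSubscription above: the\n// first select fails the test when no event arrives in time, the second\n// fails it when an unexpected second event shows up shortly afterwards.\npackage main\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\n// expectOneEvent asserts that exactly one value arrives on ch: one within\n// wait, and none during the following quiet period.\nfunc expectOneEvent(t *testing.T, ch <-chan struct{}, wait, quiet time.Duration) {\n\tt.Helper()\n\tselect {\n\tcase <-ch:\n\tcase <-time.After(wait):\n\t\tt.Fatalf(\"timeout, no event received\")\n\t}\n\tselect {\n\tcase <-ch:\n\t\tt.Fatalf(\"unexpected second event received\")\n\tcase <-time.After(quiet):\n\t}\n}\n\nfunc TestExpectOneEvent(t *testing.T) {\n\tch := make(chan struct{}, 1)\n\tch <- struct{}{}\n\texpectOneEvent(t, ch, 100*time.Millisecond, 10*time.Millisecond)\n}\n"
  },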
  {
    "path": "les/protocol.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"crypto/ecdsa\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/big\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\tvfc \"github.com/ethereum/go-ethereum/les/vflux/client\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\n// Constants to match up protocol versions and messages\nconst (\n\tlpv2 = 2\n\tlpv3 = 3\n\tlpv4 = 4\n)\n\n// Supported versions of the les protocol (first is primary)\nvar (\n\tClientProtocolVersions    = []uint{lpv2, lpv3, lpv4}\n\tServerProtocolVersions    = []uint{lpv2, lpv3, lpv4}\n\tAdvertiseProtocolVersions = []uint{lpv2} // clients are searching for the first advertised protocol in the list\n)\n\n// Number of implemented message corresponding to different protocol versions.\nvar ProtocolLengths = map[uint]uint64{lpv2: 22, lpv3: 24, lpv4: 24}\n\nconst (\n\tNetworkId          = 1\n\tProtocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message\n\tblockSafetyMargin  = 4                // safety margin applied to block ranges specified relative to head block\n\n\ttxIndexUnlimited    = 0 // this value in the \"recentTxLookup\" handshake field means the entire tx index history is served\n\ttxIndexDisabled     = 1 // this value means tx index is not served at all\n\ttxIndexRecentOffset = 1 // txIndexRecentOffset + N in the handshake field means then tx index of the last N blocks is supported\n)\n\n// les protocol message codes\nconst (\n\t// Protocol messages inherited from LPV1\n\tStatusMsg          = 0x00\n\tAnnounceMsg        = 0x01\n\tGetBlockHeadersMsg = 0x02\n\tBlockHeadersMsg    = 0x03\n\tGetBlockBodiesMsg  = 0x04\n\tBlockBodiesMsg     = 0x05\n\tGetReceiptsMsg     = 0x06\n\tReceiptsMsg        = 0x07\n\tGetCodeMsg         = 0x0a\n\tCodeMsg            = 0x0b\n\t// Protocol messages introduced in LPV2\n\tGetProofsV2Msg         = 0x0f\n\tProofsV2Msg            = 0x10\n\tGetHelperTrieProofsMsg = 0x11\n\tHelperTrieProofsMsg    = 0x12\n\tSendTxV2Msg            = 0x13\n\tGetTxStatusMsg         = 0x14\n\tTxStatusMsg            = 0x15\n\t// Protocol messages introduced in LPV3\n\tStopMsg   = 0x16\n\tResumeMsg = 0x17\n)\n\n// GetBlockHeadersData represents a block header query (the request ID is not included)\ntype GetBlockHeadersData struct {\n\tOrigin  hashOrNumber // Block from which to retrieve headers\n\tAmount  uint64       // Maximum number of headers to retrieve\n\tSkip    uint64       // Blocks to skip between consecutive headers\n\tReverse bool         // Query direction (false = rising towards latest, true = falling towards genesis)\n}\n\n// 
GetBlockHeadersPacket represents a block header request\ntype GetBlockHeadersPacket struct {\n\tReqID uint64\n\tQuery GetBlockHeadersData\n}\n\n// GetBlockBodiesPacket represents a block body request\ntype GetBlockBodiesPacket struct {\n\tReqID  uint64\n\tHashes []common.Hash\n}\n\n// GetCodePacket represents a contract code request\ntype GetCodePacket struct {\n\tReqID uint64\n\tReqs  []CodeReq\n}\n\n// GetReceiptsPacket represents a block receipts request\ntype GetReceiptsPacket struct {\n\tReqID  uint64\n\tHashes []common.Hash\n}\n\n// GetProofsPacket represents a proof request\ntype GetProofsPacket struct {\n\tReqID uint64\n\tReqs  []ProofReq\n}\n\n// GetHelperTrieProofsPacket represents a helper trie proof request\ntype GetHelperTrieProofsPacket struct {\n\tReqID uint64\n\tReqs  []HelperTrieReq\n}\n\n// SendTxPacket represents a transaction propagation request\ntype SendTxPacket struct {\n\tReqID uint64\n\tTxs   []*types.Transaction\n}\n\n// GetTxStatusPacket represents a transaction status query\ntype GetTxStatusPacket struct {\n\tReqID  uint64\n\tHashes []common.Hash\n}\n\ntype requestInfo struct {\n\tname                          string\n\tmaxCount                      uint64\n\trefBasketFirst, refBasketRest float64\n}\n\n// reqMapping maps an LES request to one or two vflux service vector entries.\n// If rest != -1 and the request type is used with amounts larger than one then the\n// first one of the multi-request is mapped to first while the rest is mapped to rest.\ntype reqMapping struct {\n\tfirst, rest int\n}\n\nvar (\n\t// requests describes the available LES request types and their initializing amounts\n\t// in the vfc.ValueTracker reference basket. Initial values are estimates\n\t// based on the same values as the server's default cost estimates (reqAvgTimeCost).\n\trequests = map[uint64]requestInfo{\n\t\tGetBlockHeadersMsg:     {\"GetBlockHeaders\", MaxHeaderFetch, 10, 1000},\n\t\tGetBlockBodiesMsg:      {\"GetBlockBodies\", MaxBodyFetch, 1, 0},\n\t\tGetReceiptsMsg:         {\"GetReceipts\", MaxReceiptFetch, 1, 0},\n\t\tGetCodeMsg:             {\"GetCode\", MaxCodeFetch, 1, 0},\n\t\tGetProofsV2Msg:         {\"GetProofsV2\", MaxProofsFetch, 10, 0},\n\t\tGetHelperTrieProofsMsg: {\"GetHelperTrieProofs\", MaxHelperTrieProofsFetch, 10, 100},\n\t\tSendTxV2Msg:            {\"SendTxV2\", MaxTxSend, 1, 0},\n\t\tGetTxStatusMsg:         {\"GetTxStatus\", MaxTxStatus, 10, 0},\n\t}\n\trequestList    []vfc.RequestInfo\n\trequestMapping map[uint32]reqMapping\n)\n\n// init creates a request list and mapping between protocol message codes and vflux\n// service vector indices.\nfunc init() {\n\trequestMapping = make(map[uint32]reqMapping)\n\tfor code, req := range requests {\n\t\tcost := reqAvgTimeCost[code]\n\t\trm := reqMapping{len(requestList), -1}\n\t\trequestList = append(requestList, vfc.RequestInfo{\n\t\t\tName:       req.name + \".first\",\n\t\t\tInitAmount: req.refBasketFirst,\n\t\t\tInitValue:  float64(cost.baseCost + cost.reqCost),\n\t\t})\n\t\tif req.refBasketRest != 0 {\n\t\t\trm.rest = len(requestList)\n\t\t\trequestList = append(requestList, vfc.RequestInfo{\n\t\t\t\tName:       req.name + \".rest\",\n\t\t\t\tInitAmount: req.refBasketRest,\n\t\t\t\tInitValue:  float64(cost.reqCost),\n\t\t\t})\n\t\t}\n\t\trequestMapping[uint32(code)] = rm\n\t}\n}\n\ntype errCode int\n\nconst (\n\tErrMsgTooLarge = 
iota\n\tErrDecode\n\tErrInvalidMsgCode\n\tErrProtocolVersionMismatch\n\tErrNetworkIdMismatch\n\tErrGenesisBlockMismatch\n\tErrNoStatusMsg\n\tErrExtraStatusMsg\n\tErrSuspendedPeer\n\tErrUselessPeer\n\tErrRequestRejected\n\tErrUnexpectedResponse\n\tErrInvalidResponse\n\tErrTooManyTimeouts\n\tErrMissingKey\n\tErrForkIDRejected\n)\n\nfunc (e errCode) String() string {\n\treturn errorToString[int(e)]\n}\n\n// XXX change once legacy code is out\nvar errorToString = map[int]string{\n\tErrMsgTooLarge:             \"Message too long\",\n\tErrDecode:                  \"Invalid message\",\n\tErrInvalidMsgCode:          \"Invalid message code\",\n\tErrProtocolVersionMismatch: \"Protocol version mismatch\",\n\tErrNetworkIdMismatch:       \"NetworkId mismatch\",\n\tErrGenesisBlockMismatch:    \"Genesis block mismatch\",\n\tErrNoStatusMsg:             \"No status message\",\n\tErrExtraStatusMsg:          \"Extra status message\",\n\tErrSuspendedPeer:           \"Suspended peer\",\n\tErrUselessPeer:             \"Useless peer\",\n\tErrRequestRejected:         \"Request rejected\",\n\tErrUnexpectedResponse:      \"Unexpected response\",\n\tErrInvalidResponse:         \"Invalid response\",\n\tErrTooManyTimeouts:         \"Too many request timeouts\",\n\tErrMissingKey:              \"Key missing from list\",\n\tErrForkIDRejected:          \"ForkID rejected\",\n}\n\n// announceData is the network packet for the block announcements.\ntype announceData struct {\n\tHash       common.Hash // Hash of one particular block being announced\n\tNumber     uint64      // Number of one particular block being announced\n\tTd         *big.Int    // Total difficulty of one particular block being announced\n\tReorgDepth uint64\n\tUpdate     keyValueList\n}\n\n// sanityCheck verifies that the values are reasonable, as a DoS protection\nfunc (a *announceData) sanityCheck() error {\n\tif tdlen := a.Td.BitLen(); tdlen > 100 {\n\t\treturn fmt.Errorf(\"too large block TD: bitlen %d\", tdlen)\n\t}\n\treturn nil\n}\n\n// sign adds a signature to the block announcement by the given privKey\nfunc (a *announceData) sign(privKey *ecdsa.PrivateKey) {\n\trlp, _ := rlp.EncodeToBytes(blockInfo{a.Hash, a.Number, a.Td})\n\tsig, _ := crypto.Sign(crypto.Keccak256(rlp), privKey)\n\ta.Update = a.Update.add(\"sign\", sig)\n}\n\n// checkSignature verifies if the block announcement has a valid signature by the given pubKey\nfunc (a *announceData) checkSignature(id enode.ID, update keyValueMap) error {\n\tvar sig []byte\n\tif err := update.get(\"sign\", &sig); err != nil {\n\t\treturn err\n\t}\n\trlp, _ := rlp.EncodeToBytes(blockInfo{a.Hash, a.Number, a.Td})\n\trecPubkey, err := crypto.SigToPub(crypto.Keccak256(rlp), sig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif id == enode.PubkeyToIDV4(recPubkey) {\n\t\treturn nil\n\t}\n\treturn errors.New(\"wrong signature\")\n}\n\ntype blockInfo struct {\n\tHash   common.Hash // Hash of one particular block being announced\n\tNumber uint64      // Number of one particular block being announced\n\tTd     *big.Int    // Total difficulty of one particular block being announced\n}\n\n// hashOrNumber is a combined field for specifying an origin block.\ntype hashOrNumber struct {\n\tHash   common.Hash // Block hash from which to retrieve headers (excludes Number)\n\tNumber uint64      // Block number from which to retrieve headers (excludes Hash)\n}\n\n// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the\n// two contained union fields.\nfunc (hn *hashOrNumber) EncodeRLP(w io.Writer) error {\n\tif hn.Hash == (common.Hash{}) {\n\t\treturn 
rlp.Encode(w, hn.Number)\n\t}\n\tif hn.Number != 0 {\n\t\treturn fmt.Errorf(\"both origin hash (%x) and number (%d) provided\", hn.Hash, hn.Number)\n\t}\n\treturn rlp.Encode(w, hn.Hash)\n}\n\n// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents\n// into either a block hash or a block number.\nfunc (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error {\n\t_, size, _ := s.Kind()\n\torigin, err := s.Raw()\n\tif err == nil {\n\t\tswitch {\n\t\tcase size == 32:\n\t\t\terr = rlp.DecodeBytes(origin, &hn.Hash)\n\t\tcase size <= 8:\n\t\t\terr = rlp.DecodeBytes(origin, &hn.Number)\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"invalid input size %d for origin\", size)\n\t\t}\n\t}\n\treturn err\n}\n\n// CodeData is the network response packet for a node data retrieval.\ntype CodeData []struct {\n\tValue []byte\n}\n"
  },
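  {
    "path": "les/sketch_hashornumber_rlp.go",
    "content": "// Illustrative sketch, not part of the go-ethereum source tree: this file\n// name and every identifier in it are hypothetical. It demonstrates why\n// hashOrNumber.DecodeRLP in les/protocol.go can dispatch on the RLP payload\n// size alone: a block number encodes to a string of at most 8 bytes while a\n// hash always encodes to exactly 32, so the two union members never overlap\n// on the wire.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\n// payloadSize returns the payload size of the first RLP value in enc,\n// mirroring the s.Kind() call at the start of hashOrNumber.DecodeRLP.\nfunc payloadSize(enc []byte) uint64 {\n\t_, size, _ := rlp.NewStream(bytes.NewReader(enc), 0).Kind()\n\treturn size\n}\n\nfunc main() {\n\tbyNumber, _ := rlp.EncodeToBytes(uint64(1000000))\n\tbyHash, _ := rlp.EncodeToBytes(common.HexToHash(\"deadbeef\"))\n\n\tfmt.Println(\"number payload:\", payloadSize(byNumber), \"bytes\") // at most 8\n\tfmt.Println(\"hash payload:  \", payloadSize(byHash), \"bytes\")   // always 32\n}\n"
  },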
  {
    "path": "les/pruner.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/math\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/log\"\n)\n\n// pruner is responsible for pruning historical light chain data.\ntype pruner struct {\n\tdb       ethdb.Database\n\tindexers []*core.ChainIndexer\n\tcloseCh  chan struct{}\n\twg       sync.WaitGroup\n}\n\n// newPruner returns a light chain pruner instance.\nfunc newPruner(db ethdb.Database, indexers ...*core.ChainIndexer) *pruner {\n\tpruner := &pruner{\n\t\tdb:       db,\n\t\tindexers: indexers,\n\t\tcloseCh:  make(chan struct{}),\n\t}\n\tpruner.wg.Add(1)\n\tgo pruner.loop()\n\treturn pruner\n}\n\n// close notifies all background goroutines belonging to pruner to exit.\nfunc (p *pruner) close() {\n\tclose(p.closeCh)\n\tp.wg.Wait()\n}\n\n// loop periodically queries the status of chain indexers and prunes useless\n// historical chain data. Notably, whenever Geth restarts, it will iterate\n// all historical sections even they don't exist at all(below checkpoint) so\n// that light client can prune cached chain data that was ODRed after pruning\n// that section.\nfunc (p *pruner) loop() {\n\tdefer p.wg.Done()\n\n\t// cleanTicker is the ticker used to trigger a history clean 2 times a day.\n\tvar cleanTicker = time.NewTicker(12 * time.Hour)\n\n\t// pruning finds the sections that have been processed by all indexers\n\t// and deletes all historical chain data.\n\t// Note, if some indexers don't support pruning(e.g. eth.BloomIndexer),\n\t// pruning operations can be silently ignored.\n\tpruning := func() {\n\t\tmin := uint64(math.MaxUint64)\n\t\tfor _, indexer := range p.indexers {\n\t\t\tsections, _, _ := indexer.Sections()\n\t\t\tif sections < min {\n\t\t\t\tmin = sections\n\t\t\t}\n\t\t}\n\t\t// Always keep the latest section data in database.\n\t\tif min < 2 || len(p.indexers) == 0 {\n\t\t\treturn\n\t\t}\n\t\tfor _, indexer := range p.indexers {\n\t\t\tif err := indexer.Prune(min - 2); err != nil {\n\t\t\t\tlog.Debug(\"Failed to prune historical data\", \"err\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tp.db.Compact(nil, nil) // Compact entire database, ensure all removed data are deleted.\n\t}\n\tfor {\n\t\tpruning()\n\t\tselect {\n\t\tcase <-cleanTicker.C:\n\t\tcase <-p.closeCh:\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
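  {
    "path": "les/sketch_prune_threshold.go",
    "content": "// Illustrative sketch, not part of the go-ethereum source tree: this file\n// name and every identifier in it are hypothetical. It isolates the\n// arithmetic of the pruning closure in pruner.loop (les/pruner.go): a\n// section may only be pruned once the slowest indexer has processed it, and\n// the threshold handed to Prune is that minimum minus two, so the most\n// recent section data is always retained.\npackage main\n\nimport \"fmt\"\n\n// pruneThreshold returns the threshold to pass to the indexers' Prune call,\n// or false when nothing may be pruned yet.\nfunc pruneThreshold(sections []uint64) (uint64, bool) {\n\tif len(sections) == 0 {\n\t\treturn 0, false\n\t}\n\tmin := sections[0]\n\tfor _, s := range sections[1:] {\n\t\tif s < min {\n\t\t\tmin = s\n\t\t}\n\t}\n\tif min < 2 {\n\t\treturn 0, false // always keep the latest section data\n\t}\n\treturn min - 2, true\n}\n\nfunc main() {\n\t// The CHT indexer finished 5 sections but the bloom trie indexer only 3:\n\t// the slower indexer bounds how far history may be pruned.\n\tif threshold, ok := pruneThreshold([]uint64{5, 3}); ok {\n\t\tfmt.Println(\"prune threshold:\", threshold) // 1\n\t}\n}\n"
  },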
  {
    "path": "les/pruner_test.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/binary\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/light\"\n)\n\nfunc TestLightPruner(t *testing.T) {\n\tvar (\n\t\twaitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {\n\t\t\tfor {\n\t\t\t\tcs, _, _ := cIndexer.Sections()\n\t\t\t\tbts, _, _ := btIndexer.Sections()\n\t\t\t\tif cs >= 3 && bts >= 3 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t\tconfig    = light.TestClientIndexerConfig\n\t\tnetconfig = testnetConfig{\n\t\t\tblocks:   int(3*config.ChtSize + config.ChtConfirms),\n\t\t\tprotocol: 3,\n\t\t\tindexFn:  waitIndexers,\n\t\t\tconnect:  true,\n\t\t}\n\t)\n\tserver, client, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\t// checkDB iterates the chain with given prefix, resolves the block number\n\t// with given callback and ensures this entry should exist or not.\n\tcheckDB := func(from, to uint64, prefix []byte, resolve func(key, value []byte) *uint64, exist bool) bool {\n\t\tit := client.db.NewIterator(prefix, nil)\n\t\tdefer it.Release()\n\n\t\tvar next = from\n\t\tfor it.Next() {\n\t\t\tnumber := resolve(it.Key(), it.Value())\n\t\t\tif number == nil || *number < from {\n\t\t\t\tcontinue\n\t\t\t} else if *number > to {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif exist {\n\t\t\t\tif *number != next {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tnext++\n\t\t\t} else {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\t// checkPruned checks and ensures the stale chain data has been pruned.\n\tcheckPruned := func(from, to uint64) {\n\t\t// Iterate canonical hash\n\t\tif !checkDB(from, to, []byte(\"h\"), func(key, value []byte) *uint64 {\n\t\t\tif len(key) == 1+8+1 && bytes.Equal(key[9:10], []byte(\"n\")) {\n\t\t\t\tn := binary.BigEndian.Uint64(key[1:9])\n\t\t\t\treturn &n\n\t\t\t}\n\t\t\treturn nil\n\t\t}, false) {\n\t\t\tt.Fatalf(\"canonical hash mappings are not properly pruned\")\n\t\t}\n\t\t// Iterate header\n\t\tif !checkDB(from, to, []byte(\"h\"), func(key, value []byte) *uint64 {\n\t\t\tif len(key) == 1+8+32 {\n\t\t\t\tn := binary.BigEndian.Uint64(key[1:9])\n\t\t\t\treturn &n\n\t\t\t}\n\t\t\treturn nil\n\t\t}, false) {\n\t\t\tt.Fatalf(\"headers are not properly pruned\")\n\t\t}\n\t\t// Iterate body\n\t\tif !checkDB(from, to, []byte(\"b\"), func(key, value []byte) *uint64 {\n\t\t\tif len(key) == 1+8+32 {\n\t\t\t\tn := binary.BigEndian.Uint64(key[1:9])\n\t\t\t\treturn &n\n\t\t\t}\n\t\t\treturn nil\n\t\t}, false) {\n\t\t\tt.Fatalf(\"block bodies are not properly pruned\")\n\t\t}\n\t\t// Iterate receipts\n\t\tif !checkDB(from, to, []byte(\"r\"), 
func(key, value []byte) *uint64 {\n\t\t\tif len(key) == 1+8+32 {\n\t\t\t\tn := binary.BigEndian.Uint64(key[1:9])\n\t\t\t\treturn &n\n\t\t\t}\n\t\t\treturn nil\n\t\t}, false) {\n\t\t\tt.Fatalf(\"receipts are not properly pruned\")\n\t\t}\n\t\t// Iterate td\n\t\tif !checkDB(from, to, []byte(\"h\"), func(key, value []byte) *uint64 {\n\t\t\tif len(key) == 1+8+32+1 && bytes.Equal(key[41:42], []byte(\"t\")) {\n\t\t\t\tn := binary.BigEndian.Uint64(key[1:9])\n\t\t\t\treturn &n\n\t\t\t}\n\t\t\treturn nil\n\t\t}, false) {\n\t\t\tt.Fatalf(\"tds are not properly pruned\")\n\t\t}\n\t}\n\t// Start light pruner.\n\ttime.Sleep(1500 * time.Millisecond) // Ensure the light client has finished syncing and indexing\n\tnewPruner(client.db, client.chtIndexer, client.bloomTrieIndexer)\n\n\ttime.Sleep(1500 * time.Millisecond) // Ensure the pruner has enough time to prune data.\n\tcheckPruned(1, config.ChtSize-1)\n\n\t// Ensure all APIs still work after pruning.\n\tvar cases = []struct {\n\t\tfrom, to   uint64\n\t\tmethodName string\n\t\tmethod     func(uint64) bool\n\t}{\n\t\t{\n\t\t\t1, 10, \"GetHeaderByNumber\",\n\t\t\tfunc(n uint64) bool {\n\t\t\t\t_, err := light.GetHeaderByNumber(context.Background(), client.handler.backend.odr, n)\n\t\t\t\treturn err == nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t11, 20, \"GetCanonicalHash\",\n\t\t\tfunc(n uint64) bool {\n\t\t\t\t_, err := light.GetCanonicalHash(context.Background(), client.handler.backend.odr, n)\n\t\t\t\treturn err == nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t21, 30, \"GetTd\",\n\t\t\tfunc(n uint64) bool {\n\t\t\t\t_, err := light.GetTd(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n)\n\t\t\t\treturn err == nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t31, 40, \"GetBodyRLP\",\n\t\t\tfunc(n uint64) bool {\n\t\t\t\t_, err := light.GetBodyRLP(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n)\n\t\t\t\treturn err == nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t41, 50, \"GetBlock\",\n\t\t\tfunc(n uint64) bool {\n\t\t\t\t_, err := light.GetBlock(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n)\n\t\t\t\treturn err == nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t51, 60, \"GetBlockReceipts\",\n\t\t\tfunc(n uint64) bool {\n\t\t\t\t_, err := light.GetBlockReceipts(context.Background(), client.handler.backend.odr, server.handler.blockchain.GetHeaderByNumber(n).Hash(), n)\n\t\t\t\treturn err == nil\n\t\t\t},\n\t\t},\n\t}\n\tfor _, c := range cases {\n\t\tfor i := c.from; i <= c.to; i++ {\n\t\t\tif !c.method(i) {\n\t\t\t\tt.Fatalf(\"rpc method %s failed, number %d\", c.methodName, i)\n\t\t\t}\n\t\t}\n\t}\n\t// Check GetBloombits\n\t_, err := light.GetBloomBits(context.Background(), client.handler.backend.odr, 0, []uint64{0})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to retrieve bloombits of pruned section: %v\", err)\n\t}\n\n\t// Ensure the ODR cached data can be cleaned by pruner.\n\tnewPruner(client.db, client.chtIndexer, client.bloomTrieIndexer)\n\ttime.Sleep(50 * time.Millisecond) // Ensure the pruner has enough time to prune data.\n\tcheckPruned(1, config.ChtSize-1)  // Ensure all cached data (by ODR) is cleaned.\n}\n"
  },
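  {
    "path": "les/sketch_rawdb_key_shapes.go",
    "content": "// Illustrative sketch, not part of the go-ethereum source tree: this file\n// name and every identifier in it are hypothetical. It spells out the magic\n// key lengths matched by the checkDB callbacks in TestLightPruner, which\n// follow the core/rawdb key schema: canonical-hash keys are \"h\" + 8-byte\n// big-endian number + \"n\" (1+8+1 bytes), header/body/receipt keys are a\n// one-byte prefix + number + 32-byte hash (1+8+32 bytes), and total\n// difficulty keys append a trailing \"t\" (1+8+32+1 bytes).\npackage main\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n)\n\n// blockNumberOf extracts the block number from a rawdb-style key when its\n// shape matches one of the layouts probed by the pruner test, else nil.\nfunc blockNumberOf(key []byte) *uint64 {\n\tswitch {\n\tcase len(key) == 1+8+1 && key[9] == 'n': // canonical hash mapping\n\tcase len(key) == 1+8+32: // header, body or receipts\n\tcase len(key) == 1+8+32+1 && key[41] == 't': // total difficulty\n\tdefault:\n\t\treturn nil\n\t}\n\tn := binary.BigEndian.Uint64(key[1:9])\n\treturn &n\n}\n\nfunc main() {\n\t// Build the canonical-hash key for block 7: \"h\" + number + \"n\".\n\tkey := append([]byte{'h'}, 0, 0, 0, 0, 0, 0, 0, 7, 'n')\n\tif n := blockNumberOf(key); n != nil {\n\t\tfmt.Println(\"key refers to block\", *n)\n\t}\n}\n"
  },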
  {
    "path": "les/request_test.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/light\"\n)\n\nvar testBankSecureTrieKey = secAddr(bankAddr)\n\nfunc secAddr(addr common.Address) []byte {\n\treturn crypto.Keccak256(addr[:])\n}\n\ntype accessTestFn func(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest\n\nfunc TestBlockAccessLes2(t *testing.T) { testAccess(t, 2, tfBlockAccess) }\nfunc TestBlockAccessLes3(t *testing.T) { testAccess(t, 3, tfBlockAccess) }\nfunc TestBlockAccessLes4(t *testing.T) { testAccess(t, 4, tfBlockAccess) }\n\nfunc tfBlockAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {\n\treturn &light.BlockRequest{Hash: bhash, Number: number}\n}\n\nfunc TestReceiptsAccessLes2(t *testing.T) { testAccess(t, 2, tfReceiptsAccess) }\nfunc TestReceiptsAccessLes3(t *testing.T) { testAccess(t, 3, tfReceiptsAccess) }\nfunc TestReceiptsAccessLes4(t *testing.T) { testAccess(t, 4, tfReceiptsAccess) }\n\nfunc tfReceiptsAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {\n\treturn &light.ReceiptsRequest{Hash: bhash, Number: number}\n}\n\nfunc TestTrieEntryAccessLes2(t *testing.T) { testAccess(t, 2, tfTrieEntryAccess) }\nfunc TestTrieEntryAccessLes3(t *testing.T) { testAccess(t, 3, tfTrieEntryAccess) }\nfunc TestTrieEntryAccessLes4(t *testing.T) { testAccess(t, 4, tfTrieEntryAccess) }\n\nfunc tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {\n\tif number := rawdb.ReadHeaderNumber(db, bhash); number != nil {\n\t\treturn &light.TrieRequest{Id: light.StateTrieID(rawdb.ReadHeader(db, bhash, *number)), Key: testBankSecureTrieKey}\n\t}\n\treturn nil\n}\n\nfunc TestCodeAccessLes2(t *testing.T) { testAccess(t, 2, tfCodeAccess) }\nfunc TestCodeAccessLes3(t *testing.T) { testAccess(t, 3, tfCodeAccess) }\nfunc TestCodeAccessLes4(t *testing.T) { testAccess(t, 4, tfCodeAccess) }\n\nfunc tfCodeAccess(db ethdb.Database, bhash common.Hash, num uint64) light.OdrRequest {\n\tnumber := rawdb.ReadHeaderNumber(db, bhash)\n\tif number != nil {\n\t\treturn nil\n\t}\n\theader := rawdb.ReadHeader(db, bhash, *number)\n\tif header.Number.Uint64() < testContractDeployed {\n\t\treturn nil\n\t}\n\tsti := light.StateTrieID(header)\n\tci := light.StorageTrieID(sti, crypto.Keccak256Hash(testContractAddr[:]), common.Hash{})\n\treturn &light.CodeRequest{Id: ci, Hash: crypto.Keccak256Hash(testContractCodeDeployed)}\n}\n\nfunc testAccess(t *testing.T, protocol int, fn accessTestFn) {\n\t// Assemble the 
test environment\n\tnetconfig := testnetConfig{\n\t\tblocks:    4,\n\t\tprotocol:  protocol,\n\t\tindexFn:   nil,\n\t\tconnect:   true,\n\t\tnopruning: true,\n\t}\n\tserver, client, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\t// Ensure the client has synced all necessary data.\n\tclientHead := client.handler.backend.blockchain.CurrentHeader()\n\tif clientHead.Number.Uint64() != 4 {\n\t\tt.Fatalf(\"Failed to sync the chain with server, head: %v\", clientHead.Number.Uint64())\n\t}\n\n\ttest := func(expFail uint64) {\n\t\tfor i := uint64(0); i <= server.handler.blockchain.CurrentHeader().Number.Uint64(); i++ {\n\t\t\tbhash := rawdb.ReadCanonicalHash(server.db, i)\n\t\t\tif req := fn(client.db, bhash, i); req != nil {\n\t\t\t\tctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)\n\t\t\t\terr := client.handler.backend.odr.Retrieve(ctx, req)\n\t\t\t\tcancel()\n\n\t\t\t\tgot := err == nil\n\t\t\t\texp := i < expFail\n\t\t\t\tif exp && !got {\n\t\t\t\t\tt.Errorf(\"object retrieval failed\")\n\t\t\t\t}\n\t\t\t\tif !exp && got {\n\t\t\t\t\tt.Errorf(\"unexpected object retrieval success\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\ttest(5)\n}\n"
  },
  {
    "path": "les/retrieve.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"context\"\n\t\"crypto/rand\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/light\"\n)\n\nvar (\n\tretryQueue         = time.Millisecond * 100\n\thardRequestTimeout = time.Second * 10\n)\n\n// retrieveManager is a layer on top of requestDistributor which takes care of\n// matching replies by request ID and handles timeouts and resends if necessary.\ntype retrieveManager struct {\n\tdist               *requestDistributor\n\tpeers              *serverPeerSet\n\tsoftRequestTimeout func() time.Duration\n\n\tlock     sync.RWMutex\n\tsentReqs map[uint64]*sentReq\n}\n\n// validatorFunc is a function that processes a reply message\ntype validatorFunc func(distPeer, *Msg) error\n\n// sentReq represents a request sent and tracked by retrieveManager\ntype sentReq struct {\n\trm       *retrieveManager\n\treq      *distReq\n\tid       uint64\n\tvalidate validatorFunc\n\n\teventsCh chan reqPeerEvent\n\tstopCh   chan struct{}\n\tstopped  bool\n\terr      error\n\n\tlock   sync.RWMutex // protect access to sentTo map\n\tsentTo map[distPeer]sentReqToPeer\n\n\tlastReqQueued bool     // last request has been queued but not sent\n\tlastReqSentTo distPeer // if not nil then last request has been sent to given peer but not timed out\n\treqSrtoCount  int      // number of requests that reached soft (but not hard) timeout\n}\n\n// sentReqToPeer notifies the request-from-peer goroutine (tryRequest) about a response\n// delivered by the given peer. 
Only one delivery is allowed per request per peer,\n// after which delivered is set to true, the validity of the response is sent on the\n// valid channel and no more responses are accepted.\ntype sentReqToPeer struct {\n\tdelivered, frozen bool\n\tevent             chan int\n}\n\n// reqPeerEvent is sent by the request-from-peer goroutine (tryRequest) to the\n// request state machine (retrieveLoop) through the eventsCh channel.\ntype reqPeerEvent struct {\n\tevent int\n\tpeer  distPeer\n}\n\nconst (\n\trpSent = iota // if peer == nil, not sent (no suitable peers)\n\trpSoftTimeout\n\trpHardTimeout\n\trpDeliveredValid\n\trpDeliveredInvalid\n\trpNotDelivered\n)\n\n// newRetrieveManager creates the retrieve manager\nfunc newRetrieveManager(peers *serverPeerSet, dist *requestDistributor, srto func() time.Duration) *retrieveManager {\n\treturn &retrieveManager{\n\t\tpeers:              peers,\n\t\tdist:               dist,\n\t\tsentReqs:           make(map[uint64]*sentReq),\n\t\tsoftRequestTimeout: srto,\n\t}\n}\n\n// retrieve sends a request (to multiple peers if necessary) and waits for an answer\n// that is delivered through the deliver function and successfully validated by the\n// validator callback. It returns when a valid answer is delivered or the context is\n// cancelled.\nfunc (rm *retrieveManager) retrieve(ctx context.Context, reqID uint64, req *distReq, val validatorFunc, shutdown chan struct{}) error {\n\tsentReq := rm.sendReq(reqID, req, val)\n\tselect {\n\tcase <-sentReq.stopCh:\n\tcase <-ctx.Done():\n\t\tsentReq.stop(ctx.Err())\n\tcase <-shutdown:\n\t\tsentReq.stop(fmt.Errorf(\"client is shutting down\"))\n\t}\n\treturn sentReq.getError()\n}\n\n// sendReq starts a process that keeps trying to retrieve a valid answer for a\n// request from any suitable peers until stopped or succeeded.\nfunc (rm *retrieveManager) sendReq(reqID uint64, req *distReq, val validatorFunc) *sentReq {\n\tr := &sentReq{\n\t\trm:       rm,\n\t\treq:      req,\n\t\tid:       reqID,\n\t\tsentTo:   make(map[distPeer]sentReqToPeer),\n\t\tstopCh:   make(chan struct{}),\n\t\teventsCh: make(chan reqPeerEvent, 10),\n\t\tvalidate: val,\n\t}\n\n\tcanSend := req.canSend\n\treq.canSend = func(p distPeer) bool {\n\t\t// add an extra check to canSend: the request has not been sent to the same peer before\n\t\tr.lock.RLock()\n\t\t_, sent := r.sentTo[p]\n\t\tr.lock.RUnlock()\n\t\treturn !sent && canSend(p)\n\t}\n\n\trequest := req.request\n\treq.request = func(p distPeer) func() {\n\t\t// before actually sending the request, put an entry into the sentTo map\n\t\tr.lock.Lock()\n\t\tr.sentTo[p] = sentReqToPeer{delivered: false, frozen: false, event: make(chan int, 1)}\n\t\tr.lock.Unlock()\n\t\treturn request(p)\n\t}\n\trm.lock.Lock()\n\trm.sentReqs[reqID] = r\n\trm.lock.Unlock()\n\n\tgo r.retrieveLoop()\n\treturn r\n}\n\n// requested reports whether the request with given reqid is sent by the retriever.\nfunc (rm *retrieveManager) requested(reqId uint64) bool {\n\trm.lock.RLock()\n\tdefer rm.lock.RUnlock()\n\n\t_, ok := rm.sentReqs[reqId]\n\treturn ok\n}\n\n// deliver is called by the LES protocol manager to deliver reply messages to waiting requests\nfunc (rm *retrieveManager) deliver(peer distPeer, msg *Msg) error {\n\trm.lock.RLock()\n\treq, ok := rm.sentReqs[msg.ReqID]\n\trm.lock.RUnlock()\n\n\tif ok {\n\t\treturn req.deliver(peer, msg)\n\t}\n\treturn errResp(ErrUnexpectedResponse, \"reqID = %v\", msg.ReqID)\n}\n\n// frozen is called by the LES protocol manager when a server has suspended its service and we\n// should 
not expect an answer for the requests already sent there\nfunc (rm *retrieveManager) frozen(peer distPeer) {\n\trm.lock.RLock()\n\tdefer rm.lock.RUnlock()\n\n\tfor _, req := range rm.sentReqs {\n\t\treq.frozen(peer)\n\t}\n}\n\n// reqStateFn represents a state of the retrieve loop state machine\ntype reqStateFn func() reqStateFn\n\n// retrieveLoop is the retrieval state machine event loop\nfunc (r *sentReq) retrieveLoop() {\n\tgo r.tryRequest()\n\tr.lastReqQueued = true\n\tstate := r.stateRequesting\n\n\tfor state != nil {\n\t\tstate = state()\n\t}\n\n\tr.rm.lock.Lock()\n\tdelete(r.rm.sentReqs, r.id)\n\tr.rm.lock.Unlock()\n}\n\n// stateRequesting: a request has been queued or sent recently; when it reaches soft timeout,\n// a new request is sent to a new peer\nfunc (r *sentReq) stateRequesting() reqStateFn {\n\tselect {\n\tcase ev := <-r.eventsCh:\n\t\tr.update(ev)\n\t\tswitch ev.event {\n\t\tcase rpSent:\n\t\t\tif ev.peer == nil {\n\t\t\t\t// request send failed, no more suitable peers\n\t\t\t\tif r.waiting() {\n\t\t\t\t\t// we are already waiting for sent requests which may succeed so keep waiting\n\t\t\t\t\treturn r.stateNoMorePeers\n\t\t\t\t}\n\t\t\t\t// nothing to wait for, no more peers to ask, return with error\n\t\t\t\tr.stop(light.ErrNoPeers)\n\t\t\t\t// no need to go to stopped state because waiting() already returned false\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase rpSoftTimeout:\n\t\t\t// last request timed out, try asking a new peer\n\t\t\tgo r.tryRequest()\n\t\t\tr.lastReqQueued = true\n\t\t\treturn r.stateRequesting\n\t\tcase rpDeliveredInvalid, rpNotDelivered:\n\t\t\t// if it was the last sent request (set to nil by update) then start a new one\n\t\t\tif !r.lastReqQueued && r.lastReqSentTo == nil {\n\t\t\t\tgo r.tryRequest()\n\t\t\t\tr.lastReqQueued = true\n\t\t\t}\n\t\t\treturn r.stateRequesting\n\t\tcase rpDeliveredValid:\n\t\t\tr.stop(nil)\n\t\t\treturn r.stateStopped\n\t\t}\n\t\treturn r.stateRequesting\n\tcase <-r.stopCh:\n\t\treturn r.stateStopped\n\t}\n}\n\n// stateNoMorePeers: could not send more requests because no suitable peers are available.\n// Peers may become suitable for a certain request later or new peers may appear so we\n// keep trying.\nfunc (r *sentReq) stateNoMorePeers() reqStateFn {\n\tselect {\n\tcase <-time.After(retryQueue):\n\t\tgo r.tryRequest()\n\t\tr.lastReqQueued = true\n\t\treturn r.stateRequesting\n\tcase ev := <-r.eventsCh:\n\t\tr.update(ev)\n\t\tif ev.event == rpDeliveredValid {\n\t\t\tr.stop(nil)\n\t\t\treturn r.stateStopped\n\t\t}\n\t\tif r.waiting() {\n\t\t\treturn r.stateNoMorePeers\n\t\t}\n\t\tr.stop(light.ErrNoPeers)\n\t\treturn nil\n\tcase <-r.stopCh:\n\t\treturn r.stateStopped\n\t}\n}\n\n// stateStopped: request succeeded or cancelled, just waiting for some peers\n// to either answer or time out hard\nfunc (r *sentReq) stateStopped() reqStateFn {\n\tfor r.waiting() {\n\t\tr.update(<-r.eventsCh)\n\t}\n\treturn nil\n}\n\n// update updates the queued/sent flags and timed out peers counter according to the event\nfunc (r *sentReq) update(ev reqPeerEvent) {\n\tswitch ev.event {\n\tcase rpSent:\n\t\tr.lastReqQueued = false\n\t\tr.lastReqSentTo = ev.peer\n\tcase rpSoftTimeout:\n\t\tr.lastReqSentTo = nil\n\t\tr.reqSrtoCount++\n\tcase rpHardTimeout:\n\t\tr.reqSrtoCount--\n\tcase rpDeliveredValid, rpDeliveredInvalid, rpNotDelivered:\n\t\tif ev.peer == r.lastReqSentTo {\n\t\t\tr.lastReqSentTo = nil\n\t\t} else {\n\t\t\tr.reqSrtoCount--\n\t\t}\n\t}\n}\n\n// waiting returns true if the retrieval mechanism is waiting for an answer from\n// any 
peer\nfunc (r *sentReq) waiting() bool {\n\treturn r.lastReqQueued || r.lastReqSentTo != nil || r.reqSrtoCount > 0\n}\n\n// tryRequest tries to send the request to a new peer and waits for it to either\n// succeed or time out if it has been sent. It also sends the appropriate reqPeerEvent\n// messages to the request's event channel.\nfunc (r *sentReq) tryRequest() {\n\tsent := r.rm.dist.queue(r.req)\n\tvar p distPeer\n\tselect {\n\tcase p = <-sent:\n\tcase <-r.stopCh:\n\t\tif r.rm.dist.cancel(r.req) {\n\t\t\tp = nil\n\t\t} else {\n\t\t\tp = <-sent\n\t\t}\n\t}\n\n\tr.eventsCh <- reqPeerEvent{rpSent, p}\n\tif p == nil {\n\t\treturn\n\t}\n\n\thrto := false\n\n\tr.lock.RLock()\n\ts, ok := r.sentTo[p]\n\tr.lock.RUnlock()\n\tif !ok {\n\t\tpanic(nil)\n\t}\n\n\tdefer func() {\n\t\tpp, ok := p.(*serverPeer)\n\t\tif hrto && ok {\n\t\t\tpp.Log().Debug(\"Request timed out hard\")\n\t\t\tif r.rm.peers != nil {\n\t\t\t\tr.rm.peers.unregister(pp.id)\n\t\t\t}\n\t\t}\n\t}()\n\n\tselect {\n\tcase event := <-s.event:\n\t\tif event == rpNotDelivered {\n\t\t\tr.lock.Lock()\n\t\t\tdelete(r.sentTo, p)\n\t\t\tr.lock.Unlock()\n\t\t}\n\t\tr.eventsCh <- reqPeerEvent{event, p}\n\t\treturn\n\tcase <-time.After(r.rm.softRequestTimeout()):\n\t\tr.eventsCh <- reqPeerEvent{rpSoftTimeout, p}\n\t}\n\n\tselect {\n\tcase event := <-s.event:\n\t\tif event == rpNotDelivered {\n\t\t\tr.lock.Lock()\n\t\t\tdelete(r.sentTo, p)\n\t\t\tr.lock.Unlock()\n\t\t}\n\t\tr.eventsCh <- reqPeerEvent{event, p}\n\tcase <-time.After(hardRequestTimeout):\n\t\thrto = true\n\t\tr.eventsCh <- reqPeerEvent{rpHardTimeout, p}\n\t}\n}\n\n// deliver a reply belonging to this request\nfunc (r *sentReq) deliver(peer distPeer, msg *Msg) error {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\ts, ok := r.sentTo[peer]\n\tif !ok || s.delivered {\n\t\treturn errResp(ErrUnexpectedResponse, \"reqID = %v\", msg.ReqID)\n\t}\n\tif s.frozen {\n\t\treturn nil\n\t}\n\tvalid := r.validate(peer, msg) == nil\n\tr.sentTo[peer] = sentReqToPeer{delivered: true, frozen: false, event: s.event}\n\tif valid {\n\t\ts.event <- rpDeliveredValid\n\t} else {\n\t\ts.event <- rpDeliveredInvalid\n\t}\n\tif !valid {\n\t\treturn errResp(ErrInvalidResponse, \"reqID = %v\", msg.ReqID)\n\t}\n\treturn nil\n}\n\n// frozen sends a \"not delivered\" event to the peer event channel belonging to the\n// given peer if the request has been sent there, causing the state machine to not\n// expect an answer and potentially even send the request to the same peer again\n// when canSend allows it.\nfunc (r *sentReq) frozen(peer distPeer) {\n\tr.lock.Lock()\n\tdefer r.lock.Unlock()\n\n\ts, ok := r.sentTo[peer]\n\tif ok && !s.delivered && !s.frozen {\n\t\tr.sentTo[peer] = sentReqToPeer{delivered: false, frozen: true, event: s.event}\n\t\ts.event <- rpNotDelivered\n\t}\n}\n\n// stop stops the retrieval process and sets an error code that will be returned\n// by getError\nfunc (r *sentReq) stop(err error) {\n\tr.lock.Lock()\n\tif !r.stopped {\n\t\tr.stopped = true\n\t\tr.err = err\n\t\tclose(r.stopCh)\n\t}\n\tr.lock.Unlock()\n}\n\n// getError returns any retrieval error (either internally generated or set by the\n// stop function) after stopCh has been closed\nfunc (r *sentReq) getError() error {\n\treturn r.err\n}\n\n// genReqID generates a new random request ID\nfunc genReqID() uint64 {\n\tvar rnd [8]byte\n\trand.Read(rnd[:])\n\treturn binary.BigEndian.Uint64(rnd[:])\n}\n"
  },
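  {
    "path": "les/sketch_soft_hard_timeout.go",
    "content": "// Illustrative sketch, not part of the go-ethereum source tree: this file\n// name and every identifier in it are hypothetical. It isolates the\n// two-phase timeout idiom of sentReq.tryRequest in les/retrieve.go: after a\n// request is sent, the reply is awaited until a soft timeout (at which point\n// the retriever may already ask another peer while this one can still\n// answer), and then until a hard timeout, after which the peer is dropped.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\nconst (\n\tevDelivered = iota\n\tevSoftTimeout\n\tevHardTimeout\n)\n\n// await emits at most two events: an optional soft timeout, followed by\n// either the delivery or a hard timeout. events is buffered by the caller\n// so the sends never block.\nfunc await(reply <-chan struct{}, soft, hard time.Duration, events chan<- int) {\n\tselect {\n\tcase <-reply:\n\t\tevents <- evDelivered\n\t\treturn\n\tcase <-time.After(soft):\n\t\tevents <- evSoftTimeout // the caller may now try another peer\n\t}\n\tselect {\n\tcase <-reply:\n\t\tevents <- evDelivered\n\tcase <-time.After(hard):\n\t\tevents <- evHardTimeout // give up on this peer entirely\n\t}\n}\n\nfunc main() {\n\treply := make(chan struct{})\n\tevents := make(chan int, 2)\n\tgo await(reply, 50*time.Millisecond, 200*time.Millisecond, events)\n\n\ttime.Sleep(100 * time.Millisecond) // answer after the soft timeout fired\n\tclose(reply)\n\n\tfmt.Println(\"event:\", <-events) // 1 (evSoftTimeout)\n\tfmt.Println(\"event:\", <-events) // 0 (evDelivered)\n}\n"
  },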
  {
    "path": "les/server.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"crypto/ecdsa\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/les/flowcontrol\"\n\t\"github.com/ethereum/go-ethereum/les/vflux\"\n\tvfs \"github.com/ethereum/go-ethereum/les/vflux/server\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/node\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n\t\"github.com/ethereum/go-ethereum/params\"\n\t\"github.com/ethereum/go-ethereum/rpc\"\n)\n\nvar (\n\tserverSetup         = &nodestate.Setup{}\n\tclientPeerField     = serverSetup.NewField(\"clientPeer\", reflect.TypeOf(&clientPeer{}))\n\tclientInfoField     = serverSetup.NewField(\"clientInfo\", reflect.TypeOf(&clientInfo{}))\n\tconnAddressField    = serverSetup.NewField(\"connAddr\", reflect.TypeOf(\"\"))\n\tbalanceTrackerSetup = vfs.NewBalanceTrackerSetup(serverSetup)\n\tpriorityPoolSetup   = vfs.NewPriorityPoolSetup(serverSetup)\n)\n\nfunc init() {\n\tbalanceTrackerSetup.Connect(connAddressField, priorityPoolSetup.CapacityField)\n\tpriorityPoolSetup.Connect(balanceTrackerSetup.BalanceField, balanceTrackerSetup.UpdateFlag) // NodeBalance implements nodePriority\n}\n\ntype ethBackend interface {\n\tArchiveMode() bool\n\tBlockChain() *core.BlockChain\n\tBloomIndexer() *core.ChainIndexer\n\tChainDb() ethdb.Database\n\tSynced() bool\n\tTxPool() *core.TxPool\n}\n\ntype LesServer struct {\n\tlesCommons\n\n\tns          *nodestate.NodeStateMachine\n\tarchiveMode bool // Flag whether the ethereum node runs in archive mode.\n\thandler     *serverHandler\n\tbroadcaster *broadcaster\n\tvfluxServer *vfs.Server\n\tprivateKey  *ecdsa.PrivateKey\n\n\t// Flow control and capacity management\n\tfcManager    *flowcontrol.ClientManager\n\tcostTracker  *costTracker\n\tdefParams    flowcontrol.ServerParams\n\tservingQueue *servingQueue\n\tclientPool   *clientPool\n\n\tminCapacity, maxCapacity uint64\n\tthreadsIdle              int // Request serving threads count when system is idle.\n\tthreadsBusy              int // Request serving threads count when system is busy(block insertion).\n\n\tp2pSrv *p2p.Server\n}\n\nfunc NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*LesServer, error) {\n\tlesDb, err := node.OpenDatabase(\"les.server\", 0, 0, \"eth/db/lesserver/\", false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tns := 
nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup)\n\t// Calculate the number of threads used to service the light client\n\t// requests based on the user-specified value.\n\tthreads := config.LightServ * 4 / 100\n\tif threads < 4 {\n\t\tthreads = 4\n\t}\n\tsrv := &LesServer{\n\t\tlesCommons: lesCommons{\n\t\t\tgenesis:          e.BlockChain().Genesis().Hash(),\n\t\t\tconfig:           config,\n\t\t\tchainConfig:      e.BlockChain().Config(),\n\t\t\tiConfig:          light.DefaultServerIndexerConfig,\n\t\t\tchainDb:          e.ChainDb(),\n\t\t\tlesDb:            lesDb,\n\t\t\tchainReader:      e.BlockChain(),\n\t\t\tchtIndexer:       light.NewChtIndexer(e.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations, true),\n\t\t\tbloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency, true),\n\t\t\tcloseCh:          make(chan struct{}),\n\t\t},\n\t\tns:           ns,\n\t\tarchiveMode:  e.ArchiveMode(),\n\t\tbroadcaster:  newBroadcaster(ns),\n\t\tvfluxServer:  vfs.NewServer(time.Millisecond * 10),\n\t\tfcManager:    flowcontrol.NewClientManager(nil, &mclock.System{}),\n\t\tservingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100),\n\t\tthreadsBusy:  config.LightServ/100 + 1,\n\t\tthreadsIdle:  threads,\n\t\tp2pSrv:       node.Server(),\n\t}\n\tsrv.vfluxServer.Register(srv)\n\tissync := e.Synced\n\tif config.LightNoSyncServe {\n\t\tissync = func() bool { return true }\n\t}\n\tsrv.handler = newServerHandler(srv, e.BlockChain(), e.ChainDb(), e.TxPool(), issync)\n\tsrv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config)\n\tsrv.oracle = srv.setupOracle(node, e.BlockChain().Genesis().Hash(), config)\n\n\t// Initialize the bloom trie indexer.\n\te.BloomIndexer().AddChildIndexer(srv.bloomTrieIndexer)\n\n\t// Initialize server capacity management fields.\n\tsrv.defParams = flowcontrol.ServerParams{\n\t\tBufLimit:    srv.minCapacity * bufLimitRatio,\n\t\tMinRecharge: srv.minCapacity,\n\t}\n\t// LES flow control tries to more or less guarantee the possibility for the\n\t// clients to send a certain amount of requests at any time and get a quick\n\t// response. Most of the clients want this guarantee but don't actually need\n\t// to send requests most of the time. 
Our goal is to serve as many clients as\n\t// possible while the actually used server capacity does not exceed the limits\n\ttotalRecharge := srv.costTracker.totalRecharge()\n\tsrv.maxCapacity = srv.minCapacity * uint64(srv.config.LightPeers)\n\tif totalRecharge > srv.maxCapacity {\n\t\tsrv.maxCapacity = totalRecharge\n\t}\n\tsrv.fcManager.SetCapacityLimits(srv.minCapacity, srv.maxCapacity, srv.minCapacity*2)\n\tsrv.clientPool = newClientPool(ns, lesDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, srv.dropClient, issync)\n\tsrv.clientPool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1})\n\n\tcheckpoint := srv.latestLocalCheckpoint()\n\tif !checkpoint.Empty() {\n\t\tlog.Info(\"Loaded latest checkpoint\", \"section\", checkpoint.SectionIndex, \"head\", checkpoint.SectionHead,\n\t\t\t\"chtroot\", checkpoint.CHTRoot, \"bloomroot\", checkpoint.BloomRoot)\n\t}\n\tsrv.chtIndexer.Start(e.BlockChain())\n\n\tnode.RegisterProtocols(srv.Protocols())\n\tnode.RegisterAPIs(srv.APIs())\n\tnode.RegisterLifecycle(srv)\n\n\t// disconnect all peers at nsm shutdown\n\tns.SubscribeField(clientPeerField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {\n\t\tif state.Equals(serverSetup.OfflineFlag()) && oldValue != nil {\n\t\t\toldValue.(*clientPeer).Peer.Disconnect(p2p.DiscRequested)\n\t\t}\n\t})\n\tns.Start()\n\treturn srv, nil\n}\n\nfunc (s *LesServer) APIs() []rpc.API {\n\treturn []rpc.API{\n\t\t{\n\t\t\tNamespace: \"les\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   NewPrivateLightAPI(&s.lesCommons),\n\t\t\tPublic:    false,\n\t\t},\n\t\t{\n\t\t\tNamespace: \"les\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   NewPrivateLightServerAPI(s),\n\t\t\tPublic:    false,\n\t\t},\n\t\t{\n\t\t\tNamespace: \"debug\",\n\t\t\tVersion:   \"1.0\",\n\t\t\tService:   NewPrivateDebugAPI(s),\n\t\t\tPublic:    false,\n\t\t},\n\t}\n}\n\nfunc (s *LesServer) Protocols() []p2p.Protocol {\n\tps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} {\n\t\tif p := s.getClient(id); p != nil {\n\t\t\treturn p.Info()\n\t\t}\n\t\treturn nil\n\t}, nil)\n\t// Add \"les\" ENR entries.\n\tfor i := range ps {\n\t\tps[i].Attributes = []enr.Entry{&lesEntry{\n\t\t\tVfxVersion: 1,\n\t\t}}\n\t}\n\treturn ps\n}\n\n// Start starts the LES server\nfunc (s *LesServer) Start() error {\n\ts.privateKey = s.p2pSrv.PrivateKey\n\ts.broadcaster.setSignerKey(s.privateKey)\n\ts.handler.start()\n\ts.wg.Add(1)\n\tgo s.capacityManagement()\n\tif s.p2pSrv.DiscV5 != nil {\n\t\ts.p2pSrv.DiscV5.RegisterTalkHandler(\"vfx\", s.vfluxServer.ServeEncoded)\n\t}\n\treturn nil\n}\n\n// Stop stops the LES service\nfunc (s *LesServer) Stop() error {\n\tclose(s.closeCh)\n\n\ts.clientPool.stop()\n\ts.ns.Stop()\n\ts.fcManager.Stop()\n\ts.costTracker.stop()\n\ts.handler.stop()\n\ts.servingQueue.stop()\n\ts.vfluxServer.Stop()\n\n\t// Note, bloom trie indexer is closed by parent bloombits indexer.\n\ts.chtIndexer.Close()\n\ts.lesDb.Close()\n\ts.wg.Wait()\n\tlog.Info(\"Les server stopped\")\n\n\treturn nil\n}\n\n// capacityManagement starts an event handler loop that updates the recharge curve of\n// the client manager and adjusts the client pool's size according to the total\n// capacity updates coming from the client manager\nfunc (s *LesServer) capacityManagement() {\n\tdefer s.wg.Done()\n\n\tprocessCh := make(chan bool, 100)\n\tsub := 
s.handler.blockchain.SubscribeBlockProcessingEvent(processCh)\n\tdefer sub.Unsubscribe()\n\n\ttotalRechargeCh := make(chan uint64, 100)\n\ttotalRecharge := s.costTracker.subscribeTotalRecharge(totalRechargeCh)\n\n\ttotalCapacityCh := make(chan uint64, 100)\n\ttotalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh)\n\ts.clientPool.setLimits(s.config.LightPeers, totalCapacity)\n\n\tvar (\n\t\tbusy         bool\n\t\tfreePeers    uint64\n\t\tblockProcess mclock.AbsTime\n\t)\n\tupdateRecharge := func() {\n\t\tif busy {\n\t\t\ts.servingQueue.setThreads(s.threadsBusy)\n\t\t\ts.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge, totalRecharge}})\n\t\t} else {\n\t\t\ts.servingQueue.setThreads(s.threadsIdle)\n\t\t\ts.fcManager.SetRechargeCurve(flowcontrol.PieceWiseLinear{{0, 0}, {totalRecharge / 10, totalRecharge}, {totalRecharge, totalRecharge}})\n\t\t}\n\t}\n\tupdateRecharge()\n\n\tfor {\n\t\tselect {\n\t\tcase busy = <-processCh:\n\t\t\tif busy {\n\t\t\t\tblockProcess = mclock.Now()\n\t\t\t} else {\n\t\t\t\tblockProcessingTimer.Update(time.Duration(mclock.Now() - blockProcess))\n\t\t\t}\n\t\t\tupdateRecharge()\n\t\tcase totalRecharge = <-totalRechargeCh:\n\t\t\ttotalRechargeGauge.Update(int64(totalRecharge))\n\t\t\tupdateRecharge()\n\t\tcase totalCapacity = <-totalCapacityCh:\n\t\t\ttotalCapacityGauge.Update(int64(totalCapacity))\n\t\t\tnewFreePeers := totalCapacity / s.minCapacity\n\t\t\tif newFreePeers < freePeers && newFreePeers < uint64(s.config.LightPeers) {\n\t\t\t\tlog.Warn(\"Reduced free peer connections\", \"from\", freePeers, \"to\", newFreePeers)\n\t\t\t}\n\t\t\tfreePeers = newFreePeers\n\t\t\ts.clientPool.setLimits(s.config.LightPeers, totalCapacity)\n\t\tcase <-s.closeCh:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (s *LesServer) getClient(id enode.ID) *clientPeer {\n\tif node := s.ns.GetNode(id); node != nil {\n\t\tif p, ok := s.ns.GetField(node, clientPeerField).(*clientPeer); ok {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *LesServer) dropClient(id enode.ID) {\n\tif p := s.getClient(id); p != nil {\n\t\tp.Peer.Disconnect(p2p.DiscRequested)\n\t}\n}\n\n// ServiceInfo implements vfs.Service\nfunc (s *LesServer) ServiceInfo() (string, string) {\n\treturn \"les\", \"Ethereum light client service\"\n}\n\n// Handle implements vfs.Service\nfunc (s *LesServer) Handle(id enode.ID, address string, name string, data []byte) []byte {\n\tswitch name {\n\tcase vflux.CapacityQueryName:\n\t\treturn s.clientPool.serveCapQuery(id, address, data)\n\tdefault:\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "les/server_handler.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"crypto/ecdsa\"\n\t\"errors\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\tvfs \"github.com/ethereum/go-ethereum/les/vflux/server\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/metrics\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\nconst (\n\tsoftResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.\n\testHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header\n\tethVersion        = 64              // equivalent eth version for the downloader\n\n\tMaxHeaderFetch           = 192 // Amount of block headers to be fetched per retrieval request\n\tMaxBodyFetch             = 32  // Amount of block bodies to be fetched per retrieval request\n\tMaxReceiptFetch          = 128 // Amount of transaction receipts to allow fetching per request\n\tMaxCodeFetch             = 64  // Amount of contract codes to allow fetching per request\n\tMaxProofsFetch           = 64  // Amount of merkle proofs to be fetched per retrieval request\n\tMaxHelperTrieProofsFetch = 64  // Amount of helper tries to be fetched per retrieval request\n\tMaxTxSend                = 64  // Amount of transactions to be send per request\n\tMaxTxStatus              = 256 // Amount of transactions to queried per request\n)\n\nvar (\n\terrTooManyInvalidRequest = errors.New(\"too many invalid requests made\")\n\terrFullClientPool        = errors.New(\"client pool is full\")\n)\n\n// serverHandler is responsible for serving light client and process\n// all incoming light requests.\ntype serverHandler struct {\n\tforkFilter forkid.Filter\n\tblockchain *core.BlockChain\n\tchainDb    ethdb.Database\n\ttxpool     *core.TxPool\n\tserver     *LesServer\n\n\tcloseCh chan struct{}  // Channel used to exit all background routines of handler.\n\twg      sync.WaitGroup // WaitGroup used to track all background routines of handler.\n\tsynced  func() bool    // Callback function used to determine whether local node is synced.\n\n\t// Testing fields\n\taddTxsSync bool\n}\n\nfunc 
newServerHandler(server *LesServer, blockchain *core.BlockChain, chainDb ethdb.Database, txpool *core.TxPool, synced func() bool) *serverHandler {\n\thandler := &serverHandler{\n\t\tforkFilter: forkid.NewFilter(blockchain),\n\t\tserver:     server,\n\t\tblockchain: blockchain,\n\t\tchainDb:    chainDb,\n\t\ttxpool:     txpool,\n\t\tcloseCh:    make(chan struct{}),\n\t\tsynced:     synced,\n\t}\n\treturn handler\n}\n\n// start starts the server handler.\nfunc (h *serverHandler) start() {\n\th.wg.Add(1)\n\tgo h.broadcastLoop()\n}\n\n// stop stops the server handler.\nfunc (h *serverHandler) stop() {\n\tclose(h.closeCh)\n\th.wg.Wait()\n}\n\n// runPeer is the p2p protocol run function for the given version.\nfunc (h *serverHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) error {\n\tpeer := newClientPeer(int(version), h.server.config.NetworkId, p, newMeteredMsgWriter(rw, int(version)))\n\tdefer peer.close()\n\th.wg.Add(1)\n\tdefer h.wg.Done()\n\treturn h.handle(peer)\n}\n\nfunc (h *serverHandler) handle(p *clientPeer) error {\n\tp.Log().Debug(\"Light Ethereum peer connected\", \"name\", p.Name())\n\n\t// Execute the LES handshake\n\tvar (\n\t\thead   = h.blockchain.CurrentHeader()\n\t\thash   = head.Hash()\n\t\tnumber = head.Number.Uint64()\n\t\ttd     = h.blockchain.GetTd(hash, number)\n\t\tforkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis().Hash(), h.blockchain.CurrentBlock().NumberU64())\n\t)\n\tif err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), forkID, h.forkFilter, h.server); err != nil {\n\t\tp.Log().Debug(\"Light Ethereum handshake failed\", \"err\", err)\n\t\treturn err\n\t}\n\t// Reject the duplicated peer, otherwise register it to peerset.\n\tvar registered bool\n\tif err := h.server.ns.Operation(func() {\n\t\tif h.server.ns.GetField(p.Node(), clientPeerField) != nil {\n\t\t\tregistered = true\n\t\t} else {\n\t\t\th.server.ns.SetFieldSub(p.Node(), clientPeerField, p)\n\t\t}\n\t}); err != nil {\n\t\treturn err\n\t}\n\tif registered {\n\t\treturn errAlreadyRegistered\n\t}\n\n\tdefer func() {\n\t\th.server.ns.SetField(p.Node(), clientPeerField, nil)\n\t\tif p.fcClient != nil { // is nil when connecting another server\n\t\t\tp.fcClient.Disconnect()\n\t\t}\n\t}()\n\tif p.server {\n\t\t// connected to another server, no messages expected, just wait for disconnection\n\t\t_, err := p.rw.ReadMsg()\n\t\treturn err\n\t}\n\t// Reject light clients if server is not synced.\n\t//\n\t// Put this checking here, so that \"non-synced\" les-server peers are still allowed\n\t// to keep the connection.\n\tif !h.synced() {\n\t\tp.Log().Debug(\"Light server not synced, rejecting peer\")\n\t\treturn p2p.DiscRequested\n\t}\n\t// Disconnect the inbound peer if it's rejected by clientPool\n\tif cap, err := h.server.clientPool.connect(p); cap != p.fcParams.MinRecharge || err != nil {\n\t\tp.Log().Debug(\"Light Ethereum peer rejected\", \"err\", errFullClientPool)\n\t\treturn errFullClientPool\n\t}\n\tp.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*vfs.NodeBalance)\n\tif p.balance == nil {\n\t\treturn p2p.DiscRequested\n\t}\n\tactiveCount, _ := h.server.clientPool.pp.Active()\n\tclientConnectionGauge.Update(int64(activeCount))\n\n\tvar wg sync.WaitGroup // Wait group used to track all in-flight task routines.\n\n\tconnectedAt := mclock.Now()\n\tdefer func() {\n\t\twg.Wait() // Ensure all background task routines have exited.\n\t\th.server.clientPool.disconnect(p)\n\t\tp.balance = nil\n\t\tactiveCount, _ := 
h.server.clientPool.pp.Active()\n\t\tclientConnectionGauge.Update(int64(activeCount))\n\t\tconnectionTimer.Update(time.Duration(mclock.Now() - connectedAt))\n\t}()\n\t// Mark that the peer has started to be served.\n\tatomic.StoreUint32(&p.serving, 1)\n\tdefer atomic.StoreUint32(&p.serving, 0)\n\n\t// Spawn a main loop to handle all incoming messages.\n\tfor {\n\t\tselect {\n\t\tcase err := <-p.errCh:\n\t\t\tp.Log().Debug(\"Failed to send light ethereum response\", \"err\", err)\n\t\t\treturn err\n\t\tdefault:\n\t\t}\n\t\tif err := h.handleMsg(p, &wg); err != nil {\n\t\t\tp.Log().Debug(\"Light Ethereum message handling failed\", \"err\", err)\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n// beforeHandle performs a series of prechecks before handling the message.\nfunc (h *serverHandler) beforeHandle(p *clientPeer, reqID, responseCount uint64, msg p2p.Msg, reqCnt uint64, maxCount uint64) (*servingTask, uint64) {\n\t// Ensure that the request sent by the client peer is valid\n\tinSizeCost := h.server.costTracker.realCost(0, msg.Size, 0)\n\tif reqCnt == 0 || reqCnt > maxCount {\n\t\tp.fcClient.OneTimeCost(inSizeCost)\n\t\treturn nil, 0\n\t}\n\t// Ensure that the client peer complies with the flow control\n\t// rules agreed by both sides.\n\tif p.isFrozen() {\n\t\tp.fcClient.OneTimeCost(inSizeCost)\n\t\treturn nil, 0\n\t}\n\tmaxCost := p.fcCosts.getMaxCost(msg.Code, reqCnt)\n\taccepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost)\n\tif !accepted {\n\t\tp.freeze()\n\t\tp.Log().Error(\"Request came too early\", \"remaining\", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge)))\n\t\tp.fcClient.OneTimeCost(inSizeCost)\n\t\treturn nil, 0\n\t}\n\t// Create a multi-stage task, estimate the time it takes for the task to\n\t// execute, and cache it in the request service queue.\n\tfactor := h.server.costTracker.globalFactor()\n\tif factor < 0.001 {\n\t\tp.Log().Error(\"Invalid global cost factor\", \"factor\", factor)\n\t\tfactor = 1\n\t}\n\tmaxTime := uint64(float64(maxCost) / factor)\n\ttask := h.server.servingQueue.newTask(p, maxTime, priority)\n\tif !task.start() {\n\t\tp.fcClient.RequestProcessed(reqID, responseCount, maxCost, inSizeCost)\n\t\treturn nil, 0\n\t}\n\treturn task, maxCost\n}\n\n// afterHandle performs a series of operations after message handling,\n// such as updating flow control data, sending the reply, etc.\nfunc (h *serverHandler) afterHandle(p *clientPeer, reqID, responseCount uint64, msg p2p.Msg, maxCost uint64, reqCnt uint64, task *servingTask, reply *reply) {\n\tif reply != nil {\n\t\ttask.done()\n\t}\n\tp.responseLock.Lock()\n\tdefer p.responseLock.Unlock()\n\n\t// Short circuit if the client is already frozen.\n\tif p.isFrozen() {\n\t\trealCost := h.server.costTracker.realCost(task.servingTime, msg.Size, 0)\n\t\tp.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)\n\t\treturn\n\t}\n\t// Positive correction buffer value with real cost.\n\tvar replySize uint32\n\tif reply != nil {\n\t\treplySize = reply.size()\n\t}\n\tvar realCost uint64\n\tif h.server.costTracker.testing {\n\t\trealCost = maxCost // Assign a fake cost for testing purposes\n\t} else {\n\t\trealCost = h.server.costTracker.realCost(task.servingTime, msg.Size, replySize)\n\t\tif realCost > maxCost {\n\t\t\trealCost = maxCost\n\t\t}\n\t}\n\tbv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost)\n\tif reply != nil {\n\t\t// Feed cost tracker request serving statistic.\n\t\th.server.costTracker.updateStats(msg.Code, reqCnt, task.servingTime, 
realCost)\n\t\t// Reduce priority \"balance\" for the specific peer.\n\t\tp.balance.RequestServed(realCost)\n\t\tp.queueSend(func() {\n\t\t\tif err := reply.send(bv); err != nil {\n\t\t\t\tselect {\n\t\t\t\tcase p.errCh <- err:\n\t\t\t\tdefault:\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// handleMsg is invoked whenever an inbound message is received from a remote\n// peer. The remote connection is torn down upon returning any error.\nfunc (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error {\n\t// Read the next message from the remote peer, and ensure it's fully consumed\n\tmsg, err := p.rw.ReadMsg()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.Log().Trace(\"Light Ethereum message arrived\", \"code\", msg.Code, \"bytes\", msg.Size)\n\n\t// Discard large messages which exceed the limitation.\n\tif msg.Size > ProtocolMaxMsgSize {\n\t\tclientErrorMeter.Mark(1)\n\t\treturn errResp(ErrMsgTooLarge, \"%v > %v\", msg.Size, ProtocolMaxMsgSize)\n\t}\n\tdefer msg.Discard()\n\n\t// Look up the request handler table and ensure the message type is\n\t// supported by the protocol.\n\treq, ok := Les3[msg.Code]\n\tif !ok {\n\t\tp.Log().Trace(\"Received invalid message\", \"code\", msg.Code)\n\t\tclientErrorMeter.Mark(1)\n\t\treturn errResp(ErrInvalidMsgCode, \"%v\", msg.Code)\n\t}\n\tp.Log().Trace(\"Received \" + req.Name)\n\n\t// Decode the p2p message, resolve the concrete handler for it.\n\tserve, reqID, reqCnt, err := req.Handle(msg)\n\tif err != nil {\n\t\tclientErrorMeter.Mark(1)\n\t\treturn errResp(ErrDecode, \"%v: %v\", msg, err)\n\t}\n\tif metrics.EnabledExpensive {\n\t\treq.InPacketsMeter.Mark(1)\n\t\treq.InTrafficMeter.Mark(int64(msg.Size))\n\t}\n\tp.responseCount++\n\tresponseCount := p.responseCount\n\n\t// First check that this client message complies with all rules before\n\t// handling it, and return a processor if all checks pass.\n\ttask, maxCost := h.beforeHandle(p, reqID, responseCount, msg, reqCnt, req.MaxCount)\n\tif task == nil {\n\t\treturn nil\n\t}\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\treply := serve(h, p, task.waitOrStop)\n\t\th.afterHandle(p, reqID, responseCount, msg, maxCost, reqCnt, task, reply)\n\n\t\tif metrics.EnabledExpensive {\n\t\t\tsize := uint32(0)\n\t\t\tif reply != nil {\n\t\t\t\tsize = reply.size()\n\t\t\t}\n\t\t\treq.OutPacketsMeter.Mark(1)\n\t\t\treq.OutTrafficMeter.Mark(int64(size))\n\t\t\treq.ServingTimeMeter.Update(time.Duration(task.servingTime))\n\t\t}\n\t}()\n\t// If the client has made too many invalid requests (e.g. 
requesting non-existent data),\n\t// reject it to prevent spam attacks.\n\tif p.getInvalid() > maxRequestErrors {\n\t\tclientErrorMeter.Mark(1)\n\t\treturn errTooManyInvalidRequest\n\t}\n\treturn nil\n}\n\n// BlockChain implements serverBackend\nfunc (h *serverHandler) BlockChain() *core.BlockChain {\n\treturn h.blockchain\n}\n\n// TxPool implements serverBackend\nfunc (h *serverHandler) TxPool() *core.TxPool {\n\treturn h.txpool\n}\n\n// ArchiveMode implements serverBackend\nfunc (h *serverHandler) ArchiveMode() bool {\n\treturn h.server.archiveMode\n}\n\n// AddTxsSync implements serverBackend\nfunc (h *serverHandler) AddTxsSync() bool {\n\treturn h.addTxsSync\n}\n\n// getAccount retrieves an account from the state based on root.\nfunc getAccount(triedb *trie.Database, root, hash common.Hash) (state.Account, error) {\n\ttrie, err := trie.New(root, triedb)\n\tif err != nil {\n\t\treturn state.Account{}, err\n\t}\n\tblob, err := trie.TryGet(hash[:])\n\tif err != nil {\n\t\treturn state.Account{}, err\n\t}\n\tvar account state.Account\n\tif err = rlp.DecodeBytes(blob, &account); err != nil {\n\t\treturn state.Account{}, err\n\t}\n\treturn account, nil\n}\n\n// GetHelperTrie returns the post-processed trie for the given trie ID and section index\nfunc (h *serverHandler) GetHelperTrie(typ uint, index uint64) *trie.Trie {\n\tvar (\n\t\troot   common.Hash\n\t\tprefix string\n\t)\n\tswitch typ {\n\tcase htCanonical:\n\t\tsectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.ChtSize-1)\n\t\troot, prefix = light.GetChtRoot(h.chainDb, index, sectionHead), light.ChtTablePrefix\n\tcase htBloomBits:\n\t\tsectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.BloomTrieSize-1)\n\t\troot, prefix = light.GetBloomTrieRoot(h.chainDb, index, sectionHead), light.BloomTrieTablePrefix\n\t}\n\tif root == (common.Hash{}) {\n\t\treturn nil\n\t}\n\ttrie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix)))\n\treturn trie\n}\n\n// broadcastLoop broadcasts new block information to all connected light\n// clients. According to the agreement between client and server, the server should\n// only broadcast a new announcement if the total difficulty is higher than the\n// last one. 
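This keeps clients from processing duplicate or stale head announcements. 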
Besides, the server will add the signature if the client requires it.\nfunc (h *serverHandler) broadcastLoop() {\n\tdefer h.wg.Done()\n\n\theadCh := make(chan core.ChainHeadEvent, 10)\n\theadSub := h.blockchain.SubscribeChainHeadEvent(headCh)\n\tdefer headSub.Unsubscribe()\n\n\tvar (\n\t\tlastHead *types.Header\n\t\tlastTd   = common.Big0\n\t)\n\tfor {\n\t\tselect {\n\t\tcase ev := <-headCh:\n\t\t\theader := ev.Block.Header()\n\t\t\thash, number := header.Hash(), header.Number.Uint64()\n\t\t\ttd := h.blockchain.GetTd(hash, number)\n\t\t\tif td == nil || td.Cmp(lastTd) <= 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar reorg uint64\n\t\t\tif lastHead != nil {\n\t\t\t\treorg = lastHead.Number.Uint64() - rawdb.FindCommonAncestor(h.chainDb, header, lastHead).Number.Uint64()\n\t\t\t}\n\t\t\tlastHead, lastTd = header, td\n\t\t\tlog.Debug(\"Announcing block to peers\", \"number\", number, \"hash\", hash, \"td\", td, \"reorg\", reorg)\n\t\t\th.server.broadcaster.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg})\n\t\tcase <-h.closeCh:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// broadcaster sends new header announcements to active client peers\ntype broadcaster struct {\n\tns                           *nodestate.NodeStateMachine\n\tprivateKey                   *ecdsa.PrivateKey\n\tlastAnnounce, signedAnnounce announceData\n}\n\n// newBroadcaster creates a new broadcaster\nfunc newBroadcaster(ns *nodestate.NodeStateMachine) *broadcaster {\n\tb := &broadcaster{ns: ns}\n\tns.SubscribeState(priorityPoolSetup.ActiveFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {\n\t\tif newState.Equals(priorityPoolSetup.ActiveFlag) {\n\t\t\t// send last announcement to activated peers\n\t\t\tb.sendTo(node)\n\t\t}\n\t})\n\treturn b\n}\n\n// setSignerKey sets the signer key for signed announcements. 
Should be called before\n// starting the protocol handler.\nfunc (b *broadcaster) setSignerKey(privateKey *ecdsa.PrivateKey) {\n\tb.privateKey = privateKey\n}\n\n// broadcast sends the given announcements to all active peers\nfunc (b *broadcaster) broadcast(announce announceData) {\n\tb.ns.Operation(func() {\n\t\t// iterate in an Operation to ensure that the active set does not change while iterating\n\t\tb.lastAnnounce = announce\n\t\tb.ns.ForEach(priorityPoolSetup.ActiveFlag, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {\n\t\t\tb.sendTo(node)\n\t\t})\n\t})\n}\n\n// sendTo sends the most recent announcement to the given node unless the same or higher Td\n// announcement has already been sent.\nfunc (b *broadcaster) sendTo(node *enode.Node) {\n\tif b.lastAnnounce.Td == nil {\n\t\treturn\n\t}\n\tif p, _ := b.ns.GetField(node, clientPeerField).(*clientPeer); p != nil {\n\t\tif p.headInfo.Td == nil || b.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 {\n\t\t\tannounce := b.lastAnnounce\n\t\t\tswitch p.announceType {\n\t\t\tcase announceTypeSimple:\n\t\t\t\tif !p.queueSend(func() { p.sendAnnounce(announce) }) {\n\t\t\t\t\tlog.Debug(\"Drop announcement because queue is full\", \"number\", announce.Number, \"hash\", announce.Hash)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debug(\"Sent announcement\", \"number\", announce.Number, \"hash\", announce.Hash)\n\t\t\t\t}\n\t\t\tcase announceTypeSigned:\n\t\t\t\tif b.signedAnnounce.Hash != b.lastAnnounce.Hash {\n\t\t\t\t\tb.signedAnnounce = b.lastAnnounce\n\t\t\t\t\tb.signedAnnounce.sign(b.privateKey)\n\t\t\t\t}\n\t\t\t\tannounce := b.signedAnnounce\n\t\t\t\tif !p.queueSend(func() { p.sendAnnounce(announce) }) {\n\t\t\t\t\tlog.Debug(\"Drop announcement because queue is full\", \"number\", announce.Number, \"hash\", announce.Hash)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debug(\"Sent announcement\", \"number\", announce.Number, \"hash\", announce.Hash)\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.headInfo = blockInfo{b.lastAnnounce.Hash, b.lastAnnounce.Number, b.lastAnnounce.Td}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "les/server_requests.go",
    "content": "// Copyright 2021 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"encoding/binary\"\n\t\"encoding/json\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/metrics\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\t\"github.com/ethereum/go-ethereum/trie\"\n)\n\n// serverBackend defines the backend functions needed for serving LES requests\ntype serverBackend interface {\n\tArchiveMode() bool\n\tAddTxsSync() bool\n\tBlockChain() *core.BlockChain\n\tTxPool() *core.TxPool\n\tGetHelperTrie(typ uint, index uint64) *trie.Trie\n}\n\n// Decoder is implemented by the messages passed to the handler functions\ntype Decoder interface {\n\tDecode(val interface{}) error\n}\n\n// RequestType is a static struct that describes an LES request type and references\n// its handler function.\ntype RequestType struct {\n\tName                                                             string\n\tMaxCount                                                         uint64\n\tInPacketsMeter, InTrafficMeter, OutPacketsMeter, OutTrafficMeter metrics.Meter\n\tServingTimeMeter                                                 metrics.Timer\n\tHandle                                                           func(msg Decoder) (serve serveRequestFn, reqID, amount uint64, err error)\n}\n\n// serveRequestFn is returned by the request handler functions after decoding the request.\n// This function does the actual request serving using the supplied backend. waitOrStop is\n// called between serving individual request items and may block if the serving process\n// needs to be throttled. If it returns false then the process is terminated.\n// The reply is not sent by this function yet. 
The flow control feedback value is supplied\n// by the protocol handler when calling the send function of the returned reply struct.\ntype serveRequestFn func(backend serverBackend, peer *clientPeer, waitOrStop func() bool) *reply\n\n// Les3 contains the request types supported by les/2 and les/3\nvar Les3 = map[uint64]RequestType{\n\tGetBlockHeadersMsg: {\n\t\tName:             \"block header request\",\n\t\tMaxCount:         MaxHeaderFetch,\n\t\tInPacketsMeter:   miscInHeaderPacketsMeter,\n\t\tInTrafficMeter:   miscInHeaderTrafficMeter,\n\t\tOutPacketsMeter:  miscOutHeaderPacketsMeter,\n\t\tOutTrafficMeter:  miscOutHeaderTrafficMeter,\n\t\tServingTimeMeter: miscServingTimeHeaderTimer,\n\t\tHandle:           handleGetBlockHeaders,\n\t},\n\tGetBlockBodiesMsg: {\n\t\tName:             \"block bodies request\",\n\t\tMaxCount:         MaxBodyFetch,\n\t\tInPacketsMeter:   miscInBodyPacketsMeter,\n\t\tInTrafficMeter:   miscInBodyTrafficMeter,\n\t\tOutPacketsMeter:  miscOutBodyPacketsMeter,\n\t\tOutTrafficMeter:  miscOutBodyTrafficMeter,\n\t\tServingTimeMeter: miscServingTimeBodyTimer,\n\t\tHandle:           handleGetBlockBodies,\n\t},\n\tGetCodeMsg: {\n\t\tName:             \"code request\",\n\t\tMaxCount:         MaxCodeFetch,\n\t\tInPacketsMeter:   miscInCodePacketsMeter,\n\t\tInTrafficMeter:   miscInCodeTrafficMeter,\n\t\tOutPacketsMeter:  miscOutCodePacketsMeter,\n\t\tOutTrafficMeter:  miscOutCodeTrafficMeter,\n\t\tServingTimeMeter: miscServingTimeCodeTimer,\n\t\tHandle:           handleGetCode,\n\t},\n\tGetReceiptsMsg: {\n\t\tName:             \"receipts request\",\n\t\tMaxCount:         MaxReceiptFetch,\n\t\tInPacketsMeter:   miscInReceiptPacketsMeter,\n\t\tInTrafficMeter:   miscInReceiptTrafficMeter,\n\t\tOutPacketsMeter:  miscOutReceiptPacketsMeter,\n\t\tOutTrafficMeter:  miscOutReceiptTrafficMeter,\n\t\tServingTimeMeter: miscServingTimeReceiptTimer,\n\t\tHandle:           handleGetReceipts,\n\t},\n\tGetProofsV2Msg: {\n\t\tName:             \"les/2 proofs request\",\n\t\tMaxCount:         MaxProofsFetch,\n\t\tInPacketsMeter:   miscInTrieProofPacketsMeter,\n\t\tInTrafficMeter:   miscInTrieProofTrafficMeter,\n\t\tOutPacketsMeter:  miscOutTrieProofPacketsMeter,\n\t\tOutTrafficMeter:  miscOutTrieProofTrafficMeter,\n\t\tServingTimeMeter: miscServingTimeTrieProofTimer,\n\t\tHandle:           handleGetProofs,\n\t},\n\tGetHelperTrieProofsMsg: {\n\t\tName:             \"helper trie proof request\",\n\t\tMaxCount:         MaxHelperTrieProofsFetch,\n\t\tInPacketsMeter:   miscInHelperTriePacketsMeter,\n\t\tInTrafficMeter:   miscInHelperTrieTrafficMeter,\n\t\tOutPacketsMeter:  miscOutHelperTriePacketsMeter,\n\t\tOutTrafficMeter:  miscOutHelperTrieTrafficMeter,\n\t\tServingTimeMeter: miscServingTimeHelperTrieTimer,\n\t\tHandle:           handleGetHelperTrieProofs,\n\t},\n\tSendTxV2Msg: {\n\t\tName:             \"new transactions\",\n\t\tMaxCount:         MaxTxSend,\n\t\tInPacketsMeter:   miscInTxsPacketsMeter,\n\t\tInTrafficMeter:   miscInTxsTrafficMeter,\n\t\tOutPacketsMeter:  miscOutTxsPacketsMeter,\n\t\tOutTrafficMeter:  miscOutTxsTrafficMeter,\n\t\tServingTimeMeter: miscServingTimeTxTimer,\n\t\tHandle:           handleSendTx,\n\t},\n\tGetTxStatusMsg: {\n\t\tName:             \"transaction status query request\",\n\t\tMaxCount:         MaxTxStatus,\n\t\tInPacketsMeter:   miscInTxStatusPacketsMeter,\n\t\tInTrafficMeter:   miscInTxStatusTrafficMeter,\n\t\tOutPacketsMeter:  miscOutTxStatusPacketsMeter,\n\t\tOutTrafficMeter:  miscOutTxStatusTrafficMeter,\n\t\tServingTimeMeter: 
miscServingTimeTxStatusTimer,\n\t\tHandle:           handleGetTxStatus,\n\t},\n}\n\n// handleGetBlockHeaders handles a block header request\nfunc handleGetBlockHeaders(msg Decoder) (serveRequestFn, uint64, uint64, error) {\n\tvar r GetBlockHeadersPacket\n\tif err := msg.Decode(&r); err != nil {\n\t\treturn nil, 0, 0, err\n\t}\n\treturn func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {\n\t\t// Gather headers until the fetch or network limits are reached\n\t\tvar (\n\t\t\tbc              = backend.BlockChain()\n\t\t\thashMode        = r.Query.Origin.Hash != (common.Hash{})\n\t\t\tfirst           = true\n\t\t\tmaxNonCanonical = uint64(100)\n\t\t\tbytes           common.StorageSize\n\t\t\theaders         []*types.Header\n\t\t\tunknown         bool\n\t\t)\n\t\tfor !unknown && len(headers) < int(r.Query.Amount) && bytes < softResponseLimit {\n\t\t\tif !first && !waitOrStop() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// Retrieve the next header satisfying the request\n\t\t\tvar origin *types.Header\n\t\t\tif hashMode {\n\t\t\t\tif first {\n\t\t\t\t\torigin = bc.GetHeaderByHash(r.Query.Origin.Hash)\n\t\t\t\t\tif origin != nil {\n\t\t\t\t\t\tr.Query.Origin.Number = origin.Number.Uint64()\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\torigin = bc.GetHeader(r.Query.Origin.Hash, r.Query.Origin.Number)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\torigin = bc.GetHeaderByNumber(r.Query.Origin.Number)\n\t\t\t}\n\t\t\tif origin == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\theaders = append(headers, origin)\n\t\t\tbytes += estHeaderRlpSize\n\n\t\t\t// Advance to the next header of the request\n\t\t\tswitch {\n\t\t\tcase hashMode && r.Query.Reverse:\n\t\t\t\t// Hash based traversal towards the genesis block\n\t\t\t\tancestor := r.Query.Skip + 1\n\t\t\t\tif ancestor == 0 {\n\t\t\t\t\tunknown = true\n\t\t\t\t} else {\n\t\t\t\t\tr.Query.Origin.Hash, r.Query.Origin.Number = bc.GetAncestor(r.Query.Origin.Hash, r.Query.Origin.Number, ancestor, &maxNonCanonical)\n\t\t\t\t\tunknown = r.Query.Origin.Hash == common.Hash{}\n\t\t\t\t}\n\t\t\tcase hashMode && !r.Query.Reverse:\n\t\t\t\t// Hash based traversal towards the leaf block\n\t\t\t\tvar (\n\t\t\t\t\tcurrent = origin.Number.Uint64()\n\t\t\t\t\tnext    = current + r.Query.Skip + 1\n\t\t\t\t)\n\t\t\t\tif next <= current {\n\t\t\t\t\tinfos, _ := json.Marshal(p.Peer.Info())\n\t\t\t\t\tp.Log().Warn(\"GetBlockHeaders skip overflow attack\", \"current\", current, \"skip\", r.Query.Skip, \"next\", next, \"attacker\", string(infos))\n\t\t\t\t\tunknown = true\n\t\t\t\t} else {\n\t\t\t\t\tif header := bc.GetHeaderByNumber(next); header != nil {\n\t\t\t\t\t\tnextHash := header.Hash()\n\t\t\t\t\t\texpOldHash, _ := bc.GetAncestor(nextHash, next, r.Query.Skip+1, &maxNonCanonical)\n\t\t\t\t\t\tif expOldHash == r.Query.Origin.Hash {\n\t\t\t\t\t\t\tr.Query.Origin.Hash, r.Query.Origin.Number = nextHash, next\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tunknown = true\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tunknown = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase r.Query.Reverse:\n\t\t\t\t// Number based traversal towards the genesis block\n\t\t\t\tif r.Query.Origin.Number >= r.Query.Skip+1 {\n\t\t\t\t\tr.Query.Origin.Number -= r.Query.Skip + 1\n\t\t\t\t} else {\n\t\t\t\t\tunknown = true\n\t\t\t\t}\n\n\t\t\tcase !r.Query.Reverse:\n\t\t\t\t// Number based traversal towards the leaf block\n\t\t\t\tr.Query.Origin.Number += r.Query.Skip + 1\n\t\t\t}\n\t\t\tfirst = false\n\t\t}\n\t\treturn p.replyBlockHeaders(r.ReqID, headers)\n\t}, r.ReqID, r.Query.Amount, nil\n}\n\n// handleGetBlockBodies handles a 
block body request\nfunc handleGetBlockBodies(msg Decoder) (serveRequestFn, uint64, uint64, error) {\n\tvar r GetBlockBodiesPacket\n\tif err := msg.Decode(&r); err != nil {\n\t\treturn nil, 0, 0, err\n\t}\n\treturn func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {\n\t\tvar (\n\t\t\tbytes  int\n\t\t\tbodies []rlp.RawValue\n\t\t)\n\t\tbc := backend.BlockChain()\n\t\tfor i, hash := range r.Hashes {\n\t\t\tif i != 0 && !waitOrStop() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif bytes >= softResponseLimit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tbody := bc.GetBodyRLP(hash)\n\t\t\tif body == nil {\n\t\t\t\tp.bumpInvalid()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbodies = append(bodies, body)\n\t\t\tbytes += len(body)\n\t\t}\n\t\treturn p.replyBlockBodiesRLP(r.ReqID, bodies)\n\t}, r.ReqID, uint64(len(r.Hashes)), nil\n}\n\n// handleGetCode handles a contract code request\nfunc handleGetCode(msg Decoder) (serveRequestFn, uint64, uint64, error) {\n\tvar r GetCodePacket\n\tif err := msg.Decode(&r); err != nil {\n\t\treturn nil, 0, 0, err\n\t}\n\treturn func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {\n\t\tvar (\n\t\t\tbytes int\n\t\t\tdata  [][]byte\n\t\t)\n\t\tbc := backend.BlockChain()\n\t\tfor i, request := range r.Reqs {\n\t\t\tif i != 0 && !waitOrStop() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// Look up the root hash belonging to the request\n\t\t\theader := bc.GetHeaderByHash(request.BHash)\n\t\t\tif header == nil {\n\t\t\t\tp.Log().Warn(\"Failed to retrieve associated header for code\", \"hash\", request.BHash)\n\t\t\t\tp.bumpInvalid()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Refuse to search stale state data in the database since looking for\n\t\t\t// a non-existent key is quite expensive.\n\t\t\tlocal := bc.CurrentHeader().Number.Uint64()\n\t\t\tif !backend.ArchiveMode() && header.Number.Uint64()+core.TriesInMemory <= local {\n\t\t\t\tp.Log().Debug(\"Reject stale code request\", \"number\", header.Number.Uint64(), \"head\", local)\n\t\t\t\tp.bumpInvalid()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttriedb := bc.StateCache().TrieDB()\n\n\t\t\taccount, err := getAccount(triedb, header.Root, common.BytesToHash(request.AccKey))\n\t\t\tif err != nil {\n\t\t\t\tp.Log().Warn(\"Failed to retrieve account for code\", \"block\", header.Number, \"hash\", header.Hash(), \"account\", common.BytesToHash(request.AccKey), \"err\", err)\n\t\t\t\tp.bumpInvalid()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcode, err := bc.StateCache().ContractCode(common.BytesToHash(request.AccKey), common.BytesToHash(account.CodeHash))\n\t\t\tif err != nil {\n\t\t\t\tp.Log().Warn(\"Failed to retrieve account code\", \"block\", header.Number, \"hash\", header.Hash(), \"account\", common.BytesToHash(request.AccKey), \"codehash\", common.BytesToHash(account.CodeHash), \"err\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Accumulate the code and abort if enough data was retrieved\n\t\t\tdata = append(data, code)\n\t\t\tif bytes += len(code); bytes >= softResponseLimit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn p.replyCode(r.ReqID, data)\n\t}, r.ReqID, uint64(len(r.Reqs)), nil\n}\n\n// handleGetReceipts handles a block receipts request\nfunc handleGetReceipts(msg Decoder) (serveRequestFn, uint64, uint64, error) {\n\tvar r GetReceiptsPacket\n\tif err := msg.Decode(&r); err != nil {\n\t\treturn nil, 0, 0, err\n\t}\n\treturn func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {\n\t\tvar (\n\t\t\tbytes    int\n\t\t\treceipts []rlp.RawValue\n\t\t)\n\t\tbc := backend.BlockChain()\n\t\tfor i, hash := range 
r.Hashes {\n\t\t\tif i != 0 && !waitOrStop() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif bytes >= softResponseLimit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Retrieve the requested block's receipts, skipping if unknown to us\n\t\t\tresults := bc.GetReceiptsByHash(hash)\n\t\t\tif results == nil {\n\t\t\t\tif header := bc.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {\n\t\t\t\t\tp.bumpInvalid()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t// If known, encode and queue for response packet\n\t\t\tif encoded, err := rlp.EncodeToBytes(results); err != nil {\n\t\t\t\tlog.Error(\"Failed to encode receipt\", \"err\", err)\n\t\t\t} else {\n\t\t\t\treceipts = append(receipts, encoded)\n\t\t\t\tbytes += len(encoded)\n\t\t\t}\n\t\t}\n\t\treturn p.replyReceiptsRLP(r.ReqID, receipts)\n\t}, r.ReqID, uint64(len(r.Hashes)), nil\n}\n\n// handleGetProofs handles a proof request\nfunc handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) {\n\tvar r GetProofsPacket\n\tif err := msg.Decode(&r); err != nil {\n\t\treturn nil, 0, 0, err\n\t}\n\treturn func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {\n\t\tvar (\n\t\t\tlastBHash common.Hash\n\t\t\troot      common.Hash\n\t\t\theader    *types.Header\n\t\t\terr       error\n\t\t)\n\t\tbc := backend.BlockChain()\n\t\tnodes := light.NewNodeSet()\n\n\t\tfor i, request := range r.Reqs {\n\t\t\tif i != 0 && !waitOrStop() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// Look up the root hash belonging to the request\n\t\t\tif request.BHash != lastBHash {\n\t\t\t\troot, lastBHash = common.Hash{}, request.BHash\n\n\t\t\t\tif header = bc.GetHeaderByHash(request.BHash); header == nil {\n\t\t\t\t\tp.Log().Warn(\"Failed to retrieve header for proof\", \"hash\", request.BHash)\n\t\t\t\t\tp.bumpInvalid()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// Refuse to search stale state data in the database since looking for\n\t\t\t\t// a non-existent key is quite expensive.\n\t\t\t\tlocal := bc.CurrentHeader().Number.Uint64()\n\t\t\t\tif !backend.ArchiveMode() && header.Number.Uint64()+core.TriesInMemory <= local {\n\t\t\t\t\tp.Log().Debug(\"Reject stale trie request\", \"number\", header.Number.Uint64(), \"head\", local)\n\t\t\t\t\tp.bumpInvalid()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\troot = header.Root\n\t\t\t}\n\t\t\t// If a header lookup failed (non-existent), ignore subsequent requests for the same header\n\t\t\tif root == (common.Hash{}) {\n\t\t\t\tp.bumpInvalid()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Open the account or storage trie for the request\n\t\t\tstatedb := bc.StateCache()\n\n\t\t\tvar trie state.Trie\n\t\t\tswitch len(request.AccKey) {\n\t\t\tcase 0:\n\t\t\t\t// No account key specified, open an account trie\n\t\t\t\ttrie, err = statedb.OpenTrie(root)\n\t\t\t\tif trie == nil || err != nil {\n\t\t\t\t\tp.Log().Warn(\"Failed to open account trie for proof\", \"block\", header.Number, \"hash\", header.Hash(), \"root\", root, \"err\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\t// Account key specified, open a storage trie\n\t\t\t\taccount, err := getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey))\n\t\t\t\tif err != nil {\n\t\t\t\t\tp.Log().Warn(\"Failed to retrieve account for proof\", \"block\", header.Number, \"hash\", header.Hash(), \"account\", common.BytesToHash(request.AccKey), \"err\", err)\n\t\t\t\t\tp.bumpInvalid()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ttrie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root)\n\t\t\t\tif trie == nil || err 
!= nil {\n\t\t\t\t\tp.Log().Warn(\"Failed to open storage trie for proof\", \"block\", header.Number, \"hash\", header.Hash(), \"account\", common.BytesToHash(request.AccKey), \"root\", account.Root, \"err\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Prove the user's request from the account or storage trie\n\t\t\tif err := trie.Prove(request.Key, request.FromLevel, nodes); err != nil {\n\t\t\t\tp.Log().Warn(\"Failed to prove state request\", \"block\", header.Number, \"hash\", header.Hash(), \"err\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif nodes.DataSize() >= softResponseLimit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn p.replyProofsV2(r.ReqID, nodes.NodeList())\n\t}, r.ReqID, uint64(len(r.Reqs)), nil\n}\n\n// handleGetHelperTrieProofs handles a helper trie proof request\nfunc handleGetHelperTrieProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) {\n\tvar r GetHelperTrieProofsPacket\n\tif err := msg.Decode(&r); err != nil {\n\t\treturn nil, 0, 0, err\n\t}\n\treturn func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {\n\t\tvar (\n\t\t\tlastIdx  uint64\n\t\t\tlastType uint\n\t\t\tauxTrie  *trie.Trie\n\t\t\tauxBytes int\n\t\t\tauxData  [][]byte\n\t\t)\n\t\tbc := backend.BlockChain()\n\t\tnodes := light.NewNodeSet()\n\t\tfor i, request := range r.Reqs {\n\t\t\tif i != 0 && !waitOrStop() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif auxTrie == nil || request.Type != lastType || request.TrieIdx != lastIdx {\n\t\t\t\tlastType, lastIdx = request.Type, request.TrieIdx\n\t\t\t\tauxTrie = backend.GetHelperTrie(request.Type, request.TrieIdx)\n\t\t\t}\n\t\t\tif auxTrie == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\t// TODO(rjl493456442) short circuit if the proving failed.\n\t\t\t// The original client side code has a dirty hack to retrieve\n\t\t\t// the headers with no valid proof. 
Keep the compatibility for the\n\t\t\t// legacy les protocol and drop this hack when les2/3 are\n\t\t\t// no longer supported.\n\t\t\terr := auxTrie.Prove(request.Key, request.FromLevel, nodes)\n\t\t\tif p.version >= lpv4 && err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif request.Type == htCanonical && request.AuxReq == htAuxHeader && len(request.Key) == 8 {\n\t\t\t\theader := bc.GetHeaderByNumber(binary.BigEndian.Uint64(request.Key))\n\t\t\t\tdata, err := rlp.EncodeToBytes(header)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"Failed to encode header\", \"err\", err)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tauxData = append(auxData, data)\n\t\t\t\tauxBytes += len(data)\n\t\t\t}\n\t\t\tif nodes.DataSize()+auxBytes >= softResponseLimit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn p.replyHelperTrieProofs(r.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData})\n\t}, r.ReqID, uint64(len(r.Reqs)), nil\n}\n\n// handleSendTx handles a transaction propagation request\nfunc handleSendTx(msg Decoder) (serveRequestFn, uint64, uint64, error) {\n\tvar r SendTxPacket\n\tif err := msg.Decode(&r); err != nil {\n\t\treturn nil, 0, 0, err\n\t}\n\tamount := uint64(len(r.Txs))\n\treturn func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {\n\t\tstats := make([]light.TxStatus, len(r.Txs))\n\t\tfor i, tx := range r.Txs {\n\t\t\tif i != 0 && !waitOrStop() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\thash := tx.Hash()\n\t\t\tstats[i] = txStatus(backend, hash)\n\t\t\tif stats[i].Status == core.TxStatusUnknown {\n\t\t\t\taddFn := backend.TxPool().AddRemotes\n\t\t\t\t// Add txs synchronously for testing purposes\n\t\t\t\tif backend.AddTxsSync() {\n\t\t\t\t\taddFn = backend.TxPool().AddRemotesSync\n\t\t\t\t}\n\t\t\t\tif errs := addFn([]*types.Transaction{tx}); errs[0] != nil {\n\t\t\t\t\tstats[i].Error = errs[0].Error()\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tstats[i] = txStatus(backend, hash)\n\t\t\t}\n\t\t}\n\t\treturn p.replyTxStatus(r.ReqID, stats)\n\t}, r.ReqID, amount, nil\n}\n\n// handleGetTxStatus handles a transaction status query\nfunc handleGetTxStatus(msg Decoder) (serveRequestFn, uint64, uint64, error) {\n\tvar r GetTxStatusPacket\n\tif err := msg.Decode(&r); err != nil {\n\t\treturn nil, 0, 0, err\n\t}\n\treturn func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply {\n\t\tstats := make([]light.TxStatus, len(r.Hashes))\n\t\tfor i, hash := range r.Hashes {\n\t\t\tif i != 0 && !waitOrStop() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tstats[i] = txStatus(backend, hash)\n\t\t}\n\t\treturn p.replyTxStatus(r.ReqID, stats)\n\t}, r.ReqID, uint64(len(r.Hashes)), nil\n}\n\n// txStatus returns the status of a specified transaction.\nfunc txStatus(b serverBackend, hash common.Hash) light.TxStatus {\n\tvar stat light.TxStatus\n\t// Look up the transaction in the txpool first.\n\tstat.Status = b.TxPool().Status([]common.Hash{hash})[0]\n\n\t// If the transaction is unknown to the pool, try looking it up locally.\n\tif stat.Status == core.TxStatusUnknown {\n\t\tlookup := b.BlockChain().GetTransactionLookup(hash)\n\t\tif lookup != nil {\n\t\t\tstat.Status = core.TxStatusIncluded\n\t\t\tstat.Lookup = lookup\n\t\t}\n\t}\n\treturn stat\n}\n"
  },
  {
    "path": "les/servingqueue.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/common/prque\"\n)\n\n// servingQueue allows running tasks in a limited number of threads and puts the\n// waiting tasks in a priority queue\ntype servingQueue struct {\n\trecentTime, queuedTime, servingTimeDiff uint64\n\tburstLimit, burstDropLimit              uint64\n\tburstDecRate                            float64\n\tlastUpdate                              mclock.AbsTime\n\n\tqueueAddCh, queueBestCh chan *servingTask\n\tstopThreadCh, quit      chan struct{}\n\tsetThreadsCh            chan int\n\n\twg          sync.WaitGroup\n\tthreadCount int          // number of currently running threads\n\tqueue       *prque.Prque // priority queue for waiting or suspended tasks\n\tbest        *servingTask // the highest priority task (not included in the queue)\n\tsuspendBias int64        // priority bias against suspending an already running task\n}\n\n// servingTask represents a request serving task. Tasks can be implemented to\n// run in multiple steps, allowing the serving queue to suspend execution between\n// steps if higher priority tasks are entered. The creator of the task should\n// set the following fields:\n//\n// - priority: greater value means higher priority; values can wrap around the int64 range\n// - run: execute a single step; return true if finished\n// - after: executed after run finishes or returns an error, receives the total serving time\ntype servingTask struct {\n\tsq                                       *servingQueue\n\tservingTime, timeAdded, maxTime, expTime uint64\n\tpeer                                     *clientPeer\n\tpriority                                 int64\n\tbiasAdded                                bool\n\ttoken                                    runToken\n\ttokenCh                                  chan runToken\n}\n\n// runToken received by servingTask.start allows the task to run. 
Closing the\n// channel by servingTask.stop signals the thread controller to allow a new task\n// to start running.\ntype runToken chan struct{}\n\n// start blocks until the task can start and returns true if it is allowed to run.\n// Returning false means that the task should be cancelled.\nfunc (t *servingTask) start() bool {\n\tif t.peer.isFrozen() {\n\t\treturn false\n\t}\n\tt.tokenCh = make(chan runToken, 1)\n\tselect {\n\tcase t.sq.queueAddCh <- t:\n\tcase <-t.sq.quit:\n\t\treturn false\n\t}\n\tselect {\n\tcase t.token = <-t.tokenCh:\n\tcase <-t.sq.quit:\n\t\treturn false\n\t}\n\tif t.token == nil {\n\t\treturn false\n\t}\n\tt.servingTime -= uint64(mclock.Now())\n\treturn true\n}\n\n// done signals the thread controller about the task being finished and returns\n// the total serving time of the task in nanoseconds.\nfunc (t *servingTask) done() uint64 {\n\tt.servingTime += uint64(mclock.Now())\n\tclose(t.token)\n\tdiff := t.servingTime - t.timeAdded\n\tt.timeAdded = t.servingTime\n\tif t.expTime > diff {\n\t\tt.expTime -= diff\n\t\tatomic.AddUint64(&t.sq.servingTimeDiff, t.expTime)\n\t} else {\n\t\tt.expTime = 0\n\t}\n\treturn t.servingTime\n}\n\n// waitOrStop can be called during the execution of the task. It blocks if there\n// is a higher priority task waiting (a bias is applied in favor of the currently\n// running task). Returning true means that the execution can be resumed. False\n// means the task should be cancelled.\nfunc (t *servingTask) waitOrStop() bool {\n\tt.done()\n\tif !t.biasAdded {\n\t\tt.priority += t.sq.suspendBias\n\t\tt.biasAdded = true\n\t}\n\treturn t.start()\n}\n\n// newServingQueue returns a new servingQueue\nfunc newServingQueue(suspendBias int64, utilTarget float64) *servingQueue {\n\tsq := &servingQueue{\n\t\tqueue:          prque.New(nil),\n\t\tsuspendBias:    suspendBias,\n\t\tqueueAddCh:     make(chan *servingTask, 100),\n\t\tqueueBestCh:    make(chan *servingTask),\n\t\tstopThreadCh:   make(chan struct{}),\n\t\tquit:           make(chan struct{}),\n\t\tsetThreadsCh:   make(chan int, 10),\n\t\tburstLimit:     uint64(utilTarget * bufLimitRatio * 1200000),\n\t\tburstDropLimit: uint64(utilTarget * bufLimitRatio * 1000000),\n\t\tburstDecRate:   utilTarget,\n\t\tlastUpdate:     mclock.Now(),\n\t}\n\tsq.wg.Add(2)\n\tgo sq.queueLoop()\n\tgo sq.threadCountLoop()\n\treturn sq\n}\n\n// newTask creates a new task with the given priority\nfunc (sq *servingQueue) newTask(peer *clientPeer, maxTime uint64, priority int64) *servingTask {\n\treturn &servingTask{\n\t\tsq:       sq,\n\t\tpeer:     peer,\n\t\tmaxTime:  maxTime,\n\t\texpTime:  maxTime,\n\t\tpriority: priority,\n\t}\n}\n\n// threadController is started in multiple goroutines and controls the execution\n// of tasks. The number of active thread controllers equals the allowed number of\n// concurrently running threads. It tries to fetch the highest priority queued\n// task first. 
If there are no queued tasks waiting then it can directly catch\n// run tokens from the token channel and allow the corresponding tasks to run\n// without entering the priority queue.\nfunc (sq *servingQueue) threadController() {\n\tfor {\n\t\ttoken := make(runToken)\n\t\tselect {\n\t\tcase best := <-sq.queueBestCh:\n\t\t\tbest.tokenCh <- token\n\t\tcase <-sq.stopThreadCh:\n\t\t\tsq.wg.Done()\n\t\t\treturn\n\t\tcase <-sq.quit:\n\t\t\tsq.wg.Done()\n\t\t\treturn\n\t\t}\n\t\t<-token\n\t\tselect {\n\t\tcase <-sq.stopThreadCh:\n\t\t\tsq.wg.Done()\n\t\t\treturn\n\t\tcase <-sq.quit:\n\t\t\tsq.wg.Done()\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t}\n}\n\ntype (\n\t// peerTasks lists the tasks received from a given peer when selecting peers to freeze\n\tpeerTasks struct {\n\t\tpeer     *clientPeer\n\t\tlist     []*servingTask\n\t\tsumTime  uint64\n\t\tpriority float64\n\t}\n\t// peerList is a sortable list of peerTasks\n\tpeerList []*peerTasks\n)\n\nfunc (l peerList) Len() int {\n\treturn len(l)\n}\n\nfunc (l peerList) Less(i, j int) bool {\n\treturn l[i].priority < l[j].priority\n}\n\nfunc (l peerList) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n\n// freezePeers selects the peers with the worst priority queued tasks and freezes\n// them until burstTime goes under burstDropLimit or all peers are frozen\nfunc (sq *servingQueue) freezePeers() {\n\tpeerMap := make(map[*clientPeer]*peerTasks)\n\tvar peerList peerList\n\tif sq.best != nil {\n\t\tsq.queue.Push(sq.best, sq.best.priority)\n\t}\n\tsq.best = nil\n\tfor sq.queue.Size() > 0 {\n\t\ttask := sq.queue.PopItem().(*servingTask)\n\t\ttasks := peerMap[task.peer]\n\t\tif tasks == nil {\n\t\t\tbufValue, bufLimit := task.peer.fcClient.BufferStatus()\n\t\t\tif bufLimit < 1 {\n\t\t\t\tbufLimit = 1\n\t\t\t}\n\t\t\ttasks = &peerTasks{\n\t\t\t\tpeer:     task.peer,\n\t\t\t\tpriority: float64(bufValue) / float64(bufLimit), // lower value comes first\n\t\t\t}\n\t\t\tpeerMap[task.peer] = tasks\n\t\t\tpeerList = append(peerList, tasks)\n\t\t}\n\t\ttasks.list = append(tasks.list, task)\n\t\ttasks.sumTime += task.expTime\n\t}\n\tsort.Sort(peerList)\n\tdrop := true\n\tfor _, tasks := range peerList {\n\t\tif drop {\n\t\t\ttasks.peer.freeze()\n\t\t\ttasks.peer.fcClient.Freeze()\n\t\t\tsq.queuedTime -= tasks.sumTime\n\t\t\tsqQueuedGauge.Update(int64(sq.queuedTime))\n\t\t\tclientFreezeMeter.Mark(1)\n\t\t\tdrop = sq.recentTime+sq.queuedTime > sq.burstDropLimit\n\t\t\tfor _, task := range tasks.list {\n\t\t\t\ttask.tokenCh <- nil\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, task := range tasks.list {\n\t\t\t\tsq.queue.Push(task, task.priority)\n\t\t\t}\n\t\t}\n\t}\n\tif sq.queue.Size() > 0 {\n\t\tsq.best = sq.queue.PopItem().(*servingTask)\n\t}\n}\n\n// updateRecentTime recalculates the recent serving time value\nfunc (sq *servingQueue) updateRecentTime() {\n\tsubTime := atomic.SwapUint64(&sq.servingTimeDiff, 0)\n\tnow := mclock.Now()\n\tdt := now - sq.lastUpdate\n\tsq.lastUpdate = now\n\tif dt > 0 {\n\t\tsubTime += uint64(float64(dt) * sq.burstDecRate)\n\t}\n\tif sq.recentTime > subTime {\n\t\tsq.recentTime -= subTime\n\t} else {\n\t\tsq.recentTime = 0\n\t}\n}\n\n// addTask inserts a task into the priority queue\nfunc (sq *servingQueue) addTask(task *servingTask) {\n\tif sq.best == nil {\n\t\tsq.best = task\n\t} else if task.priority > sq.best.priority {\n\t\tsq.queue.Push(sq.best, sq.best.priority)\n\t\tsq.best = task\n\t} else {\n\t\tsq.queue.Push(task, task.priority)\n\t}\n\tsq.updateRecentTime()\n\tsq.queuedTime += 
task.expTime\n\tsqServedGauge.Update(int64(sq.recentTime))\n\tsqQueuedGauge.Update(int64(sq.queuedTime))\n\tif sq.recentTime+sq.queuedTime > sq.burstLimit {\n\t\tsq.freezePeers()\n\t}\n}\n\n// queueLoop is an event loop running in a goroutine. It receives tasks from queueAddCh\n// and always tries to send the highest priority task to queueBestCh. Successfully sent\n// tasks are removed from the queue.\nfunc (sq *servingQueue) queueLoop() {\n\tfor {\n\t\tif sq.best != nil {\n\t\t\texpTime := sq.best.expTime\n\t\t\tselect {\n\t\t\tcase task := <-sq.queueAddCh:\n\t\t\t\tsq.addTask(task)\n\t\t\tcase sq.queueBestCh <- sq.best:\n\t\t\t\tsq.updateRecentTime()\n\t\t\t\tsq.queuedTime -= expTime\n\t\t\t\tsq.recentTime += expTime\n\t\t\t\tsqServedGauge.Update(int64(sq.recentTime))\n\t\t\t\tsqQueuedGauge.Update(int64(sq.queuedTime))\n\t\t\t\tif sq.queue.Size() == 0 {\n\t\t\t\t\tsq.best = nil\n\t\t\t\t} else {\n\t\t\t\t\tsq.best, _ = sq.queue.PopItem().(*servingTask)\n\t\t\t\t}\n\t\t\tcase <-sq.quit:\n\t\t\t\tsq.wg.Done()\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tselect {\n\t\t\tcase task := <-sq.queueAddCh:\n\t\t\t\tsq.addTask(task)\n\t\t\tcase <-sq.quit:\n\t\t\t\tsq.wg.Done()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n// threadCountLoop is an event loop running in a goroutine. It adjusts the number\n// of active thread controller goroutines.\nfunc (sq *servingQueue) threadCountLoop() {\n\tvar threadCountTarget int\n\tfor {\n\t\tfor threadCountTarget > sq.threadCount {\n\t\t\tsq.wg.Add(1)\n\t\t\tgo sq.threadController()\n\t\t\tsq.threadCount++\n\t\t}\n\t\tif threadCountTarget < sq.threadCount {\n\t\t\tselect {\n\t\t\tcase threadCountTarget = <-sq.setThreadsCh:\n\t\t\tcase sq.stopThreadCh <- struct{}{}:\n\t\t\t\tsq.threadCount--\n\t\t\tcase <-sq.quit:\n\t\t\t\tsq.wg.Done()\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tselect {\n\t\t\tcase threadCountTarget = <-sq.setThreadsCh:\n\t\t\tcase <-sq.quit:\n\t\t\t\tsq.wg.Done()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\n// setThreads sets the allowed processing thread count, suspending tasks as soon as\n// possible if necessary.\nfunc (sq *servingQueue) setThreads(threadCount int) {\n\tselect {\n\tcase sq.setThreadsCh <- threadCount:\n\tcase <-sq.quit:\n\t\treturn\n\t}\n}\n\n// stop stops task processing as soon as possible and shuts down the serving queue.\nfunc (sq *servingQueue) stop() {\n\tclose(sq.quit)\n\tsq.wg.Wait()\n}\n"
  },
  {
    "path": "les/state_accessor.go",
    "content": "// Copyright 2021 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/state\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/core/vm\"\n\t\"github.com/ethereum/go-ethereum/light\"\n)\n\n// stateAtBlock retrieves the state database associated with a certain block.\nfunc (leth *LightEthereum) stateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) {\n\treturn light.NewState(ctx, block.Header(), leth.odr), func() {}, nil\n}\n\n// statesInRange retrieves a batch of state databases associated with the specific\n// block ranges.\nfunc (leth *LightEthereum) statesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) {\n\tvar states []*state.StateDB\n\tfor number := fromBlock.NumberU64(); number <= toBlock.NumberU64(); number++ {\n\t\theader, err := leth.blockchain.GetHeaderByNumberOdr(ctx, number)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tstates = append(states, light.NewState(ctx, header, leth.odr))\n\t}\n\treturn states, nil, nil\n}\n\n// stateAtTransaction returns the execution environment of a certain transaction.\nfunc (leth *LightEthereum) stateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) {\n\t// Short circuit if it's genesis block.\n\tif block.NumberU64() == 0 {\n\t\treturn nil, vm.BlockContext{}, nil, nil, errors.New(\"no transaction in genesis\")\n\t}\n\t// Create the parent state database\n\tparent, err := leth.blockchain.GetBlock(ctx, block.ParentHash(), block.NumberU64()-1)\n\tif err != nil {\n\t\treturn nil, vm.BlockContext{}, nil, nil, err\n\t}\n\tstatedb, _, err := leth.stateAtBlock(ctx, parent, reexec)\n\tif err != nil {\n\t\treturn nil, vm.BlockContext{}, nil, nil, err\n\t}\n\tif txIndex == 0 && len(block.Transactions()) == 0 {\n\t\treturn nil, vm.BlockContext{}, statedb, func() {}, nil\n\t}\n\t// Recompute transactions up to the target index.\n\tsigner := types.MakeSigner(leth.blockchain.Config(), block.Number())\n\tfor idx, tx := range block.Transactions() {\n\t\t// Assemble the transaction call message and return if the requested offset\n\t\tmsg, _ := tx.AsMessage(signer)\n\t\ttxContext := core.NewEVMTxContext(msg)\n\t\tcontext := core.NewEVMBlockContext(block.Header(), leth.blockchain, nil)\n\t\tstatedb.Prepare(tx.Hash(), block.Hash(), idx)\n\t\tif idx == txIndex {\n\t\t\treturn msg, context, statedb, func() {}, nil\n\t\t}\n\t\t// Not yet the searched for transaction, execute on top of the current state\n\t\tvmenv := 
vm.NewEVM(context, txContext, statedb, leth.blockchain.Config(), vm.Config{})\n\t\tif _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil {\n\t\t\treturn nil, vm.BlockContext{}, nil, nil, fmt.Errorf(\"transaction %#x failed: %v\", tx.Hash(), err)\n\t\t}\n\t\t// Ensure any modifications are committed to the state\n\t\t// Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect\n\t\tstatedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number()))\n\t}\n\treturn nil, vm.BlockContext{}, nil, nil, fmt.Errorf(\"transaction index %d out of range for block %#x\", txIndex, block.Hash())\n}\n"
  },
  {
    "path": "les/sync.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/eth/downloader\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\nvar errInvalidCheckpoint = errors.New(\"invalid advertised checkpoint\")\n\nconst (\n\t// lightSync starts syncing from the current highest block.\n\t// If the chain is empty, syncing the entire header chain.\n\tlightSync = iota\n\n\t// legacyCheckpointSync starts syncing from a hardcoded checkpoint.\n\tlegacyCheckpointSync\n\n\t// checkpointSync starts syncing from a checkpoint signed by trusted\n\t// signer or hardcoded checkpoint for compatibility.\n\tcheckpointSync\n)\n\n// validateCheckpoint verifies the advertised checkpoint by peer is valid or not.\n//\n// Each network has several hard-coded checkpoint signer addresses. Only the\n// checkpoint issued by the specified signer is considered valid.\n//\n// In addition to the checkpoint registered in the registrar contract, there are\n// several legacy hardcoded checkpoints in our codebase. 
These checkpoints are\n// also considered valid.\nfunc (h *clientHandler) validateCheckpoint(peer *serverPeer) error {\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*10)\n\tdefer cancel()\n\n\t// Fetch the block header corresponding to the checkpoint registration.\n\twrapPeer := &peerConnection{handler: h, peer: peer}\n\theader, err := wrapPeer.RetrieveSingleHeaderByNumber(ctx, peer.checkpointNumber)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Fetch block logs associated with the block header.\n\tlogs, err := light.GetUntrustedBlockLogs(ctx, h.backend.odr, header)\n\tif err != nil {\n\t\treturn err\n\t}\n\tevents := h.backend.oracle.Contract().LookupCheckpointEvents(logs, peer.checkpoint.SectionIndex, peer.checkpoint.Hash())\n\tif len(events) == 0 {\n\t\treturn errInvalidCheckpoint\n\t}\n\tvar (\n\t\tindex      = events[0].Index\n\t\thash       = events[0].CheckpointHash\n\t\tsignatures [][]byte\n\t)\n\tfor _, event := range events {\n\t\tsignatures = append(signatures, append(event.R[:], append(event.S[:], event.V)...))\n\t}\n\tvalid, signers := h.backend.oracle.VerifySigners(index, hash, signatures)\n\tif !valid {\n\t\treturn errInvalidCheckpoint\n\t}\n\tlog.Warn(\"Verified advertised checkpoint\", \"peer\", peer.id, \"signers\", len(signers))\n\treturn nil\n}\n\n// synchronise tries to sync up our local chain with a remote peer.\nfunc (h *clientHandler) synchronise(peer *serverPeer) {\n\t// Short circuit if the peer is nil.\n\tif peer == nil {\n\t\treturn\n\t}\n\t// Make sure the peer's TD is higher than our own.\n\tlatest := h.backend.blockchain.CurrentHeader()\n\tcurrentTd := rawdb.ReadTd(h.backend.chainDb, latest.Hash(), latest.Number.Uint64())\n\tif currentTd != nil && peer.Td().Cmp(currentTd) < 0 {\n\t\treturn\n\t}\n\t// Recap the checkpoint. The light client may be connected to several different\n\t// versions of the server.\n\t// (1) Old version server which cannot provide a stable checkpoint in the\n\t//     handshake packet.\n\t//     => Use local checkpoint or empty checkpoint\n\t// (2) New version server but simple checkpoint syncing is not enabled\n\t//     (e.g. mainnet, new testnet or private network)\n\t//     => Use local checkpoint or empty checkpoint\n\t// (3) New version server but the provided stable checkpoint is even lower\n\t//     than the local one.\n\t//     => Use local checkpoint\n\t// (4) New version server with valid and higher stable checkpoint\n\t//     => Use provided checkpoint\n\tvar (\n\t\tlocal      bool\n\t\tcheckpoint = &peer.checkpoint\n\t)\n\tif h.checkpoint != nil && h.checkpoint.SectionIndex >= peer.checkpoint.SectionIndex {\n\t\tlocal, checkpoint = true, h.checkpoint\n\t}\n\t// Replace the checkpoint with the locally configured one if required by the\n\t// user. A nil checkpoint means synchronization from scratch.\n\tif h.backend.config.SyncFromCheckpoint {\n\t\tlocal, checkpoint = true, h.backend.config.Checkpoint\n\t\tif h.backend.config.Checkpoint == nil {\n\t\t\tcheckpoint = &params.TrustedCheckpoint{}\n\t\t}\n\t}\n\t// Determine whether we should run checkpoint syncing or normal light syncing.\n\t//\n\t// There are four situations in which checkpoint syncing is disabled:\n\t//\n\t// 1. The checkpoint is empty\n\t// 2. The latest head block of the local chain is above the checkpoint.\n\t// 3. The checkpoint is local (replaced with the local checkpoint)\n\t// 4. 
For some networks the checkpoint syncing is not activated.\n\tmode := checkpointSync\n\tswitch {\n\tcase checkpoint.Empty():\n\t\tmode = lightSync\n\t\tlog.Debug(\"Disable checkpoint syncing\", \"reason\", \"empty checkpoint\")\n\tcase latest.Number.Uint64() >= (checkpoint.SectionIndex+1)*h.backend.iConfig.ChtSize-1:\n\t\tmode = lightSync\n\t\tlog.Debug(\"Disable checkpoint syncing\", \"reason\", \"local chain beyond the checkpoint\")\n\tcase local:\n\t\tmode = legacyCheckpointSync\n\t\tlog.Debug(\"Disable checkpoint syncing\", \"reason\", \"checkpoint is hardcoded\")\n\tcase h.backend.oracle == nil || !h.backend.oracle.IsRunning():\n\t\tif h.checkpoint == nil {\n\t\t\tmode = lightSync // Downgrade to light sync unfortunately.\n\t\t} else {\n\t\t\tcheckpoint = h.checkpoint\n\t\t\tmode = legacyCheckpointSync\n\t\t}\n\t\tlog.Debug(\"Disable checkpoint syncing\", \"reason\", \"checkpoint syncing is not activated\")\n\t}\n\n\t// Notify the testing framework if syncing has completed (for testing purposes).\n\tdefer func() {\n\t\tif h.syncEnd != nil {\n\t\t\th.syncEnd(h.backend.blockchain.CurrentHeader())\n\t\t}\n\t}()\n\n\tstart := time.Now()\n\tif mode == checkpointSync || mode == legacyCheckpointSync {\n\t\t// Validate the advertised checkpoint\n\t\tif mode == checkpointSync {\n\t\t\tif err := h.validateCheckpoint(peer); err != nil {\n\t\t\t\tlog.Debug(\"Failed to validate checkpoint\", \"reason\", err)\n\t\t\t\th.removePeer(peer.id)\n\t\t\t\treturn\n\t\t\t}\n\t\t\th.backend.blockchain.AddTrustedCheckpoint(checkpoint)\n\t\t}\n\t\tlog.Debug(\"Checkpoint syncing start\", \"peer\", peer.id, \"checkpoint\", checkpoint.SectionIndex)\n\n\t\t// Fetch the start point block header.\n\t\t//\n\t\t// For the ethash consensus engine, the start header is the block header\n\t\t// of the checkpoint.\n\t\t//\n\t\t// For the clique consensus engine, the start header is the block header\n\t\t// of the latest epoch covered by the checkpoint.\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\t\tdefer cancel()\n\t\tif !checkpoint.Empty() && !h.backend.blockchain.SyncCheckpoint(ctx, checkpoint) {\n\t\t\tlog.Debug(\"Sync checkpoint failed\")\n\t\t\th.removePeer(peer.id)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif h.syncStart != nil {\n\t\th.syncStart(h.backend.blockchain.CurrentHeader())\n\t}\n\t// Fetch the remaining block headers based on the current chain header.\n\tif err := h.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), downloader.LightSync); err != nil {\n\t\tlog.Debug(\"Synchronise failed\", \"reason\", err)\n\t\treturn\n\t}\n\tlog.Debug(\"Synchronise finished\", \"elapsed\", common.PrettyDuration(time.Since(start)))\n}\n"
  },
  {
    "path": "les/sync_test.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"fmt\"\n\t\"math/big\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/accounts/abi/bind\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\n// Test light syncing which will download all headers from genesis.\nfunc TestLightSyncingLes3(t *testing.T) { testCheckpointSyncing(t, lpv3, 0) }\n\n// Test legacy checkpoint syncing which will download tail headers\n// based on a hardcoded checkpoint.\nfunc TestLegacyCheckpointSyncingLes3(t *testing.T) { testCheckpointSyncing(t, lpv3, 1) }\n\n// Test checkpoint syncing which will download tail headers based\n// on a verified checkpoint.\nfunc TestCheckpointSyncingLes3(t *testing.T) { testCheckpointSyncing(t, lpv3, 2) }\n\nfunc testCheckpointSyncing(t *testing.T, protocol int, syncMode int) {\n\tconfig := light.TestServerIndexerConfig\n\n\twaitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {\n\t\tfor {\n\t\t\tcs, _, _ := cIndexer.Sections()\n\t\t\tbts, _, _ := btIndexer.Sections()\n\t\t\tif cs >= 1 && bts >= 1 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t}\n\t// Generate 128+1 blocks (totally 1 CHT section)\n\tnetconfig := testnetConfig{\n\t\tblocks:    int(config.ChtSize + config.ChtConfirms),\n\t\tprotocol:  protocol,\n\t\tindexFn:   waitIndexers,\n\t\tnopruning: true,\n\t}\n\tserver, client, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\texpected := config.ChtSize + config.ChtConfirms\n\n\t// Checkpoint syncing or legacy checkpoint syncing.\n\tif syncMode == 1 || syncMode == 2 {\n\t\t// Assemble checkpoint 0\n\t\ts, _, head := server.chtIndexer.Sections()\n\t\tcp := &params.TrustedCheckpoint{\n\t\t\tSectionIndex: 0,\n\t\t\tSectionHead:  head,\n\t\t\tCHTRoot:      light.GetChtRoot(server.db, s-1, head),\n\t\t\tBloomRoot:    light.GetBloomTrieRoot(server.db, s-1, head),\n\t\t}\n\t\tif syncMode == 1 {\n\t\t\t// Register the assembled checkpoint as hardcoded one.\n\t\t\tclient.handler.checkpoint = cp\n\t\t\tclient.handler.backend.blockchain.AddTrustedCheckpoint(cp)\n\t\t} else {\n\t\t\t// Register the assembled checkpoint into oracle.\n\t\t\theader := server.backend.Blockchain().CurrentHeader()\n\n\t\t\tdata := append([]byte{0x19, 0x00}, append(oracleAddr.Bytes(), append([]byte{0, 0, 0, 0, 0, 0, 0, 0}, cp.Hash().Bytes()...)...)...)\n\t\t\tsig, _ := crypto.Sign(crypto.Keccak256(data), signerKey)\n\t\t\tsig[64] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper\n\t\t\tauth, _ := bind.NewKeyedTransactorWithChainID(signerKey, 
big.NewInt(1337))\n\t\t\tif _, err := server.handler.server.oracle.Contract().RegisterCheckpoint(auth, cp.SectionIndex, cp.Hash().Bytes(), new(big.Int).Sub(header.Number, big.NewInt(1)), header.ParentHash, [][]byte{sig}); err != nil {\n\t\t\t\tt.Error(\"register checkpoint failed\", err)\n\t\t\t}\n\t\t\tserver.backend.Commit()\n\n\t\t\t// Wait for the checkpoint registration\n\t\t\tfor {\n\t\t\t\t_, hash, _, err := server.handler.server.oracle.Contract().Contract().GetLatestCheckpoint(nil)\n\t\t\t\tif err != nil || hash == [32]byte{} {\n\t\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\texpected += 1\n\t\t}\n\t}\n\n\tdone := make(chan error)\n\tclient.handler.syncEnd = func(header *types.Header) {\n\t\tif header.Number.Uint64() == expected {\n\t\t\tdone <- nil\n\t\t} else {\n\t\t\tdone <- fmt.Errorf(\"blockchain length mismatch, want %d, got %d\", expected, header.Number)\n\t\t}\n\t}\n\n\t// Create connected peer pair.\n\tpeer1, peer2, err := newTestPeerPair(\"peer\", protocol, server.handler, client.handler)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect testing peers %v\", err)\n\t}\n\tdefer peer1.close()\n\tdefer peer2.close()\n\n\tselect {\n\tcase err := <-done:\n\t\tif err != nil {\n\t\t\tt.Error(\"sync failed\", err)\n\t\t}\n\t\treturn\n\tcase <-time.NewTimer(10 * time.Second).C:\n\t\tt.Error(\"checkpoint syncing timeout\")\n\t}\n}\n\nfunc TestMissOracleBackendLES3(t *testing.T)             { testMissOracleBackend(t, true, lpv3) }\nfunc TestMissOracleBackendNoCheckpointLES3(t *testing.T) { testMissOracleBackend(t, false, lpv3) }\n\nfunc testMissOracleBackend(t *testing.T, hasCheckpoint bool, protocol int) {\n\tconfig := light.TestServerIndexerConfig\n\n\twaitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {\n\t\tfor {\n\t\t\tcs, _, _ := cIndexer.Sections()\n\t\t\tbts, _, _ := btIndexer.Sections()\n\t\t\tif cs >= 1 && bts >= 1 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t}\n\t// Generate 128+1 blocks (totally 1 CHT section)\n\tnetconfig := testnetConfig{\n\t\tblocks:    int(config.ChtSize + config.ChtConfirms),\n\t\tprotocol:  protocol,\n\t\tindexFn:   waitIndexers,\n\t\tnopruning: true,\n\t}\n\tserver, client, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\texpected := config.ChtSize + config.ChtConfirms\n\n\ts, _, head := server.chtIndexer.Sections()\n\tcp := &params.TrustedCheckpoint{\n\t\tSectionIndex: 0,\n\t\tSectionHead:  head,\n\t\tCHTRoot:      light.GetChtRoot(server.db, s-1, head),\n\t\tBloomRoot:    light.GetBloomTrieRoot(server.db, s-1, head),\n\t}\n\t// Register the assembled checkpoint into oracle.\n\theader := server.backend.Blockchain().CurrentHeader()\n\n\tdata := append([]byte{0x19, 0x00}, append(oracleAddr.Bytes(), append([]byte{0, 0, 0, 0, 0, 0, 0, 0}, cp.Hash().Bytes()...)...)...)\n\tsig, _ := crypto.Sign(crypto.Keccak256(data), signerKey)\n\tsig[64] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper\n\tauth, _ := bind.NewKeyedTransactorWithChainID(signerKey, big.NewInt(1337))\n\tif _, err := server.handler.server.oracle.Contract().RegisterCheckpoint(auth, cp.SectionIndex, cp.Hash().Bytes(), new(big.Int).Sub(header.Number, big.NewInt(1)), header.ParentHash, [][]byte{sig}); err != nil {\n\t\tt.Error(\"register checkpoint failed\", err)\n\t}\n\tserver.backend.Commit()\n\n\t// Wait for the checkpoint registration\n\tfor {\n\t\t_, hash, _, err := 
server.handler.server.oracle.Contract().Contract().GetLatestCheckpoint(nil)\n\t\tif err != nil || hash == [32]byte{} {\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\texpected += 1\n\n\t// Explicitly set the oracle to nil. In a normal use case it can happen\n\t// that the user wants to unlock something which blocks the oracle backend\n\t// initialisation, while syncing starts at the same time.\n\t//\n\t// See https://github.com/ethereum/go-ethereum/issues/20097 for more detail.\n\t//\n\t// In this case, the client should run light sync, or legacy checkpoint sync\n\t// if a hardcoded checkpoint is configured.\n\tclient.handler.backend.oracle = nil\n\n\t// For some private networks it can happen that checkpoint syncing is enabled\n\t// but no hardcoded checkpoint is configured.\n\tif hasCheckpoint {\n\t\tclient.handler.checkpoint = cp\n\t\tclient.handler.backend.blockchain.AddTrustedCheckpoint(cp)\n\t}\n\n\tdone := make(chan error)\n\tclient.handler.syncEnd = func(header *types.Header) {\n\t\tif header.Number.Uint64() == expected {\n\t\t\tdone <- nil\n\t\t} else {\n\t\t\tdone <- fmt.Errorf(\"blockchain length mismatch, want %d, got %d\", expected, header.Number)\n\t\t}\n\t}\n\t// Create connected peer pair.\n\tif _, _, err := newTestPeerPair(\"peer\", protocol, server.handler, client.handler); err != nil {\n\t\tt.Fatalf(\"Failed to connect testing peers %v\", err)\n\t}\n\tselect {\n\tcase err := <-done:\n\t\tif err != nil {\n\t\t\tt.Error(\"sync failed\", err)\n\t\t}\n\t\treturn\n\tcase <-time.NewTimer(10 * time.Second).C:\n\t\tt.Error(\"checkpoint syncing timeout\")\n\t}\n}\n\nfunc TestSyncFromConfiguredCheckpointLES3(t *testing.T) { testSyncFromConfiguredCheckpoint(t, lpv3) }\n\nfunc testSyncFromConfiguredCheckpoint(t *testing.T, protocol int) {\n\tconfig := light.TestServerIndexerConfig\n\n\twaitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {\n\t\tfor {\n\t\t\tcs, _, _ := cIndexer.Sections()\n\t\t\tbts, _, _ := btIndexer.Sections()\n\t\t\tif cs >= 2 && bts >= 2 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t}\n\t// Generate 256+1 blocks (2 CHT sections in total)\n\tnetconfig := testnetConfig{\n\t\tblocks:    int(2*config.ChtSize + config.ChtConfirms),\n\t\tprotocol:  protocol,\n\t\tindexFn:   waitIndexers,\n\t\tnopruning: true,\n\t}\n\tserver, client, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\t// Configure the local checkpoint (the first section)\n\thead := server.handler.blockchain.GetHeaderByNumber(config.ChtSize - 1).Hash()\n\tcp := &params.TrustedCheckpoint{\n\t\tSectionIndex: 0,\n\t\tSectionHead:  head,\n\t\tCHTRoot:      light.GetChtRoot(server.db, 0, head),\n\t\tBloomRoot:    light.GetBloomTrieRoot(server.db, 0, head),\n\t}\n\tclient.handler.backend.config.SyncFromCheckpoint = true\n\tclient.handler.backend.config.Checkpoint = cp\n\tclient.handler.checkpoint = cp\n\tclient.handler.backend.blockchain.AddTrustedCheckpoint(cp)\n\n\tvar (\n\t\tstart       = make(chan error, 1)\n\t\tend         = make(chan error, 1)\n\t\texpectStart = config.ChtSize - 1\n\t\texpectEnd   = 2*config.ChtSize + config.ChtConfirms\n\t)\n\tclient.handler.syncStart = func(header *types.Header) {\n\t\tif header.Number.Uint64() == expectStart {\n\t\t\tstart <- nil\n\t\t} else {\n\t\t\tstart <- fmt.Errorf(\"blockchain length mismatch, want %d, got %d\", expectStart, header.Number)\n\t\t}\n\t}\n\tclient.handler.syncEnd = func(header *types.Header) {\n\t\tif header.Number.Uint64() == expectEnd {\n\t\t\tend <- nil\n\t\t} else 
{\n\t\t\tend <- fmt.Errorf(\"blockchain length mismatch, want %d, got %d\", expectEnd, header.Number)\n\t\t}\n\t}\n\t// Create connected peer pair.\n\tif _, _, err := newTestPeerPair(\"peer\", protocol, server.handler, client.handler); err != nil {\n\t\tt.Fatalf(\"Failed to connect testing peers %v\", err)\n\t}\n\n\tselect {\n\tcase err := <-start:\n\t\tif err != nil {\n\t\t\tt.Error(\"sync failed\", err)\n\t\t}\n\tcase <-time.NewTimer(10 * time.Second).C:\n\t\tt.Error(\"checkpoint syncing timeout\")\n\t}\n\n\tselect {\n\tcase err := <-end:\n\t\tif err != nil {\n\t\t\tt.Error(\"sync failed\", err)\n\t\t}\n\t\treturn\n\tcase <-time.NewTimer(10 * time.Second).C:\n\t\tt.Error(\"checkpoint syncing timeout\")\n\t}\n}\n\nfunc TestSyncAll(t *testing.T) { testSyncAll(t, lpv3) }\n\nfunc testSyncAll(t *testing.T, protocol int) {\n\tconfig := light.TestServerIndexerConfig\n\n\twaitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {\n\t\tfor {\n\t\t\tcs, _, _ := cIndexer.Sections()\n\t\t\tbts, _, _ := btIndexer.Sections()\n\t\t\tif cs >= 2 && bts >= 2 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t}\n\t}\n\t// Generate 256+1 blocks (2 CHT sections in total)\n\tnetconfig := testnetConfig{\n\t\tblocks:    int(2*config.ChtSize + config.ChtConfirms),\n\t\tprotocol:  protocol,\n\t\tindexFn:   waitIndexers,\n\t\tnopruning: true,\n\t}\n\tserver, client, tearDown := newClientServerEnv(t, netconfig)\n\tdefer tearDown()\n\n\tclient.handler.backend.config.SyncFromCheckpoint = true\n\n\tvar (\n\t\tstart       = make(chan error, 1)\n\t\tend         = make(chan error, 1)\n\t\texpectStart = uint64(0)\n\t\texpectEnd   = 2*config.ChtSize + config.ChtConfirms\n\t)\n\tclient.handler.syncStart = func(header *types.Header) {\n\t\tif header.Number.Uint64() == expectStart {\n\t\t\tstart <- nil\n\t\t} else {\n\t\t\tstart <- fmt.Errorf(\"blockchain length mismatch, want %d, got %d\", expectStart, header.Number)\n\t\t}\n\t}\n\tclient.handler.syncEnd = func(header *types.Header) {\n\t\tif header.Number.Uint64() == expectEnd {\n\t\t\tend <- nil\n\t\t} else {\n\t\t\tend <- fmt.Errorf(\"blockchain length mismatch, want %d, got %d\", expectEnd, header.Number)\n\t\t}\n\t}\n\t// Create connected peer pair.\n\tif _, _, err := newTestPeerPair(\"peer\", protocol, server.handler, client.handler); err != nil {\n\t\tt.Fatalf(\"Failed to connect testing peers %v\", err)\n\t}\n\n\tselect {\n\tcase err := <-start:\n\t\tif err != nil {\n\t\t\tt.Error(\"sync failed\", err)\n\t\t}\n\tcase <-time.NewTimer(10 * time.Second).C:\n\t\tt.Error(\"checkpoint syncing timeout\")\n\t}\n\n\tselect {\n\tcase err := <-end:\n\t\tif err != nil {\n\t\t\tt.Error(\"sync failed\", err)\n\t\t}\n\t\treturn\n\tcase <-time.NewTimer(10 * time.Second).C:\n\t\tt.Error(\"checkpoint syncing timeout\")\n\t}\n}\n"
  },
  {
    "path": "les/test_helper.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\n// This file contains some shares testing functionality, common to multiple\n// different files and modules being tested. Client based network and Server\n// based network can be created easily with available APIs.\n\npackage les\n\nimport (\n\t\"context\"\n\t\"crypto/rand\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/accounts/abi/bind\"\n\t\"github.com/ethereum/go-ethereum/accounts/abi/bind/backends\"\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/consensus/ethash\"\n\t\"github.com/ethereum/go-ethereum/contracts/checkpointoracle/contract\"\n\t\"github.com/ethereum/go-ethereum/core\"\n\t\"github.com/ethereum/go-ethereum/core/forkid\"\n\t\"github.com/ethereum/go-ethereum/core/rawdb\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/eth/ethconfig\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/event\"\n\t\"github.com/ethereum/go-ethereum/les/checkpointoracle\"\n\t\"github.com/ethereum/go-ethereum/les/flowcontrol\"\n\t\"github.com/ethereum/go-ethereum/light\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n\t\"github.com/ethereum/go-ethereum/params\"\n)\n\nvar (\n\tbankKey, _ = crypto.GenerateKey()\n\tbankAddr   = crypto.PubkeyToAddress(bankKey.PublicKey)\n\tbankFunds  = big.NewInt(1000000000000000000)\n\n\tuserKey1, _ = crypto.GenerateKey()\n\tuserKey2, _ = crypto.GenerateKey()\n\tuserAddr1   = crypto.PubkeyToAddress(userKey1.PublicKey)\n\tuserAddr2   = crypto.PubkeyToAddress(userKey2.PublicKey)\n\n\ttestContractAddr         common.Address\n\ttestContractCode         = common.Hex2Bytes(\"606060405260cc8060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360cd2685146041578063c16431b914606b57603f565b005b6055600480803590602001909190505060a9565b6040518082815260200191505060405180910390f35b60886004808035906020019091908035906020019091905050608a565b005b80600060005083606481101560025790900160005b50819055505b5050565b6000600060005082606481101560025790900160005b5054905060c7565b91905056\")\n\ttestContractCodeDeployed = testContractCode[16:]\n\ttestContractDeployed     = uint64(2)\n\n\ttestEventEmitterCode = common.Hex2Bytes(\"60606040523415600e57600080fd5b7f57050ab73f6b9ebdd9f76b8d4997793f48cf956e965ee070551b9ca0bb71584e60405160405180910390a160358060476000396000f3006060604052600080fd00a165627a7a723058203f727efcad8b5811f8cb1fc2620ce5e8c63570d697aef968172de296ea3994140029\")\n\n\t// 
Checkpoint oracle related fields\n\toracleAddr   common.Address\n\tsignerKey, _ = crypto.GenerateKey()\n\tsignerAddr   = crypto.PubkeyToAddress(signerKey.PublicKey)\n)\n\nvar (\n\t// The block frequency for creating checkpoints (only used in tests)\n\tsectionSize = big.NewInt(128)\n\n\t// The number of confirmations needed to generate a checkpoint (only used in tests).\n\tprocessConfirms = big.NewInt(1)\n\n\t// The token bucket buffer limit for testing purposes.\n\ttestBufLimit = uint64(1000000)\n\n\t// The buffer recharging speed for testing purposes.\n\ttestBufRecharge = uint64(1000)\n)\n\n/*\ncontract test {\n\n    uint256[100] data;\n\n    function Put(uint256 addr, uint256 value) {\n        data[addr] = value;\n    }\n\n    function Get(uint256 addr) constant returns (uint256 value) {\n        return data[addr];\n    }\n}\n*/\n\n// prepare pre-commits the specified number of customized blocks into the chain.\nfunc prepare(n int, backend *backends.SimulatedBackend) {\n\tvar (\n\t\tctx    = context.Background()\n\t\tsigner = types.HomesteadSigner{}\n\t)\n\tfor i := 0; i < n; i++ {\n\t\tswitch i {\n\t\tcase 0:\n\t\t\t// Builtin-block\n\t\t\t//    number: 1\n\t\t\t//    txs:    2\n\n\t\t\t// deploy checkpoint contract\n\t\t\tauth, _ := bind.NewKeyedTransactorWithChainID(bankKey, big.NewInt(1337))\n\t\t\toracleAddr, _, _, _ = contract.DeployCheckpointOracle(auth, backend, []common.Address{signerAddr}, sectionSize, processConfirms, big.NewInt(1))\n\n\t\t\t// bankUser transfers some ether to user1\n\t\t\tnonce, _ := backend.PendingNonceAt(ctx, bankAddr)\n\t\t\ttx, _ := types.SignTx(types.NewTransaction(nonce, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey)\n\t\t\tbackend.SendTransaction(ctx, tx)\n\t\tcase 1:\n\t\t\t// Builtin-block\n\t\t\t//    number: 2\n\t\t\t//    txs:    4\n\n\t\t\tbankNonce, _ := backend.PendingNonceAt(ctx, bankAddr)\n\t\t\tuserNonce1, _ := backend.PendingNonceAt(ctx, userAddr1)\n\n\t\t\t// bankUser transfers more ether to user1\n\t\t\ttx1, _ := types.SignTx(types.NewTransaction(bankNonce, userAddr1, big.NewInt(1000), params.TxGas, nil, nil), signer, bankKey)\n\t\t\tbackend.SendTransaction(ctx, tx1)\n\n\t\t\t// user1 relays ether to user2\n\t\t\ttx2, _ := types.SignTx(types.NewTransaction(userNonce1, userAddr2, big.NewInt(1000), params.TxGas, nil, nil), signer, userKey1)\n\t\t\tbackend.SendTransaction(ctx, tx2)\n\n\t\t\t// user1 deploys a test contract\n\t\t\ttx3, _ := types.SignTx(types.NewContractCreation(userNonce1+1, big.NewInt(0), 200000, big.NewInt(0), testContractCode), signer, userKey1)\n\t\t\tbackend.SendTransaction(ctx, tx3)\n\t\t\ttestContractAddr = crypto.CreateAddress(userAddr1, userNonce1+1)\n\n\t\t\t// user1 deploys an event contract\n\t\t\ttx4, _ := types.SignTx(types.NewContractCreation(userNonce1+2, big.NewInt(0), 200000, big.NewInt(0), testEventEmitterCode), signer, userKey1)\n\t\t\tbackend.SendTransaction(ctx, tx4)\n\t\tcase 2:\n\t\t\t// Builtin-block\n\t\t\t//    number: 3\n\t\t\t//    txs:    2\n\n\t\t\t// bankUser transfers some ether to the signer\n\t\t\tbankNonce, _ := backend.PendingNonceAt(ctx, bankAddr)\n\t\t\ttx1, _ := types.SignTx(types.NewTransaction(bankNonce, signerAddr, big.NewInt(1000000000), params.TxGas, nil, nil), signer, bankKey)\n\t\t\tbackend.SendTransaction(ctx, tx1)\n\n\t\t\t// invoke test contract\n\t\t\tdata := common.Hex2Bytes(\"C16431B900000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001\")\n\t\t\ttx2, _ := 
types.SignTx(types.NewTransaction(bankNonce+1, testContractAddr, big.NewInt(0), 100000, nil, data), signer, bankKey)\n\t\t\tbackend.SendTransaction(ctx, tx2)\n\t\tcase 3:\n\t\t\t// Builtin-block\n\t\t\t//    number: 4\n\t\t\t//    txs:    1\n\n\t\t\t// invoke test contract\n\t\t\tbankNonce, _ := backend.PendingNonceAt(ctx, bankAddr)\n\t\t\tdata := common.Hex2Bytes(\"C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002\")\n\t\t\ttx, _ := types.SignTx(types.NewTransaction(bankNonce, testContractAddr, big.NewInt(0), 100000, nil, data), signer, bankKey)\n\t\t\tbackend.SendTransaction(ctx, tx)\n\t\t}\n\t\tbackend.Commit()\n\t}\n}\n\n// testIndexers creates a set of indexers with specified params for testing purpose.\nfunc testIndexers(db ethdb.Database, odr light.OdrBackend, config *light.IndexerConfig, disablePruning bool) []*core.ChainIndexer {\n\tvar indexers [3]*core.ChainIndexer\n\tindexers[0] = light.NewChtIndexer(db, odr, config.ChtSize, config.ChtConfirms, disablePruning)\n\tindexers[1] = core.NewBloomIndexer(db, config.BloomSize, config.BloomConfirms)\n\tindexers[2] = light.NewBloomTrieIndexer(db, odr, config.BloomSize, config.BloomTrieSize, disablePruning)\n\t// make bloomTrieIndexer as a child indexer of bloom indexer.\n\tindexers[1].AddChildIndexer(indexers[2])\n\treturn indexers[:]\n}\n\nfunc newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, indexers []*core.ChainIndexer, db ethdb.Database, peers *serverPeerSet, ulcServers []string, ulcFraction int) *clientHandler {\n\tvar (\n\t\tevmux  = new(event.TypeMux)\n\t\tengine = ethash.NewFaker()\n\t\tgspec  = core.Genesis{\n\t\t\tConfig:   params.AllEthashProtocolChanges,\n\t\t\tAlloc:    core.GenesisAlloc{bankAddr: {Balance: bankFunds}},\n\t\t\tGasLimit: 100000000,\n\t\t}\n\t\toracle *checkpointoracle.CheckpointOracle\n\t)\n\tgenesis := gspec.MustCommit(db)\n\tchain, _ := light.NewLightChain(odr, gspec.Config, engine, nil)\n\tif indexers != nil {\n\t\tcheckpointConfig := &params.CheckpointOracleConfig{\n\t\t\tAddress:   crypto.CreateAddress(bankAddr, 0),\n\t\t\tSigners:   []common.Address{signerAddr},\n\t\t\tThreshold: 1,\n\t\t}\n\t\tgetLocal := func(index uint64) params.TrustedCheckpoint {\n\t\t\tchtIndexer := indexers[0]\n\t\t\tsectionHead := chtIndexer.SectionHead(index)\n\t\t\treturn params.TrustedCheckpoint{\n\t\t\t\tSectionIndex: index,\n\t\t\t\tSectionHead:  sectionHead,\n\t\t\t\tCHTRoot:      light.GetChtRoot(db, index, sectionHead),\n\t\t\t\tBloomRoot:    light.GetBloomTrieRoot(db, index, sectionHead),\n\t\t\t}\n\t\t}\n\t\toracle = checkpointoracle.New(checkpointConfig, getLocal)\n\t}\n\tclient := &LightEthereum{\n\t\tlesCommons: lesCommons{\n\t\t\tgenesis:     genesis.Hash(),\n\t\t\tconfig:      &ethconfig.Config{LightPeers: 100, NetworkId: NetworkId},\n\t\t\tchainConfig: params.AllEthashProtocolChanges,\n\t\t\tiConfig:     light.TestClientIndexerConfig,\n\t\t\tchainDb:     db,\n\t\t\toracle:      oracle,\n\t\t\tchainReader: chain,\n\t\t\tcloseCh:     make(chan struct{}),\n\t\t},\n\t\tpeers:      peers,\n\t\treqDist:    odr.retriever.dist,\n\t\tretriever:  odr.retriever,\n\t\todr:        odr,\n\t\tengine:     engine,\n\t\tblockchain: chain,\n\t\teventMux:   evmux,\n\t}\n\tclient.handler = newClientHandler(ulcServers, ulcFraction, nil, client)\n\n\tif client.oracle != nil {\n\t\tclient.oracle.Start(backend)\n\t}\n\tclient.handler.start()\n\treturn client.handler\n}\n\nfunc newTestServerHandler(blocks int, indexers 
[]*core.ChainIndexer, db ethdb.Database, clock mclock.Clock) (*serverHandler, *backends.SimulatedBackend) {\n\tvar (\n\t\tgspec = core.Genesis{\n\t\t\tConfig:   params.AllEthashProtocolChanges,\n\t\t\tAlloc:    core.GenesisAlloc{bankAddr: {Balance: bankFunds}},\n\t\t\tGasLimit: 100000000,\n\t\t}\n\t\toracle *checkpointoracle.CheckpointOracle\n\t)\n\tgenesis := gspec.MustCommit(db)\n\n\t// create a simulation backend and pre-commit several customized block to the database.\n\tsimulation := backends.NewSimulatedBackendWithDatabase(db, gspec.Alloc, 100000000)\n\tprepare(blocks, simulation)\n\n\ttxpoolConfig := core.DefaultTxPoolConfig\n\ttxpoolConfig.Journal = \"\"\n\ttxpool := core.NewTxPool(txpoolConfig, gspec.Config, simulation.Blockchain())\n\tif indexers != nil {\n\t\tcheckpointConfig := &params.CheckpointOracleConfig{\n\t\t\tAddress:   crypto.CreateAddress(bankAddr, 0),\n\t\t\tSigners:   []common.Address{signerAddr},\n\t\t\tThreshold: 1,\n\t\t}\n\t\tgetLocal := func(index uint64) params.TrustedCheckpoint {\n\t\t\tchtIndexer := indexers[0]\n\t\t\tsectionHead := chtIndexer.SectionHead(index)\n\t\t\treturn params.TrustedCheckpoint{\n\t\t\t\tSectionIndex: index,\n\t\t\t\tSectionHead:  sectionHead,\n\t\t\t\tCHTRoot:      light.GetChtRoot(db, index, sectionHead),\n\t\t\t\tBloomRoot:    light.GetBloomTrieRoot(db, index, sectionHead),\n\t\t\t}\n\t\t}\n\t\toracle = checkpointoracle.New(checkpointConfig, getLocal)\n\t}\n\tns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup)\n\tserver := &LesServer{\n\t\tlesCommons: lesCommons{\n\t\t\tgenesis:     genesis.Hash(),\n\t\t\tconfig:      &ethconfig.Config{LightPeers: 100, NetworkId: NetworkId},\n\t\t\tchainConfig: params.AllEthashProtocolChanges,\n\t\t\tiConfig:     light.TestServerIndexerConfig,\n\t\t\tchainDb:     db,\n\t\t\tchainReader: simulation.Blockchain(),\n\t\t\toracle:      oracle,\n\t\t\tcloseCh:     make(chan struct{}),\n\t\t},\n\t\tns:           ns,\n\t\tbroadcaster:  newBroadcaster(ns),\n\t\tservingQueue: newServingQueue(int64(time.Millisecond*10), 1),\n\t\tdefParams: flowcontrol.ServerParams{\n\t\t\tBufLimit:    testBufLimit,\n\t\t\tMinRecharge: testBufRecharge,\n\t\t},\n\t\tfcManager: flowcontrol.NewClientManager(nil, clock),\n\t}\n\tserver.costTracker, server.minCapacity = newCostTracker(db, server.config)\n\tserver.costTracker.testCostList = testCostList(0) // Disable flow control mechanism.\n\tserver.clientPool = newClientPool(ns, db, testBufRecharge, defaultConnectedBias, clock, func(id enode.ID) {}, alwaysTrueFn)\n\tserver.clientPool.setLimits(10000, 10000) // Assign enough capacity for clientpool\n\tserver.handler = newServerHandler(server, simulation.Blockchain(), db, txpool, func() bool { return true })\n\tif server.oracle != nil {\n\t\tserver.oracle.Start(simulation)\n\t}\n\tserver.servingQueue.setThreads(4)\n\tns.Start()\n\tserver.handler.start()\n\treturn server.handler, simulation\n}\n\nfunc alwaysTrueFn() bool {\n\treturn true\n}\n\n// testPeer is a simulated peer to allow testing direct network calls.\ntype testPeer struct {\n\tcpeer *clientPeer\n\tspeer *serverPeer\n\n\tnet p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging\n\tapp *p2p.MsgPipeRW    // Application layer reader/writer to simulate the local side\n}\n\n// handshakeWithServer executes the handshake with the remote server peer.\nfunc (p *testPeer) handshakeWithServer(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID) {\n\t// It only works for the simulated client 
peer\n\tif p.cpeer == nil {\n\t\tt.Fatal(\"handshake for client peer only\")\n\t}\n\tvar sendList keyValueList\n\tsendList = sendList.add(\"protocolVersion\", uint64(p.cpeer.version))\n\tsendList = sendList.add(\"networkId\", uint64(NetworkId))\n\tsendList = sendList.add(\"headTd\", td)\n\tsendList = sendList.add(\"headHash\", head)\n\tsendList = sendList.add(\"headNum\", headNum)\n\tsendList = sendList.add(\"genesisHash\", genesis)\n\tif p.cpeer.version >= lpv4 {\n\t\tsendList = sendList.add(\"forkID\", &forkID)\n\t}\n\tif err := p2p.ExpectMsg(p.app, StatusMsg, nil); err != nil {\n\t\tt.Fatalf(\"status recv: %v\", err)\n\t}\n\tif err := p2p.Send(p.app, StatusMsg, sendList); err != nil {\n\t\tt.Fatalf(\"status send: %v\", err)\n\t}\n}\n\n// handshakeWithClient executes the handshake with the remote client peer.\nfunc (p *testPeer) handshakeWithClient(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, costList RequestCostList, recentTxLookup uint64) {\n\t// It only works for the simulated server peer\n\tif p.speer == nil {\n\t\tt.Fatal(\"handshake for server peer only\")\n\t}\n\tvar sendList keyValueList\n\tsendList = sendList.add(\"protocolVersion\", uint64(p.speer.version))\n\tsendList = sendList.add(\"networkId\", uint64(NetworkId))\n\tsendList = sendList.add(\"headTd\", td)\n\tsendList = sendList.add(\"headHash\", head)\n\tsendList = sendList.add(\"headNum\", headNum)\n\tsendList = sendList.add(\"genesisHash\", genesis)\n\tsendList = sendList.add(\"serveHeaders\", nil)\n\tsendList = sendList.add(\"serveChainSince\", uint64(0))\n\tsendList = sendList.add(\"serveStateSince\", uint64(0))\n\tsendList = sendList.add(\"serveRecentState\", uint64(core.TriesInMemory-4))\n\tsendList = sendList.add(\"txRelay\", nil)\n\tsendList = sendList.add(\"flowControl/BL\", testBufLimit)\n\tsendList = sendList.add(\"flowControl/MRR\", testBufRecharge)\n\tsendList = sendList.add(\"flowControl/MRC\", costList)\n\tif p.speer.version >= lpv4 {\n\t\tsendList = sendList.add(\"forkID\", &forkID)\n\t\tsendList = sendList.add(\"recentTxLookup\", recentTxLookup)\n\t}\n\tif err := p2p.ExpectMsg(p.app, StatusMsg, nil); err != nil {\n\t\tt.Fatalf(\"status recv: %v\", err)\n\t}\n\tif err := p2p.Send(p.app, StatusMsg, sendList); err != nil {\n\t\tt.Fatalf(\"status send: %v\", err)\n\t}\n}\n\n// close terminates the local side of the peer, notifying the remote protocol\n// manager of termination.\nfunc (p *testPeer) close() {\n\tp.app.Close()\n}\n\nfunc newTestPeerPair(name string, version int, server *serverHandler, client *clientHandler) (*testPeer, *testPeer, error) {\n\t// Create a message pipe to communicate through\n\tapp, net := p2p.MsgPipe()\n\n\t// Generate a random id and create the peer\n\tvar id enode.ID\n\trand.Read(id[:])\n\n\tpeer1 := newClientPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)\n\tpeer2 := newServerPeer(version, NetworkId, false, p2p.NewPeer(id, name, nil), app)\n\n\t// Start the peer on a new thread\n\terrc1 := make(chan error, 1)\n\terrc2 := make(chan error, 1)\n\tgo func() {\n\t\tselect {\n\t\tcase <-server.closeCh:\n\t\t\terrc1 <- p2p.DiscQuitting\n\t\tcase errc1 <- server.handle(peer1):\n\t\t}\n\t}()\n\tgo func() {\n\t\tselect {\n\t\tcase <-client.closeCh:\n\t\t\terrc2 <- p2p.DiscQuitting\n\t\tcase errc2 <- client.handle(peer2):\n\t\t}\n\t}()\n\t// Ensure the connection is established or exit when any error occurs\n\tfor {\n\t\tselect {\n\t\tcase err := <-errc1:\n\t\t\treturn nil, nil, fmt.Errorf(\"Failed to establish protocol 
connection %v\", err)\n\t\tcase err := <-errc2:\n\t\t\treturn nil, nil, fmt.Errorf(\"Failed to establish protocol connection %v\", err)\n\t\tdefault:\n\t\t}\n\t\tif atomic.LoadUint32(&peer1.serving) == 1 && atomic.LoadUint32(&peer2.serving) == 1 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}\n\treturn &testPeer{cpeer: peer1, net: net, app: app}, &testPeer{speer: peer2, net: app, app: net}, nil\n}\n\ntype indexerCallback func(*core.ChainIndexer, *core.ChainIndexer, *core.ChainIndexer)\n\n// testClient represents a client object for testing with necessary auxiliary fields.\ntype testClient struct {\n\tclock   mclock.Clock\n\tdb      ethdb.Database\n\tpeer    *testPeer\n\thandler *clientHandler\n\n\tchtIndexer       *core.ChainIndexer\n\tbloomIndexer     *core.ChainIndexer\n\tbloomTrieIndexer *core.ChainIndexer\n}\n\n// newRawPeer creates a new server peer connects to the server and do the handshake.\nfunc (client *testClient) newRawPeer(t *testing.T, name string, version int, recentTxLookup uint64) (*testPeer, func(), <-chan error) {\n\t// Create a message pipe to communicate through\n\tapp, net := p2p.MsgPipe()\n\n\t// Generate a random id and create the peer\n\tvar id enode.ID\n\trand.Read(id[:])\n\tpeer := newServerPeer(version, NetworkId, false, p2p.NewPeer(id, name, nil), net)\n\n\t// Start the peer on a new thread\n\terrCh := make(chan error, 1)\n\tgo func() {\n\t\tselect {\n\t\tcase <-client.handler.closeCh:\n\t\t\terrCh <- p2p.DiscQuitting\n\t\tcase errCh <- client.handler.handle(peer):\n\t\t}\n\t}()\n\ttp := &testPeer{\n\t\tapp:   app,\n\t\tnet:   net,\n\t\tspeer: peer,\n\t}\n\tvar (\n\t\tgenesis = client.handler.backend.blockchain.Genesis()\n\t\thead    = client.handler.backend.blockchain.CurrentHeader()\n\t\ttd      = client.handler.backend.blockchain.GetTd(head.Hash(), head.Number.Uint64())\n\t)\n\tforkID := forkid.NewID(client.handler.backend.blockchain.Config(), genesis.Hash(), head.Number.Uint64())\n\ttp.handshakeWithClient(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID, testCostList(0), recentTxLookup) // disable flow control by default\n\n\t// Ensure the connection is established or exits when any error occurs\n\tfor {\n\t\tselect {\n\t\tcase <-errCh:\n\t\t\treturn nil, nil, nil\n\t\tdefault:\n\t\t}\n\t\tif atomic.LoadUint32(&peer.serving) == 1 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}\n\tclosePeer := func() {\n\t\ttp.speer.close()\n\t\ttp.close()\n\t}\n\treturn tp, closePeer, errCh\n}\n\n// testServer represents a server object for testing with necessary auxiliary fields.\ntype testServer struct {\n\tclock   mclock.Clock\n\tbackend *backends.SimulatedBackend\n\tdb      ethdb.Database\n\tpeer    *testPeer\n\thandler *serverHandler\n\n\tchtIndexer       *core.ChainIndexer\n\tbloomIndexer     *core.ChainIndexer\n\tbloomTrieIndexer *core.ChainIndexer\n}\n\n// newRawPeer creates a new client peer connects to the server and do the handshake.\nfunc (server *testServer) newRawPeer(t *testing.T, name string, version int) (*testPeer, func(), <-chan error) {\n\t// Create a message pipe to communicate through\n\tapp, net := p2p.MsgPipe()\n\n\t// Generate a random id and create the peer\n\tvar id enode.ID\n\trand.Read(id[:])\n\tpeer := newClientPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net)\n\n\t// Start the peer on a new thread\n\terrCh := make(chan error, 1)\n\tgo func() {\n\t\tselect {\n\t\tcase <-server.handler.closeCh:\n\t\t\terrCh <- p2p.DiscQuitting\n\t\tcase errCh <- 
server.handler.handle(peer):\n\t\t}\n\t}()\n\ttp := &testPeer{\n\t\tapp:   app,\n\t\tnet:   net,\n\t\tcpeer: peer,\n\t}\n\tvar (\n\t\tgenesis = server.handler.blockchain.Genesis()\n\t\thead    = server.handler.blockchain.CurrentHeader()\n\t\ttd      = server.handler.blockchain.GetTd(head.Hash(), head.Number.Uint64())\n\t)\n\tforkID := forkid.NewID(server.handler.blockchain.Config(), genesis.Hash(), head.Number.Uint64())\n\ttp.handshakeWithServer(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID)\n\n\t// Ensure the connection is established or exit when any error occurs\n\tfor {\n\t\tselect {\n\t\tcase <-errCh:\n\t\t\treturn nil, nil, nil\n\t\tdefault:\n\t\t}\n\t\tif atomic.LoadUint32(&peer.serving) == 1 {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}\n\tclosePeer := func() {\n\t\ttp.cpeer.close()\n\t\ttp.close()\n\t}\n\treturn tp, closePeer, errCh\n}\n\n// testnetConfig wraps all the configurations for the testing network.\ntype testnetConfig struct {\n\tblocks      int\n\tprotocol    int\n\tindexFn     indexerCallback\n\tulcServers  []string\n\tulcFraction int\n\tsimClock    bool\n\tconnect     bool\n\tnopruning   bool\n}\n\nfunc newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testClient, func()) {\n\tvar (\n\t\tsdb    = rawdb.NewMemoryDatabase()\n\t\tcdb    = rawdb.NewMemoryDatabase()\n\t\tspeers = newServerPeerSet()\n\t)\n\tvar clock mclock.Clock = &mclock.System{}\n\tif config.simClock {\n\t\tclock = &mclock.Simulated{}\n\t}\n\tdist := newRequestDistributor(speers, clock)\n\trm := newRetrieveManager(speers, dist, func() time.Duration { return time.Millisecond * 500 })\n\todr := NewLesOdr(cdb, light.TestClientIndexerConfig, speers, rm)\n\n\tsindexers := testIndexers(sdb, nil, light.TestServerIndexerConfig, true)\n\tcIndexers := testIndexers(cdb, odr, light.TestClientIndexerConfig, config.nopruning)\n\n\tscIndexer, sbIndexer, sbtIndexer := sindexers[0], sindexers[1], sindexers[2]\n\tccIndexer, cbIndexer, cbtIndexer := cIndexers[0], cIndexers[1], cIndexers[2]\n\todr.SetIndexers(ccIndexer, cbIndexer, cbtIndexer)\n\n\tserver, b := newTestServerHandler(config.blocks, sindexers, sdb, clock)\n\tclient := newTestClientHandler(b, odr, cIndexers, cdb, speers, config.ulcServers, config.ulcFraction)\n\n\tscIndexer.Start(server.blockchain)\n\tsbIndexer.Start(server.blockchain)\n\tccIndexer.Start(client.backend.blockchain)\n\tcbIndexer.Start(client.backend.blockchain)\n\n\tif config.indexFn != nil {\n\t\tconfig.indexFn(scIndexer, sbIndexer, sbtIndexer)\n\t}\n\tvar (\n\t\terr          error\n\t\tspeer, cpeer *testPeer\n\t)\n\tif config.connect {\n\t\tdone := make(chan struct{})\n\t\tclient.syncEnd = func(_ *types.Header) { close(done) }\n\t\tcpeer, speer, err = newTestPeerPair(\"peer\", config.protocol, server, client)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to connect testing peers %v\", err)\n\t\t}\n\t\tselect {\n\t\tcase <-done:\n\t\tcase <-time.After(10 * time.Second):\n\t\t\tt.Fatal(\"test peer did not connect and sync within 10s\")\n\t\t}\n\t}\n\ts := &testServer{\n\t\tclock:            clock,\n\t\tbackend:          b,\n\t\tdb:               sdb,\n\t\tpeer:             cpeer,\n\t\thandler:          server,\n\t\tchtIndexer:       scIndexer,\n\t\tbloomIndexer:     sbIndexer,\n\t\tbloomTrieIndexer: sbtIndexer,\n\t}\n\tc := &testClient{\n\t\tclock:            clock,\n\t\tdb:               cdb,\n\t\tpeer:             speer,\n\t\thandler:          client,\n\t\tchtIndexer:       ccIndexer,\n\t\tbloomIndexer:     cbIndexer,\n\t\tbloomTrieIndexer: 
cbtIndexer,\n\t}\n\tteardown := func() {\n\t\tif config.connect {\n\t\t\tspeer.close()\n\t\t\tcpeer.close()\n\t\t\tcpeer.cpeer.close()\n\t\t\tspeer.speer.close()\n\t\t}\n\t\tccIndexer.Close()\n\t\tcbIndexer.Close()\n\t\tscIndexer.Close()\n\t\tsbIndexer.Close()\n\t\tb.Close()\n\t}\n\treturn s, c, teardown\n}\n\n// NewFuzzerPeer creates a client peer for test purposes, and also returns\n// a function to close the peer: this is needed to avoid goroutine leaks in the\n// exec queue.\nfunc NewFuzzerPeer(version int) (p *clientPeer, closer func()) {\n\tp = newClientPeer(version, 0, p2p.NewPeer(enode.ID{}, \"\", nil), nil)\n\treturn p, func() { p.peerCommons.close() }\n}\n"
  },
  {
    "path": "les/txrelay.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"context\"\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/core/types\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\ntype lesTxRelay struct {\n\ttxSent       map[common.Hash]*types.Transaction\n\ttxPending    map[common.Hash]struct{}\n\tpeerList     []*serverPeer\n\tpeerStartPos int\n\tlock         sync.Mutex\n\tstop         chan struct{}\n\n\tretriever *retrieveManager\n}\n\nfunc newLesTxRelay(ps *serverPeerSet, retriever *retrieveManager) *lesTxRelay {\n\tr := &lesTxRelay{\n\t\ttxSent:    make(map[common.Hash]*types.Transaction),\n\t\ttxPending: make(map[common.Hash]struct{}),\n\t\tretriever: retriever,\n\t\tstop:      make(chan struct{}),\n\t}\n\tps.subscribe(r)\n\treturn r\n}\n\nfunc (ltrx *lesTxRelay) Stop() {\n\tclose(ltrx.stop)\n}\n\nfunc (ltrx *lesTxRelay) registerPeer(p *serverPeer) {\n\tltrx.lock.Lock()\n\tdefer ltrx.lock.Unlock()\n\n\t// Short circuit if the peer is announce only.\n\tif p.onlyAnnounce {\n\t\treturn\n\t}\n\tltrx.peerList = append(ltrx.peerList, p)\n}\n\nfunc (ltrx *lesTxRelay) unregisterPeer(p *serverPeer) {\n\tltrx.lock.Lock()\n\tdefer ltrx.lock.Unlock()\n\n\tfor i, peer := range ltrx.peerList {\n\t\tif peer == p {\n\t\t\t// Remove from the peer list\n\t\t\tltrx.peerList = append(ltrx.peerList[:i], ltrx.peerList[i+1:]...)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// send sends a list of transactions to at most a given number of peers.\nfunc (ltrx *lesTxRelay) send(txs types.Transactions, count int) {\n\tsendTo := make(map[*serverPeer]types.Transactions)\n\n\tltrx.peerStartPos++ // rotate the starting position of the peer list\n\tif ltrx.peerStartPos >= len(ltrx.peerList) {\n\t\tltrx.peerStartPos = 0\n\t}\n\n\tfor _, tx := range txs {\n\t\thash := tx.Hash()\n\t\t_, ok := ltrx.txSent[hash]\n\t\tif !ok {\n\t\t\tltrx.txSent[hash] = tx\n\t\t\tltrx.txPending[hash] = struct{}{}\n\t\t}\n\t\tif len(ltrx.peerList) > 0 {\n\t\t\tcnt := count\n\t\t\tpos := ltrx.peerStartPos\n\t\t\tfor {\n\t\t\t\tpeer := ltrx.peerList[pos]\n\t\t\t\tsendTo[peer] = append(sendTo[peer], tx)\n\t\t\t\tcnt--\n\t\t\t\tif cnt == 0 {\n\t\t\t\t\tbreak // sent it to the desired number of peers\n\t\t\t\t}\n\t\t\t\tpos++\n\t\t\t\tif pos == len(ltrx.peerList) {\n\t\t\t\t\tpos = 0\n\t\t\t\t}\n\t\t\t\tif pos == ltrx.peerStartPos {\n\t\t\t\t\tbreak // tried all available peers\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor p, list := range sendTo {\n\t\tpp := p\n\t\tll := list\n\t\tenc, _ := rlp.EncodeToBytes(ll)\n\n\t\treqID := genReqID()\n\t\trq := &distReq{\n\t\t\tgetCost: func(dp distPeer) uint64 {\n\t\t\t\tpeer := dp.(*serverPeer)\n\t\t\t\treturn peer.getTxRelayCost(len(ll), len(enc))\n\t\t\t},\n\t\t\tcanSend: func(dp distPeer) 
bool {\n\t\t\t\treturn !dp.(*serverPeer).onlyAnnounce && dp.(*serverPeer) == pp\n\t\t\t},\n\t\t\trequest: func(dp distPeer) func() {\n\t\t\t\tpeer := dp.(*serverPeer)\n\t\t\t\tcost := peer.getTxRelayCost(len(ll), len(enc))\n\t\t\t\tpeer.fcServer.QueuedRequest(reqID, cost)\n\t\t\t\treturn func() { peer.sendTxs(reqID, len(ll), enc) }\n\t\t\t},\n\t\t}\n\t\tgo ltrx.retriever.retrieve(context.Background(), reqID, rq, func(p distPeer, msg *Msg) error { return nil }, ltrx.stop)\n\t}\n}\n\nfunc (ltrx *lesTxRelay) Send(txs types.Transactions) {\n\tltrx.lock.Lock()\n\tdefer ltrx.lock.Unlock()\n\n\tltrx.send(txs, 3)\n}\n\nfunc (ltrx *lesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {\n\tltrx.lock.Lock()\n\tdefer ltrx.lock.Unlock()\n\n\tfor _, hash := range mined {\n\t\tdelete(ltrx.txPending, hash)\n\t}\n\n\tfor _, hash := range rollback {\n\t\tltrx.txPending[hash] = struct{}{}\n\t}\n\n\tif len(ltrx.txPending) > 0 {\n\t\ttxs := make(types.Transactions, len(ltrx.txPending))\n\t\ti := 0\n\t\tfor hash := range ltrx.txPending {\n\t\t\ttxs[i] = ltrx.txSent[hash]\n\t\t\ti++\n\t\t}\n\t\tltrx.send(txs, 1)\n\t}\n}\n\nfunc (ltrx *lesTxRelay) Discard(hashes []common.Hash) {\n\tltrx.lock.Lock()\n\tdefer ltrx.lock.Unlock()\n\n\tfor _, hash := range hashes {\n\t\tdelete(ltrx.txSent, hash)\n\t\tdelete(ltrx.txPending, hash)\n\t}\n}\n"
  },
  {
    "path": "les/ulc.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"errors\"\n\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\ntype ulc struct {\n\tkeys     map[string]bool\n\tfraction int\n}\n\n// newULC creates and returns an ultra light client instance.\nfunc newULC(servers []string, fraction int) (*ulc, error) {\n\tkeys := make(map[string]bool)\n\tfor _, id := range servers {\n\t\tnode, err := enode.Parse(enode.ValidSchemes, id)\n\t\tif err != nil {\n\t\t\tlog.Warn(\"Failed to parse trusted server\", \"id\", id, \"err\", err)\n\t\t\tcontinue\n\t\t}\n\t\tkeys[node.ID().String()] = true\n\t}\n\tif len(keys) == 0 {\n\t\treturn nil, errors.New(\"no trusted servers\")\n\t}\n\treturn &ulc{\n\t\tkeys:     keys,\n\t\tfraction: fraction,\n\t}, nil\n}\n\n// trusted return an indicator that whether the specified peer is trusted.\nfunc (u *ulc) trusted(p enode.ID) bool {\n\treturn u.keys[p.String()]\n}\n"
  },
  {
    "path": "les/ulc_test.go",
    "content": "// Copyright 2018 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage les\n\nimport (\n\t\"crypto/rand\"\n\t\"fmt\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/crypto\"\n\t\"github.com/ethereum/go-ethereum/p2p\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\nfunc TestULCAnnounceThresholdLes2(t *testing.T) { testULCAnnounceThreshold(t, 2) }\nfunc TestULCAnnounceThresholdLes3(t *testing.T) { testULCAnnounceThreshold(t, 3) }\n\nfunc testULCAnnounceThreshold(t *testing.T, protocol int) {\n\t// todo figure out why it takes fetcher so longer to fetcher the announced header.\n\tt.Skip(\"Sometimes it can failed\")\n\tvar cases = []struct {\n\t\theight    []int\n\t\tthreshold int\n\t\texpect    uint64\n\t}{\n\t\t{[]int{1}, 100, 1},\n\t\t{[]int{0, 0, 0}, 100, 0},\n\t\t{[]int{1, 2, 3}, 30, 3},\n\t\t{[]int{1, 2, 3}, 60, 2},\n\t\t{[]int{3, 2, 1}, 67, 1},\n\t\t{[]int{3, 2, 1}, 100, 1},\n\t}\n\tfor _, testcase := range cases {\n\t\tvar (\n\t\t\tservers   []*testServer\n\t\t\tteardowns []func()\n\t\t\tnodes     []*enode.Node\n\t\t\tids       []string\n\t\t)\n\t\tfor i := 0; i < len(testcase.height); i++ {\n\t\t\ts, n, teardown := newTestServerPeer(t, 0, protocol)\n\n\t\t\tservers = append(servers, s)\n\t\t\tnodes = append(nodes, n)\n\t\t\tteardowns = append(teardowns, teardown)\n\t\t\tids = append(ids, n.String())\n\t\t}\n\t\tc, teardown := newTestLightPeer(t, protocol, ids, testcase.threshold)\n\n\t\t// Connect all servers.\n\t\tfor i := 0; i < len(servers); i++ {\n\t\t\tconnect(servers[i].handler, nodes[i].ID(), c.handler, protocol)\n\t\t}\n\t\tfor i := 0; i < len(servers); i++ {\n\t\t\tfor j := 0; j < testcase.height[i]; j++ {\n\t\t\t\tservers[i].backend.Commit()\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(1500 * time.Millisecond) // Ensure the fetcher has done its work.\n\t\thead := c.handler.backend.blockchain.CurrentHeader().Number.Uint64()\n\t\tif head != testcase.expect {\n\t\t\tt.Fatalf(\"chain height mismatch, want %d, got %d\", testcase.expect, head)\n\t\t}\n\n\t\t// Release all servers and client resources.\n\t\tteardown()\n\t\tfor i := 0; i < len(teardowns); i++ {\n\t\t\tteardowns[i]()\n\t\t}\n\t}\n}\n\nfunc connect(server *serverHandler, serverId enode.ID, client *clientHandler, protocol int) (*serverPeer, *clientPeer, error) {\n\t// Create a message pipe to communicate through\n\tapp, net := p2p.MsgPipe()\n\n\tvar id enode.ID\n\trand.Read(id[:])\n\n\tpeer1 := newServerPeer(protocol, NetworkId, true, p2p.NewPeer(serverId, \"\", nil), net) // Mark server as trusted\n\tpeer2 := newClientPeer(protocol, NetworkId, p2p.NewPeer(id, \"\", nil), app)\n\n\t// Start the peerLight on a new thread\n\terrc1 := make(chan error, 1)\n\terrc2 := make(chan error, 1)\n\tgo func() {\n\t\tselect {\n\t\tcase 
<-server.closeCh:\n\t\t\terrc1 <- p2p.DiscQuitting\n\t\tcase errc1 <- server.handle(peer2):\n\t\t}\n\t}()\n\tgo func() {\n\t\tselect {\n\t\tcase <-client.closeCh:\n\t\t\terrc2 <- p2p.DiscQuitting\n\t\tcase errc2 <- client.handle(peer1):\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-time.After(time.Millisecond * 100):\n\tcase err := <-errc1:\n\t\treturn nil, nil, fmt.Errorf(\"peerLight handshake error: %v\", err)\n\tcase err := <-errc2:\n\t\treturn nil, nil, fmt.Errorf(\"peerFull handshake error: %v\", err)\n\t}\n\treturn peer1, peer2, nil\n}\n\n// newTestServerPeer creates a server peer.\nfunc newTestServerPeer(t *testing.T, blocks int, protocol int) (*testServer, *enode.Node, func()) {\n\tnetconfig := testnetConfig{\n\t\tblocks:    blocks,\n\t\tprotocol:  protocol,\n\t\tnopruning: true,\n\t}\n\ts, _, teardown := newClientServerEnv(t, netconfig)\n\tkey, err := crypto.GenerateKey()\n\tif err != nil {\n\t\tt.Fatal(\"generate key err:\", err)\n\t}\n\ts.handler.server.privateKey = key\n\tn := enode.NewV4(&key.PublicKey, net.ParseIP(\"127.0.0.1\"), 35000, 35000)\n\treturn s, n, teardown\n}\n\n// newTestLightPeer creates a client peer with light sync mode.\nfunc newTestLightPeer(t *testing.T, protocol int, ulcServers []string, ulcFraction int) (*testClient, func()) {\n\tnetconfig := testnetConfig{\n\t\tprotocol:    protocol,\n\t\tulcServers:  ulcServers,\n\t\tulcFraction: ulcFraction,\n\t\tnopruning:   true,\n\t}\n\t_, c, teardown := newClientServerEnv(t, netconfig)\n\treturn c, teardown\n}\n"
  },
  {
    "path": "les/utils/exec_queue.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage utils\n\nimport \"sync\"\n\n// ExecQueue implements a queue that executes function calls in a single thread,\n// in the same order as they have been queued.\ntype ExecQueue struct {\n\tmu        sync.Mutex\n\tcond      *sync.Cond\n\tfuncs     []func()\n\tcloseWait chan struct{}\n}\n\n// NewExecQueue creates a new execution Queue.\nfunc NewExecQueue(capacity int) *ExecQueue {\n\tq := &ExecQueue{funcs: make([]func(), 0, capacity)}\n\tq.cond = sync.NewCond(&q.mu)\n\tgo q.loop()\n\treturn q\n}\n\nfunc (q *ExecQueue) loop() {\n\tfor f := q.waitNext(false); f != nil; f = q.waitNext(true) {\n\t\tf()\n\t}\n\tclose(q.closeWait)\n}\n\nfunc (q *ExecQueue) waitNext(drop bool) (f func()) {\n\tq.mu.Lock()\n\tif drop && len(q.funcs) > 0 {\n\t\t// Remove the function that just executed. We do this here instead of when\n\t\t// dequeuing so len(q.funcs) includes the function that is running.\n\t\tq.funcs = append(q.funcs[:0], q.funcs[1:]...)\n\t}\n\tfor !q.isClosed() {\n\t\tif len(q.funcs) > 0 {\n\t\t\tf = q.funcs[0]\n\t\t\tbreak\n\t\t}\n\t\tq.cond.Wait()\n\t}\n\tq.mu.Unlock()\n\treturn f\n}\n\nfunc (q *ExecQueue) isClosed() bool {\n\treturn q.closeWait != nil\n}\n\n// CanQueue returns true if more function calls can be added to the execution Queue.\nfunc (q *ExecQueue) CanQueue() bool {\n\tq.mu.Lock()\n\tok := !q.isClosed() && len(q.funcs) < cap(q.funcs)\n\tq.mu.Unlock()\n\treturn ok\n}\n\n// Queue adds a function call to the execution Queue. Returns true if successful.\nfunc (q *ExecQueue) Queue(f func()) bool {\n\tq.mu.Lock()\n\tok := !q.isClosed() && len(q.funcs) < cap(q.funcs)\n\tif ok {\n\t\tq.funcs = append(q.funcs, f)\n\t\tq.cond.Signal()\n\t}\n\tq.mu.Unlock()\n\treturn ok\n}\n\n// Clear drops all queued functions.\nfunc (q *ExecQueue) Clear() {\n\tq.mu.Lock()\n\tq.funcs = q.funcs[:0]\n\tq.mu.Unlock()\n}\n\n// Quit stops the exec Queue.\n//\n// Quit waits for the current execution to finish before returning.\nfunc (q *ExecQueue) Quit() {\n\tq.mu.Lock()\n\tif !q.isClosed() {\n\t\tq.closeWait = make(chan struct{})\n\t\tq.cond.Signal()\n\t}\n\tq.mu.Unlock()\n\t<-q.closeWait\n}\n"
  },
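  {
    "path": "les/utils/exec_queue_usage_example.go",
    "content": "// Editor's note: an illustrative usage sketch added for clarity; this file and\n// its path are hypothetical and not part of the go-ethereum repository. It\n// shows how ExecQueue serializes queued function calls and how Queue reports\n// back-pressure by returning false.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n)\n\nfunc main() {\n\tq := utils.NewExecQueue(4) // up to 4 pending calls, including the running one\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 4; i++ {\n\t\ti := i\n\t\twg.Add(1)\n\t\tif !q.Queue(func() {\n\t\t\tdefer wg.Done()\n\t\t\tfmt.Println(\"executed in order:\", i)\n\t\t}) {\n\t\t\twg.Done() // Queue returns false when the queue is full or closed\n\t\t}\n\t}\n\twg.Wait()\n\tq.Quit() // waits for the currently running function to finish\n}\n"
  },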
  {
    "path": "les/utils/exec_queue_test.go",
    "content": "// Copyright 2017 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage utils\n\nimport \"testing\"\n\nfunc TestExecQueue(t *testing.T) {\n\tvar (\n\t\tN        = 10000\n\t\tq        = NewExecQueue(N)\n\t\tcounter  int\n\t\texecd    = make(chan int)\n\t\ttestexit = make(chan struct{})\n\t)\n\tdefer q.Quit()\n\tdefer close(testexit)\n\n\tcheck := func(state string, wantOK bool) {\n\t\tc := counter\n\t\tcounter++\n\t\tqf := func() {\n\t\t\tselect {\n\t\t\tcase execd <- c:\n\t\t\tcase <-testexit:\n\t\t\t}\n\t\t}\n\t\tif q.CanQueue() != wantOK {\n\t\t\tt.Fatalf(\"CanQueue() == %t for %s\", !wantOK, state)\n\t\t}\n\t\tif q.Queue(qf) != wantOK {\n\t\t\tt.Fatalf(\"Queue() == %t for %s\", !wantOK, state)\n\t\t}\n\t}\n\n\tfor i := 0; i < N; i++ {\n\t\tcheck(\"queue below cap\", true)\n\t}\n\tcheck(\"full queue\", false)\n\tfor i := 0; i < N; i++ {\n\t\tif c := <-execd; c != i {\n\t\t\tt.Fatal(\"execution out of order\")\n\t\t}\n\t}\n\tq.Quit()\n\tcheck(\"closed queue\", false)\n}\n"
  },
  {
    "path": "les/utils/expiredvalue.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage utils\n\nimport (\n\t\"math\"\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n)\n\n// ExpiredValue is a scalar value that is continuously expired (decreased\n// exponentially) based on the provided logarithmic expiration offset value.\n//\n// The formula for value calculation is: base*2^(exp-logOffset). In order to\n// simplify the calculation of ExpiredValue, its value is expressed in the form\n// of an exponent with a base of 2.\n//\n// Also here is a trick to reduce a lot of calculations. In theory, when a value X\n// decays over time and then a new value Y is added, the final result should be\n// X*2^(exp-logOffset)+Y. However it's very hard to represent in memory.\n// So the trick is using the idea of inflation instead of exponential decay. At this\n// moment the temporary value becomes: X*2^exp+Y*2^logOffset_1, apply the exponential\n// decay when we actually want to calculate the value.\n//\n// e.g.\n// t0: V = 100\n// t1: add 30, inflationary value is: 100 + 30/0.3, 0.3 is the decay coefficient\n// t2: get value, decay coefficient is 0.2 now, final result is: 200*0.2 = 40\ntype ExpiredValue struct {\n\tBase, Exp uint64 // rlp encoding works by default\n}\n\n// ExpirationFactor is calculated from logOffset. 1 <= Factor < 2 and Factor*2^Exp\n// describes the multiplier applicable for additions and the divider for readouts.\n// If logOffset changes slowly then it saves some expensive operations to not calculate\n// them for each addition and readout but cache this intermediate form for some time.\n// It is also useful for structures where multiple values are expired with the same\n// Expirer.\ntype ExpirationFactor struct {\n\tExp    uint64\n\tFactor float64\n}\n\n// ExpFactor calculates ExpirationFactor based on logOffset\nfunc ExpFactor(logOffset Fixed64) ExpirationFactor {\n\treturn ExpirationFactor{Exp: logOffset.ToUint64(), Factor: logOffset.Fraction().Pow2()}\n}\n\n// Value calculates the expired value based on a floating point base and integer\n// power-of-2 exponent. 
This function should be used by multi-value expired structures.\nfunc (e ExpirationFactor) Value(base float64, exp uint64) float64 {\n\treturn base / e.Factor * math.Pow(2, float64(int64(exp-e.Exp)))\n}\n\n// value calculates the value at the given moment.\nfunc (e ExpiredValue) Value(logOffset Fixed64) uint64 {\n\toffset := Uint64ToFixed64(e.Exp) - logOffset\n\treturn uint64(float64(e.Base) * offset.Pow2())\n}\n\n// add adds a signed value at the given moment\nfunc (e *ExpiredValue) Add(amount int64, logOffset Fixed64) int64 {\n\tinteger, frac := logOffset.ToUint64(), logOffset.Fraction()\n\tfactor := frac.Pow2()\n\tbase := factor * float64(amount)\n\tif integer < e.Exp {\n\t\tbase /= math.Pow(2, float64(e.Exp-integer))\n\t}\n\tif integer > e.Exp {\n\t\te.Base >>= (integer - e.Exp)\n\t\te.Exp = integer\n\t}\n\tif base >= 0 || uint64(-base) <= e.Base {\n\t\t// The conversion from negative float64 to\n\t\t// uint64 is undefined in golang, and doesn't\n\t\t// work with ARMv8. More details at:\n\t\t// https://github.com/golang/go/issues/43047\n\t\tif base >= 0 {\n\t\t\te.Base += uint64(base)\n\t\t} else {\n\t\t\te.Base -= uint64(-base)\n\t\t}\n\t\treturn amount\n\t}\n\tnet := int64(-float64(e.Base) / factor)\n\te.Base = 0\n\treturn net\n}\n\n// addExp adds another ExpiredValue\nfunc (e *ExpiredValue) AddExp(a ExpiredValue) {\n\tif e.Exp > a.Exp {\n\t\ta.Base >>= (e.Exp - a.Exp)\n\t}\n\tif e.Exp < a.Exp {\n\t\te.Base >>= (a.Exp - e.Exp)\n\t\te.Exp = a.Exp\n\t}\n\te.Base += a.Base\n}\n\n// subExp subtracts another ExpiredValue\nfunc (e *ExpiredValue) SubExp(a ExpiredValue) {\n\tif e.Exp > a.Exp {\n\t\ta.Base >>= (e.Exp - a.Exp)\n\t}\n\tif e.Exp < a.Exp {\n\t\te.Base >>= (a.Exp - e.Exp)\n\t\te.Exp = a.Exp\n\t}\n\tif e.Base > a.Base {\n\t\te.Base -= a.Base\n\t} else {\n\t\te.Base = 0\n\t}\n}\n\n// IsZero returns true if the value is zero\nfunc (e *ExpiredValue) IsZero() bool {\n\treturn e.Base == 0\n}\n\n// LinearExpiredValue is very similar with the expiredValue which the value\n// will continuously expired. But the different part is it's expired linearly.\ntype LinearExpiredValue struct {\n\tOffset uint64         // The latest time offset\n\tVal    uint64         // The remaining value, can never be negative\n\tRate   mclock.AbsTime `rlp:\"-\"` // Expiration rate(by nanosecond), will ignored by RLP\n}\n\n// value calculates the value at the given moment. This function always has the\n// assumption that the given timestamp shouldn't less than the recorded one.\nfunc (e LinearExpiredValue) Value(now mclock.AbsTime) uint64 {\n\toffset := uint64(now / e.Rate)\n\tif e.Offset < offset {\n\t\tdiff := offset - e.Offset\n\t\tif e.Val >= diff {\n\t\t\te.Val -= diff\n\t\t} else {\n\t\t\te.Val = 0\n\t\t}\n\t}\n\treturn e.Val\n}\n\n// add adds a signed value at the given moment. 
This function always has the\n// assumption that the given timestamp shouldn't less than the recorded one.\nfunc (e *LinearExpiredValue) Add(amount int64, now mclock.AbsTime) uint64 {\n\toffset := uint64(now / e.Rate)\n\tif e.Offset < offset {\n\t\tdiff := offset - e.Offset\n\t\tif e.Val >= diff {\n\t\t\te.Val -= diff\n\t\t} else {\n\t\t\te.Val = 0\n\t\t}\n\t\te.Offset = offset\n\t}\n\tif amount < 0 && uint64(-amount) > e.Val {\n\t\te.Val = 0\n\t} else {\n\t\te.Val = uint64(int64(e.Val) + amount)\n\t}\n\treturn e.Val\n}\n\n// ValueExpirer controls value expiration rate\ntype ValueExpirer interface {\n\tSetRate(now mclock.AbsTime, rate float64)\n\tSetLogOffset(now mclock.AbsTime, logOffset Fixed64)\n\tLogOffset(now mclock.AbsTime) Fixed64\n}\n\n// Expirer changes logOffset with a linear rate which can be changed during operation.\n// It is not thread safe, if access by multiple goroutines is needed then it should be\n// encapsulated into a locked structure.\n// Note that if neither SetRate nor SetLogOffset are used during operation then LogOffset\n// is thread safe.\ntype Expirer struct {\n\tlock       sync.RWMutex\n\tlogOffset  Fixed64\n\trate       float64\n\tlastUpdate mclock.AbsTime\n}\n\n// SetRate changes the expiration rate which is the inverse of the time constant in\n// nanoseconds.\nfunc (e *Expirer) SetRate(now mclock.AbsTime, rate float64) {\n\te.lock.Lock()\n\tdefer e.lock.Unlock()\n\n\tdt := now - e.lastUpdate\n\tif dt > 0 {\n\t\te.logOffset += Fixed64(logToFixedFactor * float64(dt) * e.rate)\n\t}\n\te.lastUpdate = now\n\te.rate = rate\n}\n\n// SetLogOffset sets logOffset instantly.\nfunc (e *Expirer) SetLogOffset(now mclock.AbsTime, logOffset Fixed64) {\n\te.lock.Lock()\n\tdefer e.lock.Unlock()\n\n\te.lastUpdate = now\n\te.logOffset = logOffset\n}\n\n// LogOffset returns the current logarithmic offset.\nfunc (e *Expirer) LogOffset(now mclock.AbsTime) Fixed64 {\n\te.lock.RLock()\n\tdefer e.lock.RUnlock()\n\n\tdt := now - e.lastUpdate\n\tif dt <= 0 {\n\t\treturn e.logOffset\n\t}\n\treturn e.logOffset + Fixed64(logToFixedFactor*float64(dt)*e.rate)\n}\n\n// fixedFactor is the fixed point multiplier factor used by Fixed64.\nconst fixedFactor = 0x1000000\n\n// Fixed64 implements 64-bit fixed point arithmetic functions.\ntype Fixed64 int64\n\n// Uint64ToFixed64 converts uint64 integer to Fixed64 format.\nfunc Uint64ToFixed64(f uint64) Fixed64 {\n\treturn Fixed64(f * fixedFactor)\n}\n\n// float64ToFixed64 converts float64 to Fixed64 format.\nfunc Float64ToFixed64(f float64) Fixed64 {\n\treturn Fixed64(f * fixedFactor)\n}\n\n// toUint64 converts Fixed64 format to uint64.\nfunc (f64 Fixed64) ToUint64() uint64 {\n\treturn uint64(f64) / fixedFactor\n}\n\n// fraction returns the fractional part of a Fixed64 value.\nfunc (f64 Fixed64) Fraction() Fixed64 {\n\treturn f64 % fixedFactor\n}\n\nvar (\n\tlogToFixedFactor = float64(fixedFactor) / math.Log(2)\n\tfixedToLogFactor = math.Log(2) / float64(fixedFactor)\n)\n\n// pow2Fixed returns the base 2 power of the fixed point value.\nfunc (f64 Fixed64) Pow2() float64 {\n\treturn math.Exp(float64(f64) * fixedToLogFactor)\n}\n"
  },
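  {
    "path": "les/utils/expiredvalue_usage_example.go",
    "content": "// Editor's note: an illustrative sketch; this file and its path are\n// hypothetical and not part of the go-ethereum repository. It walks through\n// the base*2^(exp-logOffset) formula from expiredvalue.go: one unit of\n// logOffset corresponds to one halving period.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n)\n\nfunc main() {\n\tvar v utils.ExpiredValue\n\n\t// Add 128 units at logOffset 0.\n\tv.Add(128, utils.Uint64ToFixed64(0))\n\n\t// One halving period later the readout is 128*2^(0-1) = 64.\n\tfmt.Println(v.Value(utils.Uint64ToFixed64(1))) // 64\n\n\t// Later additions are stored inflated by the current offset, so a readout\n\t// at the same offset sees the decayed old value plus the full new amount.\n\tv.Add(64, utils.Uint64ToFixed64(1))\n\tfmt.Println(v.Value(utils.Uint64ToFixed64(1))) // 64 + 64 = 128\n}\n"
  },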
  {
    "path": "les/utils/expiredvalue_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage utils\n\nimport (\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n)\n\nfunc TestValueExpiration(t *testing.T) {\n\tvar cases = []struct {\n\t\tinput      ExpiredValue\n\t\ttimeOffset Fixed64\n\t\texpect     uint64\n\t}{\n\t\t{ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 128},\n\t\t{ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(1), 64},\n\t\t{ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(2), 32},\n\t\t{ExpiredValue{Base: 128, Exp: 2}, Uint64ToFixed64(2), 128},\n\t\t{ExpiredValue{Base: 128, Exp: 2}, Uint64ToFixed64(3), 64},\n\t}\n\tfor _, c := range cases {\n\t\tif got := c.input.Value(c.timeOffset); got != c.expect {\n\t\t\tt.Fatalf(\"Value mismatch, want=%d, got=%d\", c.expect, got)\n\t\t}\n\t}\n}\n\nfunc TestValueAddition(t *testing.T) {\n\tvar cases = []struct {\n\t\tinput      ExpiredValue\n\t\taddend     int64\n\t\ttimeOffset Fixed64\n\t\texpect     uint64\n\t\texpectNet  int64\n\t}{\n\t\t// Addition\n\t\t{ExpiredValue{Base: 128, Exp: 0}, 128, Uint64ToFixed64(0), 256, 128},\n\t\t{ExpiredValue{Base: 128, Exp: 2}, 128, Uint64ToFixed64(0), 640, 128},\n\n\t\t// Addition with offset\n\t\t{ExpiredValue{Base: 128, Exp: 0}, 128, Uint64ToFixed64(1), 192, 128},\n\t\t{ExpiredValue{Base: 128, Exp: 2}, 128, Uint64ToFixed64(1), 384, 128},\n\t\t{ExpiredValue{Base: 128, Exp: 2}, 128, Uint64ToFixed64(3), 192, 128},\n\n\t\t// Subtraction\n\t\t{ExpiredValue{Base: 128, Exp: 0}, -64, Uint64ToFixed64(0), 64, -64},\n\t\t{ExpiredValue{Base: 128, Exp: 0}, -128, Uint64ToFixed64(0), 0, -128},\n\t\t{ExpiredValue{Base: 128, Exp: 0}, -192, Uint64ToFixed64(0), 0, -128},\n\n\t\t// Subtraction with offset\n\t\t{ExpiredValue{Base: 128, Exp: 0}, -64, Uint64ToFixed64(1), 0, -64},\n\t\t{ExpiredValue{Base: 128, Exp: 0}, -128, Uint64ToFixed64(1), 0, -64},\n\t\t{ExpiredValue{Base: 128, Exp: 2}, -128, Uint64ToFixed64(1), 128, -128},\n\t\t{ExpiredValue{Base: 128, Exp: 2}, -128, Uint64ToFixed64(2), 0, -128},\n\t}\n\tfor _, c := range cases {\n\t\tif net := c.input.Add(c.addend, c.timeOffset); net != c.expectNet {\n\t\t\tt.Fatalf(\"Net amount mismatch, want=%d, got=%d\", c.expectNet, net)\n\t\t}\n\t\tif got := c.input.Value(c.timeOffset); got != c.expect {\n\t\t\tt.Fatalf(\"Value mismatch, want=%d, got=%d\", c.expect, got)\n\t\t}\n\t}\n}\n\nfunc TestExpiredValueAddition(t *testing.T) {\n\tvar cases = []struct {\n\t\tinput      ExpiredValue\n\t\tanother    ExpiredValue\n\t\ttimeOffset Fixed64\n\t\texpect     uint64\n\t}{\n\t\t{ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 256},\n\t\t{ExpiredValue{Base: 128, Exp: 1}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 384},\n\t\t{ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, 
Exp: 1}, Uint64ToFixed64(0), 384},\n\t\t{ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(1), 128},\n\t}\n\tfor _, c := range cases {\n\t\tc.input.AddExp(c.another)\n\t\tif got := c.input.Value(c.timeOffset); got != c.expect {\n\t\t\tt.Fatalf(\"Value mismatch, want=%d, got=%d\", c.expect, got)\n\t\t}\n\t}\n}\n\nfunc TestExpiredValueSubtraction(t *testing.T) {\n\tvar cases = []struct {\n\t\tinput      ExpiredValue\n\t\tanother    ExpiredValue\n\t\ttimeOffset Fixed64\n\t\texpect     uint64\n\t}{\n\t\t{ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 0},\n\t\t{ExpiredValue{Base: 128, Exp: 0}, ExpiredValue{Base: 128, Exp: 1}, Uint64ToFixed64(0), 0},\n\t\t{ExpiredValue{Base: 128, Exp: 1}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(0), 128},\n\t\t{ExpiredValue{Base: 128, Exp: 1}, ExpiredValue{Base: 128, Exp: 0}, Uint64ToFixed64(1), 64},\n\t}\n\tfor _, c := range cases {\n\t\tc.input.SubExp(c.another)\n\t\tif got := c.input.Value(c.timeOffset); got != c.expect {\n\t\t\tt.Fatalf(\"Value mismatch, want=%d, got=%d\", c.expect, got)\n\t\t}\n\t}\n}\n\nfunc TestLinearExpiredValue(t *testing.T) {\n\tvar cases = []struct {\n\t\tvalue  LinearExpiredValue\n\t\tnow    mclock.AbsTime\n\t\texpect uint64\n\t}{\n\t\t{LinearExpiredValue{\n\t\t\tOffset: 0,\n\t\t\tVal:    0,\n\t\t\tRate:   mclock.AbsTime(1),\n\t\t}, 0, 0},\n\n\t\t{LinearExpiredValue{\n\t\t\tOffset: 1,\n\t\t\tVal:    1,\n\t\t\tRate:   mclock.AbsTime(1),\n\t\t}, 0, 1},\n\n\t\t{LinearExpiredValue{\n\t\t\tOffset: 1,\n\t\t\tVal:    1,\n\t\t\tRate:   mclock.AbsTime(1),\n\t\t}, mclock.AbsTime(2), 0},\n\n\t\t{LinearExpiredValue{\n\t\t\tOffset: 1,\n\t\t\tVal:    1,\n\t\t\tRate:   mclock.AbsTime(1),\n\t\t}, mclock.AbsTime(3), 0},\n\t}\n\tfor _, c := range cases {\n\t\tif value := c.value.Value(c.now); value != c.expect {\n\t\t\tt.Fatalf(\"Value mismatch, want=%d, got=%d\", c.expect, value)\n\t\t}\n\t}\n}\n\nfunc TestLinearExpiredAddition(t *testing.T) {\n\tvar cases = []struct {\n\t\tvalue  LinearExpiredValue\n\t\tamount int64\n\t\tnow    mclock.AbsTime\n\t\texpect uint64\n\t}{\n\t\t{LinearExpiredValue{\n\t\t\tOffset: 0,\n\t\t\tVal:    0,\n\t\t\tRate:   mclock.AbsTime(1),\n\t\t}, -1, 0, 0},\n\n\t\t{LinearExpiredValue{\n\t\t\tOffset: 1,\n\t\t\tVal:    1,\n\t\t\tRate:   mclock.AbsTime(1),\n\t\t}, -1, 0, 0},\n\n\t\t{LinearExpiredValue{\n\t\t\tOffset: 1,\n\t\t\tVal:    2,\n\t\t\tRate:   mclock.AbsTime(1),\n\t\t}, -1, mclock.AbsTime(2), 0},\n\n\t\t{LinearExpiredValue{\n\t\t\tOffset: 1,\n\t\t\tVal:    2,\n\t\t\tRate:   mclock.AbsTime(1),\n\t\t}, -2, mclock.AbsTime(2), 0},\n\t}\n\tfor _, c := range cases {\n\t\tif value := c.value.Add(c.amount, c.now); value != c.expect {\n\t\t\tt.Fatalf(\"Value mismatch, want=%d, got=%d\", c.expect, value)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "les/utils/limiter.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage utils\n\nimport (\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\nconst maxSelectionWeight = 1000000000 // maximum selection weight of each individual node/address group\n\n// Limiter protects a network request serving mechanism from denial-of-service attacks.\n// It limits the total amount of resources used for serving requests while ensuring that\n// the most valuable connections always have a reasonable chance of being served.\ntype Limiter struct {\n\tlock sync.Mutex\n\tcond *sync.Cond\n\tquit bool\n\n\tnodes                          map[enode.ID]*nodeQueue\n\taddresses                      map[string]*addressGroup\n\taddressSelect, valueSelect     *WeightedRandomSelect\n\tmaxValue                       float64\n\tmaxCost, sumCost, sumCostLimit uint\n\tselectAddressNext              bool\n}\n\n// nodeQueue represents queued requests coming from a single node ID\ntype nodeQueue struct {\n\tqueue                   []request // always nil if penaltyCost != 0\n\tid                      enode.ID\n\taddress                 string\n\tvalue                   float64\n\tflatWeight, valueWeight uint64 // current selection weights in the address/value selectors\n\tsumCost                 uint   // summed cost of requests queued by the node\n\tpenaltyCost             uint   // cumulative cost of dropped requests since last processed request\n\tgroupIndex              int\n}\n\n// addressGroup is a group of node IDs that have sent their last requests from the same\n// network address\ntype addressGroup struct {\n\tnodes                      []*nodeQueue\n\tnodeSelect                 *WeightedRandomSelect\n\tsumFlatWeight, groupWeight uint64\n}\n\n// request represents an incoming request scheduled for processing\ntype request struct {\n\tprocess chan chan struct{}\n\tcost    uint\n}\n\n// flatWeight distributes weights equally between each active network address\nfunc flatWeight(item interface{}) uint64 { return item.(*nodeQueue).flatWeight }\n\n// add adds the node queue to the address group. 
It is the caller's responsibility to\n// add the address group to the address map and the address selector if it wasn't\n// there before.\nfunc (ag *addressGroup) add(nq *nodeQueue) {\n\tif nq.groupIndex != -1 {\n\t\tpanic(\"added node queue is already in an address group\")\n\t}\n\tl := len(ag.nodes)\n\tnq.groupIndex = l\n\tag.nodes = append(ag.nodes, nq)\n\tag.sumFlatWeight += nq.flatWeight\n\tag.groupWeight = ag.sumFlatWeight / uint64(l+1)\n\tag.nodeSelect.Update(ag.nodes[l])\n}\n\n// update updates the selection weight of the node queue inside the address group.\n// It is the caller's responsibility to update the group's selection weight in the\n// address selector.\nfunc (ag *addressGroup) update(nq *nodeQueue, weight uint64) {\n\tif nq.groupIndex == -1 || nq.groupIndex >= len(ag.nodes) || ag.nodes[nq.groupIndex] != nq {\n\t\tpanic(\"updated node queue is not in this address group\")\n\t}\n\tag.sumFlatWeight += weight - nq.flatWeight\n\tnq.flatWeight = weight\n\tag.groupWeight = ag.sumFlatWeight / uint64(len(ag.nodes))\n\tag.nodeSelect.Update(nq)\n}\n\n// remove removes the node queue from the address group. It is the caller's responsibility\n// to remove the address group from the address map if it is empty.\nfunc (ag *addressGroup) remove(nq *nodeQueue) {\n\tif nq.groupIndex == -1 || nq.groupIndex >= len(ag.nodes) || ag.nodes[nq.groupIndex] != nq {\n\t\tpanic(\"removed node queue is not in this address group\")\n\t}\n\n\tl := len(ag.nodes) - 1\n\tif nq.groupIndex != l {\n\t\tag.nodes[nq.groupIndex] = ag.nodes[l]\n\t\tag.nodes[nq.groupIndex].groupIndex = nq.groupIndex\n\t}\n\tnq.groupIndex = -1\n\tag.nodes = ag.nodes[:l]\n\tag.sumFlatWeight -= nq.flatWeight\n\tif l >= 1 {\n\t\tag.groupWeight = ag.sumFlatWeight / uint64(l)\n\t} else {\n\t\tag.groupWeight = 0\n\t}\n\tag.nodeSelect.Remove(nq)\n}\n\n// choose selects one of the node queues belonging to the address group\nfunc (ag *addressGroup) choose() *nodeQueue {\n\treturn ag.nodeSelect.Choose().(*nodeQueue)\n}\n\n// NewLimiter creates a new Limiter\nfunc NewLimiter(sumCostLimit uint) *Limiter {\n\tl := &Limiter{\n\t\taddressSelect: NewWeightedRandomSelect(func(item interface{}) uint64 { return item.(*addressGroup).groupWeight }),\n\t\tvalueSelect:   NewWeightedRandomSelect(func(item interface{}) uint64 { return item.(*nodeQueue).valueWeight }),\n\t\tnodes:         make(map[enode.ID]*nodeQueue),\n\t\taddresses:     make(map[string]*addressGroup),\n\t\tsumCostLimit:  sumCostLimit,\n\t}\n\tl.cond = sync.NewCond(&l.lock)\n\tgo l.processLoop()\n\treturn l\n}\n\n// selectionWeights calculates the selection weights of a node for both the address and\n// the value selector. The selection weight depends on the next request cost or the\n// summed cost of recently dropped requests.\nfunc (l *Limiter) selectionWeights(reqCost uint, value float64) (flatWeight, valueWeight uint64) {\n\tif value > l.maxValue {\n\t\tl.maxValue = value\n\t}\n\tif value > 0 {\n\t\t// normalize value to <= 1\n\t\tvalue /= l.maxValue\n\t}\n\tif reqCost > l.maxCost {\n\t\tl.maxCost = reqCost\n\t}\n\trelCost := float64(reqCost) / float64(l.maxCost)\n\tvar f float64\n\tif relCost <= 0.001 {\n\t\tf = 1\n\t} else {\n\t\tf = 0.001 / relCost\n\t}\n\tf *= maxSelectionWeight\n\tflatWeight, valueWeight = uint64(f), uint64(f*value)\n\tif flatWeight == 0 {\n\t\tflatWeight = 1\n\t}\n\treturn\n}\n\n// Add adds a new request to the node queue belonging to the given id. Value belongs\n// to the requesting node. 
A higher value gives the request a higher chance of being\n// served quickly in case of heavy load or a DDoS attack. Cost is a rough estimate\n// of the serving cost of the request. A lower cost also gives the request a\n// better chance.\nfunc (l *Limiter) Add(id enode.ID, address string, value float64, reqCost uint) chan chan struct{} {\n\tl.lock.Lock()\n\tdefer l.lock.Unlock()\n\n\tprocess := make(chan chan struct{}, 1)\n\tif l.quit {\n\t\tclose(process)\n\t\treturn process\n\t}\n\tif reqCost == 0 {\n\t\treqCost = 1\n\t}\n\tif nq, ok := l.nodes[id]; ok {\n\t\tif nq.queue != nil {\n\t\t\tnq.queue = append(nq.queue, request{process, reqCost})\n\t\t\tnq.sumCost += reqCost\n\t\t\tnq.value = value\n\t\t\tif address != nq.address {\n\t\t\t\t// known id sending request from a new address, move to different address group\n\t\t\t\tl.removeFromGroup(nq)\n\t\t\t\tl.addToGroup(nq, address)\n\t\t\t}\n\t\t} else {\n\t\t\t// already waiting on a penalty, just add to the penalty cost and drop the request\n\t\t\tnq.penaltyCost += reqCost\n\t\t\tl.update(nq)\n\t\t\tclose(process)\n\t\t\treturn process\n\t\t}\n\t} else {\n\t\tnq := &nodeQueue{\n\t\t\tqueue:      []request{{process, reqCost}},\n\t\t\tid:         id,\n\t\t\tvalue:      value,\n\t\t\tsumCost:    reqCost,\n\t\t\tgroupIndex: -1,\n\t\t}\n\t\tnq.flatWeight, nq.valueWeight = l.selectionWeights(reqCost, value)\n\t\tif len(l.nodes) == 0 {\n\t\t\tl.cond.Signal()\n\t\t}\n\t\tl.nodes[id] = nq\n\t\tif nq.valueWeight != 0 {\n\t\t\tl.valueSelect.Update(nq)\n\t\t}\n\t\tl.addToGroup(nq, address)\n\t}\n\tl.sumCost += reqCost\n\tif l.sumCost > l.sumCostLimit {\n\t\tl.dropRequests()\n\t}\n\treturn process\n}\n\n// update updates the selection weights of the node queue\nfunc (l *Limiter) update(nq *nodeQueue) {\n\tvar cost uint\n\tif nq.queue != nil {\n\t\tcost = nq.queue[0].cost\n\t} else {\n\t\tcost = nq.penaltyCost\n\t}\n\tflatWeight, valueWeight := l.selectionWeights(cost, nq.value)\n\tag := l.addresses[nq.address]\n\tag.update(nq, flatWeight)\n\tl.addressSelect.Update(ag)\n\tnq.valueWeight = valueWeight\n\tl.valueSelect.Update(nq)\n}\n\n// addToGroup adds the node queue to the given address group. 
The group is created if\n// it does not exist yet.\nfunc (l *Limiter) addToGroup(nq *nodeQueue, address string) {\n\tnq.address = address\n\tag := l.addresses[address]\n\tif ag == nil {\n\t\tag = &addressGroup{nodeSelect: NewWeightedRandomSelect(flatWeight)}\n\t\tl.addresses[address] = ag\n\t}\n\tag.add(nq)\n\tl.addressSelect.Update(ag)\n}\n\n// removeFromGroup removes the node queue from its address group\nfunc (l *Limiter) removeFromGroup(nq *nodeQueue) {\n\tag := l.addresses[nq.address]\n\tag.remove(nq)\n\tif len(ag.nodes) == 0 {\n\t\tdelete(l.addresses, nq.address)\n\t}\n\tl.addressSelect.Update(ag)\n}\n\n// remove removes the node queue from its address group, the nodes map and the value\n// selector\nfunc (l *Limiter) remove(nq *nodeQueue) {\n\tl.removeFromGroup(nq)\n\tif nq.valueWeight != 0 {\n\t\tl.valueSelect.Remove(nq)\n\t}\n\tdelete(l.nodes, nq.id)\n}\n\n// choose selects the next node queue to process.\nfunc (l *Limiter) choose() *nodeQueue {\n\tif l.valueSelect.IsEmpty() || l.selectAddressNext {\n\t\tif ag, ok := l.addressSelect.Choose().(*addressGroup); ok {\n\t\t\tl.selectAddressNext = false\n\t\t\treturn ag.choose()\n\t\t}\n\t}\n\tnq, _ := l.valueSelect.Choose().(*nodeQueue)\n\tl.selectAddressNext = true\n\treturn nq\n}\n\n// processLoop processes requests sequentially\nfunc (l *Limiter) processLoop() {\n\tl.lock.Lock()\n\tdefer l.lock.Unlock()\n\n\tfor {\n\t\tif l.quit {\n\t\t\tfor _, nq := range l.nodes {\n\t\t\t\tfor _, request := range nq.queue {\n\t\t\t\t\tclose(request.process)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tnq := l.choose()\n\t\tif nq == nil {\n\t\t\tl.cond.Wait()\n\t\t\tcontinue\n\t\t}\n\t\tif nq.queue != nil {\n\t\t\trequest := nq.queue[0]\n\t\t\tnq.queue = nq.queue[1:]\n\t\t\tnq.sumCost -= request.cost\n\t\t\tl.sumCost -= request.cost\n\t\t\tl.lock.Unlock()\n\t\t\tch := make(chan struct{})\n\t\t\trequest.process <- ch\n\t\t\t<-ch\n\t\t\tl.lock.Lock()\n\t\t\tif len(nq.queue) > 0 {\n\t\t\t\tl.update(nq)\n\t\t\t} else {\n\t\t\t\tl.remove(nq)\n\t\t\t}\n\t\t} else {\n\t\t\t// penalized queue removed, next request will be added to a clean queue\n\t\t\tl.remove(nq)\n\t\t}\n\t}\n}\n\n// Stop stops the processing loop. All queued and future requests are rejected.\nfunc (l *Limiter) Stop() {\n\tl.lock.Lock()\n\tdefer l.lock.Unlock()\n\n\tl.quit = true\n\tl.cond.Signal()\n}\n\ntype (\n\tdropList     []dropListItem\n\tdropListItem struct {\n\t\tnq       *nodeQueue\n\t\tpriority float64\n\t}\n)\n\nfunc (l dropList) Len() int {\n\treturn len(l)\n}\n\nfunc (l dropList) Less(i, j int) bool {\n\treturn l[i].priority < l[j].priority\n}\n\nfunc (l dropList) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n\n// dropRequests selects the nodes with the highest queued request cost to selection\n// weight ratio and drops their queued request. 
The empty node queues stay in the\n// selectors with a low selection weight in order to penalize these nodes.\nfunc (l *Limiter) dropRequests() {\n\tvar (\n\t\tsumValue float64\n\t\tlist     dropList\n\t)\n\tfor _, nq := range l.nodes {\n\t\tsumValue += nq.value\n\t}\n\tfor _, nq := range l.nodes {\n\t\tif nq.sumCost == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tw := 1 / float64(len(l.addresses)*len(l.addresses[nq.address].nodes))\n\t\tif sumValue > 0 {\n\t\t\tw += nq.value / sumValue\n\t\t}\n\t\tlist = append(list, dropListItem{\n\t\t\tnq:       nq,\n\t\t\tpriority: w / float64(nq.sumCost),\n\t\t})\n\t}\n\tsort.Sort(list)\n\tfor _, item := range list {\n\t\tfor _, request := range item.nq.queue {\n\t\t\tclose(request.process)\n\t\t}\n\t\t// make the queue penalized; no more requests are accepted until the node is\n\t\t// selected based on the penalty cost which is the cumulative cost of all dropped\n\t\t// requests. This ensures that sending excess requests is always penalized\n\t\t// and incentivizes the sender to stop for a while if no replies are received.\n\t\titem.nq.queue = nil\n\t\titem.nq.penaltyCost = item.nq.sumCost\n\t\tl.sumCost -= item.nq.sumCost // penalty costs are not counted in sumCost\n\t\titem.nq.sumCost = 0\n\t\tl.update(item.nq)\n\t\tif l.sumCost <= l.sumCostLimit/2 {\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
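  {
    "path": "les/utils/limiter_usage_example.go",
    "content": "// Editor's note: an illustrative serving-loop sketch; this file and its path\n// are hypothetical and not part of the go-ethereum repository. Add returns a\n// channel on which the limiter sends a \"start processing\" channel when the\n// request is selected for serving, or which it closes when the request is\n// dropped.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\nfunc main() {\n\tl := utils.NewLimiter(1000) // limit on the summed cost of queued requests\n\tdefer l.Stop()\n\n\t// value 1.0 marks a valuable peer; cost 10 is a rough serving cost estimate.\n\tprocess := l.Add(enode.ID{1}, \"192.0.2.1\", 1.0, 10)\n\tif ch, ok := <-process; ok {\n\t\tfmt.Println(\"serving request\")\n\t\tclose(ch) // tell the limiter that processing has finished\n\t} else {\n\t\tfmt.Println(\"request dropped\")\n\t}\n}\n"
  },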
  {
    "path": "les/utils/limiter_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage utils\n\nimport (\n\t\"math/rand\"\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\nconst (\n\tltTolerance = 0.03\n\tltRounds    = 7\n)\n\ntype (\n\tltNode struct {\n\t\taddr, id         int\n\t\tvalue, exp       float64\n\t\tcost             uint\n\t\treqRate          float64\n\t\treqMax, runCount int\n\t\tlastTotalCost    uint\n\n\t\tserved, dropped int\n\t}\n\n\tltResult struct {\n\t\tnode *ltNode\n\t\tch   chan struct{}\n\t}\n\n\tlimTest struct {\n\t\tlimiter            *Limiter\n\t\tresults            chan ltResult\n\t\trunCount           int\n\t\texpCost, totalCost uint\n\t}\n)\n\nfunc (lt *limTest) request(n *ltNode) {\n\tvar (\n\t\taddress string\n\t\tid      enode.ID\n\t)\n\tif n.addr >= 0 {\n\t\taddress = string([]byte{byte(n.addr)})\n\t} else {\n\t\tvar b [32]byte\n\t\trand.Read(b[:])\n\t\taddress = string(b[:])\n\t}\n\tif n.id >= 0 {\n\t\tid = enode.ID{byte(n.id)}\n\t} else {\n\t\trand.Read(id[:])\n\t}\n\tlt.runCount++\n\tn.runCount++\n\tcch := lt.limiter.Add(id, address, n.value, n.cost)\n\tgo func() {\n\t\tlt.results <- ltResult{n, <-cch}\n\t}()\n}\n\nfunc (lt *limTest) moreRequests(n *ltNode) {\n\tmaxStart := int(float64(lt.totalCost-n.lastTotalCost) * n.reqRate)\n\tif maxStart != 0 {\n\t\tn.lastTotalCost = lt.totalCost\n\t}\n\tfor n.reqMax > n.runCount && maxStart > 0 {\n\t\tlt.request(n)\n\t\tmaxStart--\n\t}\n}\n\nfunc (lt *limTest) process() {\n\tres := <-lt.results\n\tlt.runCount--\n\tres.node.runCount--\n\tif res.ch != nil {\n\t\tres.node.served++\n\t\tif res.node.exp != 0 {\n\t\t\tlt.expCost += res.node.cost\n\t\t}\n\t\tlt.totalCost += res.node.cost\n\t\tclose(res.ch)\n\t} else {\n\t\tres.node.dropped++\n\t}\n}\n\nfunc TestLimiter(t *testing.T) {\n\tlimTests := [][]*ltNode{\n\t\t{ // one id from an individual address and two ids from a shared address\n\t\t\t{addr: 0, id: 0, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.5},\n\t\t\t{addr: 1, id: 1, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25},\n\t\t\t{addr: 1, id: 2, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25},\n\t\t},\n\t\t{ // varying request costs\n\t\t\t{addr: 0, id: 0, value: 0, cost: 10, reqRate: 0.2, reqMax: 1, exp: 0.5},\n\t\t\t{addr: 1, id: 1, value: 0, cost: 3, reqRate: 0.5, reqMax: 1, exp: 0.25},\n\t\t\t{addr: 1, id: 2, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25},\n\t\t},\n\t\t{ // different request rate\n\t\t\t{addr: 0, id: 0, value: 0, cost: 1, reqRate: 2, reqMax: 2, exp: 0.5},\n\t\t\t{addr: 1, id: 1, value: 0, cost: 1, reqRate: 10, reqMax: 10, exp: 0.25},\n\t\t\t{addr: 1, id: 2, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25},\n\t\t},\n\t\t{ // adding value\n\t\t\t{addr: 0, id: 0, value: 3, cost: 1, reqRate: 1, reqMax: 1, exp: (0.5 + 
0.3) / 2},\n\t\t\t{addr: 1, id: 1, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25 / 2},\n\t\t\t{addr: 1, id: 2, value: 7, cost: 1, reqRate: 1, reqMax: 1, exp: (0.25 + 0.7) / 2},\n\t\t},\n\t\t{ // DoS attack from a single address with a single id\n\t\t\t{addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: 1, id: 1, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: 3, id: 3, value: 0, cost: 1, reqRate: 10, reqMax: 1000000000, exp: 0},\n\t\t},\n\t\t{ // DoS attack from a single address with different ids\n\t\t\t{addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: 1, id: 1, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: 3, id: -1, value: 0, cost: 1, reqRate: 1, reqMax: 1000000000, exp: 0},\n\t\t},\n\t\t{ // DDoS attack from different addresses with a single id\n\t\t\t{addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: 1, id: 1, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: -1, id: 3, value: 0, cost: 1, reqRate: 1, reqMax: 1000000000, exp: 0},\n\t\t},\n\t\t{ // DDoS attack from different addresses with different ids\n\t\t\t{addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: 1, id: 1, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333},\n\t\t\t{addr: -1, id: -1, value: 0, cost: 1, reqRate: 1, reqMax: 1000000000, exp: 0},\n\t\t},\n\t}\n\n\tlt := &limTest{\n\t\tlimiter: NewLimiter(100),\n\t\tresults: make(chan ltResult),\n\t}\n\tfor _, test := range limTests {\n\t\tlt.expCost, lt.totalCost = 0, 0\n\t\titerCount := 10000\n\t\tfor j := 0; j < ltRounds; j++ {\n\t\t\t// try to reach expected target range in multiple rounds with increasing iteration counts\n\t\t\tlast := j == ltRounds-1\n\t\t\tfor _, n := range test {\n\t\t\t\tlt.request(n)\n\t\t\t}\n\t\t\tfor i := 0; i < iterCount; i++ {\n\t\t\t\tlt.process()\n\t\t\t\tfor _, n := range test {\n\t\t\t\t\tlt.moreRequests(n)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor lt.runCount > 0 {\n\t\t\t\tlt.process()\n\t\t\t}\n\t\t\tif spamRatio := 1 - float64(lt.expCost)/float64(lt.totalCost); spamRatio > 0.5*(1+ltTolerance) {\n\t\t\t\tt.Errorf(\"Spam ratio too high (%f)\", spamRatio)\n\t\t\t}\n\t\t\tfail, success := false, true\n\t\t\tfor _, n := range test {\n\t\t\t\tif n.exp != 0 {\n\t\t\t\t\tif n.dropped > 0 {\n\t\t\t\t\t\tt.Errorf(\"Dropped %d requests of non-spam node\", n.dropped)\n\t\t\t\t\t\tfail = true\n\t\t\t\t\t}\n\t\t\t\t\tr := float64(n.served) * float64(n.cost) / float64(lt.expCost)\n\t\t\t\t\tif r < n.exp*(1-ltTolerance) || r > n.exp*(1+ltTolerance) {\n\t\t\t\t\t\tif last {\n\t\t\t\t\t\t\t// print error only if the target is still not reached in the last round\n\t\t\t\t\t\t\tt.Errorf(\"Request ratio (%f) does not match expected value (%f)\", r, n.exp)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tsuccess = false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif fail || success {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// neither failed nor succeeded; try more iterations to reach probability targets\n\t\t\titerCount *= 2\n\t\t}\n\t}\n\tlt.limiter.Stop()\n}\n"
  },
  {
    "path": "les/utils/timeutils.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage utils\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n)\n\ntype UpdateTimer struct {\n\tclock     mclock.Clock\n\tlock      sync.Mutex\n\tlast      mclock.AbsTime\n\tthreshold time.Duration\n}\n\nfunc NewUpdateTimer(clock mclock.Clock, threshold time.Duration) *UpdateTimer {\n\t// We don't accept the update threshold less than 0.\n\tif threshold < 0 {\n\t\treturn nil\n\t}\n\t// Don't panic for lazy users\n\tif clock == nil {\n\t\tclock = mclock.System{}\n\t}\n\treturn &UpdateTimer{\n\t\tclock:     clock,\n\t\tlast:      clock.Now(),\n\t\tthreshold: threshold,\n\t}\n}\n\nfunc (t *UpdateTimer) Update(callback func(diff time.Duration) bool) bool {\n\treturn t.UpdateAt(t.clock.Now(), callback)\n}\n\nfunc (t *UpdateTimer) UpdateAt(at mclock.AbsTime, callback func(diff time.Duration) bool) bool {\n\tt.lock.Lock()\n\tdefer t.lock.Unlock()\n\n\tdiff := time.Duration(at - t.last)\n\tif diff < 0 {\n\t\tdiff = 0\n\t}\n\tif diff < t.threshold {\n\t\treturn false\n\t}\n\tif callback(diff) {\n\t\tt.last = at\n\t\treturn true\n\t}\n\treturn false\n}\n"
  },
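  {
    "path": "les/utils/timeutils_usage_example.go",
    "content": "// Editor's note: an illustrative sketch; this file and its path are\n// hypothetical and not part of the go-ethereum repository. It shows the\n// UpdateTimer contract: the callback only runs once the threshold has\n// elapsed, and the timer base only advances when the callback returns true.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n)\n\nfunc main() {\n\tclock := &mclock.Simulated{}\n\ttimer := utils.NewUpdateTimer(clock, time.Second)\n\n\t// Below the threshold: the callback is skipped and Update returns false.\n\tfmt.Println(timer.Update(func(diff time.Duration) bool { return true }))\n\n\tclock.Run(2 * time.Second)\n\t// Threshold reached: the callback receives the elapsed time.\n\tfmt.Println(timer.Update(func(diff time.Duration) bool {\n\t\tfmt.Println(\"elapsed:\", diff)\n\t\treturn true\n\t}))\n}\n"
  },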
  {
    "path": "les/utils/timeutils_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage utils\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n)\n\nfunc TestUpdateTimer(t *testing.T) {\n\ttimer := NewUpdateTimer(mclock.System{}, -1)\n\tif timer != nil {\n\t\tt.Fatalf(\"Create update timer with negative threshold\")\n\t}\n\tsim := &mclock.Simulated{}\n\ttimer = NewUpdateTimer(sim, time.Second)\n\tif updated := timer.Update(func(diff time.Duration) bool { return true }); updated {\n\t\tt.Fatalf(\"Update the clock without reaching the threshold\")\n\t}\n\tsim.Run(time.Second)\n\tif updated := timer.Update(func(diff time.Duration) bool { return true }); !updated {\n\t\tt.Fatalf(\"Doesn't update the clock when reaching the threshold\")\n\t}\n\tif updated := timer.UpdateAt(sim.Now()+mclock.AbsTime(time.Second), func(diff time.Duration) bool { return true }); !updated {\n\t\tt.Fatalf(\"Doesn't update the clock when reaching the threshold\")\n\t}\n\ttimer = NewUpdateTimer(sim, 0)\n\tif updated := timer.Update(func(diff time.Duration) bool { return true }); !updated {\n\t\tt.Fatalf(\"Doesn't update the clock without threshold limitaion\")\n\t}\n}\n"
  },
  {
    "path": "les/utils/weighted_select.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage utils\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\n\t\"github.com/ethereum/go-ethereum/log\"\n)\n\ntype (\n\t// WeightedRandomSelect is capable of weighted random selection from a set of items\n\tWeightedRandomSelect struct {\n\t\troot *wrsNode\n\t\tidx  map[WrsItem]int\n\t\twfn  WeightFn\n\t}\n\tWrsItem  interface{}\n\tWeightFn func(interface{}) uint64\n)\n\n// NewWeightedRandomSelect returns a new WeightedRandomSelect structure\nfunc NewWeightedRandomSelect(wfn WeightFn) *WeightedRandomSelect {\n\treturn &WeightedRandomSelect{root: &wrsNode{maxItems: wrsBranches}, idx: make(map[WrsItem]int), wfn: wfn}\n}\n\n// Update updates an item's weight, adds it if it was non-existent or removes it if\n// the new weight is zero. Note that explicitly updating decreasing weights is not necessary.\nfunc (w *WeightedRandomSelect) Update(item WrsItem) {\n\tw.setWeight(item, w.wfn(item))\n}\n\n// Remove removes an item from the set\nfunc (w *WeightedRandomSelect) Remove(item WrsItem) {\n\tw.setWeight(item, 0)\n}\n\n// IsEmpty returns true if the set is empty\nfunc (w *WeightedRandomSelect) IsEmpty() bool {\n\treturn w.root.sumCost == 0\n}\n\n// setWeight sets an item's weight to a specific value (removes it if zero)\nfunc (w *WeightedRandomSelect) setWeight(item WrsItem, weight uint64) {\n\tif weight > math.MaxInt64-w.root.sumCost {\n\t\t// old weight is still included in sumCost, remove and check again\n\t\tw.setWeight(item, 0)\n\t\tif weight > math.MaxInt64-w.root.sumCost {\n\t\t\tlog.Error(\"WeightedRandomSelect overflow\", \"sumCost\", w.root.sumCost, \"new weight\", weight)\n\t\t\tweight = math.MaxInt64 - w.root.sumCost\n\t\t}\n\t}\n\tidx, ok := w.idx[item]\n\tif ok {\n\t\tw.root.setWeight(idx, weight)\n\t\tif weight == 0 {\n\t\t\tdelete(w.idx, item)\n\t\t}\n\t} else {\n\t\tif weight != 0 {\n\t\t\tif w.root.itemCnt == w.root.maxItems {\n\t\t\t\t// add a new level\n\t\t\t\tnewRoot := &wrsNode{sumCost: w.root.sumCost, itemCnt: w.root.itemCnt, level: w.root.level + 1, maxItems: w.root.maxItems * wrsBranches}\n\t\t\t\tnewRoot.items[0] = w.root\n\t\t\t\tnewRoot.weights[0] = w.root.sumCost\n\t\t\t\tw.root = newRoot\n\t\t\t}\n\t\t\tw.idx[item] = w.root.insert(item, weight)\n\t\t}\n\t}\n}\n\n// Choose randomly selects an item from the set, with a chance proportional to its\n// current weight. 
If the weight of the chosen element has been decreased since the\n// last stored value, returns it with a newWeight/oldWeight chance, otherwise just\n// updates its weight and selects another one\nfunc (w *WeightedRandomSelect) Choose() WrsItem {\n\tfor {\n\t\tif w.root.sumCost == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tval := uint64(rand.Int63n(int64(w.root.sumCost)))\n\t\tchoice, lastWeight := w.root.choose(val)\n\t\tweight := w.wfn(choice)\n\t\tif weight != lastWeight {\n\t\t\tw.setWeight(choice, weight)\n\t\t}\n\t\tif weight >= lastWeight || uint64(rand.Int63n(int64(lastWeight))) < weight {\n\t\t\treturn choice\n\t\t}\n\t}\n}\n\nconst wrsBranches = 8 // max number of branches in the wrsNode tree\n\n// wrsNode is a node of a tree structure that can store WrsItems or further wrsNodes.\ntype wrsNode struct {\n\titems                    [wrsBranches]interface{}\n\tweights                  [wrsBranches]uint64\n\tsumCost                  uint64\n\tlevel, itemCnt, maxItems int\n}\n\n// insert recursively inserts a new item to the tree and returns the item index\nfunc (n *wrsNode) insert(item WrsItem, weight uint64) int {\n\tbranch := 0\n\tfor n.items[branch] != nil && (n.level == 0 || n.items[branch].(*wrsNode).itemCnt == n.items[branch].(*wrsNode).maxItems) {\n\t\tbranch++\n\t\tif branch == wrsBranches {\n\t\t\tpanic(nil)\n\t\t}\n\t}\n\tn.itemCnt++\n\tn.sumCost += weight\n\tn.weights[branch] += weight\n\tif n.level == 0 {\n\t\tn.items[branch] = item\n\t\treturn branch\n\t}\n\tvar subNode *wrsNode\n\tif n.items[branch] == nil {\n\t\tsubNode = &wrsNode{maxItems: n.maxItems / wrsBranches, level: n.level - 1}\n\t\tn.items[branch] = subNode\n\t} else {\n\t\tsubNode = n.items[branch].(*wrsNode)\n\t}\n\tsubIdx := subNode.insert(item, weight)\n\treturn subNode.maxItems*branch + subIdx\n}\n\n// setWeight updates the weight of a certain item (which should exist) and returns\n// the change of the last weight value stored in the tree\nfunc (n *wrsNode) setWeight(idx int, weight uint64) uint64 {\n\tif n.level == 0 {\n\t\toldWeight := n.weights[idx]\n\t\tn.weights[idx] = weight\n\t\tdiff := weight - oldWeight\n\t\tn.sumCost += diff\n\t\tif weight == 0 {\n\t\t\tn.items[idx] = nil\n\t\t\tn.itemCnt--\n\t\t}\n\t\treturn diff\n\t}\n\tbranchItems := n.maxItems / wrsBranches\n\tbranch := idx / branchItems\n\tdiff := n.items[branch].(*wrsNode).setWeight(idx-branch*branchItems, weight)\n\tn.weights[branch] += diff\n\tn.sumCost += diff\n\tif weight == 0 {\n\t\tn.itemCnt--\n\t}\n\treturn diff\n}\n\n// choose recursively selects an item from the tree and returns it along with its weight\nfunc (n *wrsNode) choose(val uint64) (WrsItem, uint64) {\n\tfor i, w := range n.weights {\n\t\tif val < w {\n\t\t\tif n.level == 0 {\n\t\t\t\treturn n.items[i].(WrsItem), n.weights[i]\n\t\t\t}\n\t\t\treturn n.items[i].(*wrsNode).choose(val)\n\t\t}\n\t\tval -= w\n\t}\n\tpanic(nil)\n}\n"
  },
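  {
    "path": "les/utils/weighted_select_usage_example.go",
    "content": "// Editor's note: an illustrative sketch; this file and its path are\n// hypothetical and not part of the go-ethereum repository. The weight\n// callback is consulted on every Choose, so stale weights are lazily\n// corrected as described in the Choose documentation.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n)\n\ntype item struct {\n\tname   string\n\tweight uint64\n}\n\nfunc main() {\n\twrs := utils.NewWeightedRandomSelect(func(i interface{}) uint64 {\n\t\treturn i.(*item).weight\n\t})\n\ta, b := &item{\"a\", 1}, &item{\"b\", 3}\n\twrs.Update(a)\n\twrs.Update(b)\n\n\tcounts := make(map[string]int)\n\tfor i := 0; i < 1000; i++ {\n\t\tcounts[wrs.Choose().(*item).name]++\n\t}\n\tfmt.Println(counts) // \"b\" is expected roughly three times as often as \"a\"\n}\n"
  },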
  {
    "path": "les/utils/weighted_select_test.go",
    "content": "// Copyright 2016 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage utils\n\nimport (\n\t\"math/rand\"\n\t\"testing\"\n)\n\ntype testWrsItem struct {\n\tidx  int\n\twidx *int\n}\n\nfunc testWeight(i interface{}) uint64 {\n\tt := i.(*testWrsItem)\n\tw := *t.widx\n\tif w == -1 || w == t.idx {\n\t\treturn uint64(t.idx + 1)\n\t}\n\treturn 0\n}\n\nfunc TestWeightedRandomSelect(t *testing.T) {\n\ttestFn := func(cnt int) {\n\t\ts := NewWeightedRandomSelect(testWeight)\n\t\tw := -1\n\t\tlist := make([]testWrsItem, cnt)\n\t\tfor i := range list {\n\t\t\tlist[i] = testWrsItem{idx: i, widx: &w}\n\t\t\ts.Update(&list[i])\n\t\t}\n\t\tw = rand.Intn(cnt)\n\t\tc := s.Choose()\n\t\tif c == nil {\n\t\t\tt.Errorf(\"expected item, got nil\")\n\t\t} else {\n\t\t\tif c.(*testWrsItem).idx != w {\n\t\t\t\tt.Errorf(\"expected another item\")\n\t\t\t}\n\t\t}\n\t\tw = -2\n\t\tif s.Choose() != nil {\n\t\t\tt.Errorf(\"expected nil, got item\")\n\t\t}\n\t}\n\ttestFn(1)\n\ttestFn(10)\n\ttestFn(100)\n\ttestFn(1000)\n\ttestFn(10000)\n\ttestFn(100000)\n\ttestFn(1000000)\n}\n"
  },
  {
    "path": "les/vflux/client/api.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\n// PrivateClientAPI implements the vflux client side API\ntype PrivateClientAPI struct {\n\tvt *ValueTracker\n}\n\n// NewPrivateClientAPI creates a PrivateClientAPI\nfunc NewPrivateClientAPI(vt *ValueTracker) *PrivateClientAPI {\n\treturn &PrivateClientAPI{vt}\n}\n\n// parseNodeStr converts either an enode address or a plain hex node id to enode.ID\nfunc parseNodeStr(nodeStr string) (enode.ID, error) {\n\tif id, err := enode.ParseID(nodeStr); err == nil {\n\t\treturn id, nil\n\t}\n\tif node, err := enode.Parse(enode.ValidSchemes, nodeStr); err == nil {\n\t\treturn node.ID(), nil\n\t} else {\n\t\treturn enode.ID{}, err\n\t}\n}\n\n// RequestStats returns the current contents of the reference request basket, with\n// request values meaning average per request rather than total.\nfunc (api *PrivateClientAPI) RequestStats() []RequestStatsItem {\n\treturn api.vt.RequestStats()\n}\n\n// Distribution returns a distribution as a series of (X, Y) chart coordinates,\n// where the X axis is the response time in seconds while the Y axis is the amount of\n// service value received with a response time close to the X coordinate.\n// The distribution is optionally normalized to a sum of 1.\n// If nodeStr == \"\" then the global distribution is returned, otherwise the individual\n// distribution of the specified server node.\nfunc (api *PrivateClientAPI) Distribution(nodeStr string, normalized bool) (RtDistribution, error) {\n\tvar expFactor utils.ExpirationFactor\n\tif !normalized {\n\t\texpFactor = utils.ExpFactor(api.vt.StatsExpirer().LogOffset(mclock.Now()))\n\t}\n\tif nodeStr == \"\" {\n\t\treturn api.vt.RtStats().Distribution(normalized, expFactor), nil\n\t}\n\tif id, err := parseNodeStr(nodeStr); err == nil {\n\t\treturn api.vt.GetNode(id).RtStats().Distribution(normalized, expFactor), nil\n\t} else {\n\t\treturn RtDistribution{}, err\n\t}\n}\n\n// Timeout suggests a timeout value based on either the global distribution or the\n// distribution of the specified node. 
The parameter is the desired rate of timeouts\n// assuming a similar distribution in the future.\n// Note that the actual timeout should have a sensible minimum bound so that operating\n// under ideal working conditions for a long time (for example, using a local server\n// with very low response times) will not make it very hard for the system to accommodate\n// longer response times in the future.\nfunc (api *PrivateClientAPI) Timeout(nodeStr string, failRate float64) (float64, error) {\n\tif nodeStr == \"\" {\n\t\treturn float64(api.vt.RtStats().Timeout(failRate)) / float64(time.Second), nil\n\t}\n\tif id, err := parseNodeStr(nodeStr); err == nil {\n\t\treturn float64(api.vt.GetNode(id).RtStats().Timeout(failRate)) / float64(time.Second), nil\n\t} else {\n\t\treturn 0, err\n\t}\n}\n\n// Value calculates the total service value provided either globally or by the specified\n// server node, using a weight function based on the given timeout.\nfunc (api *PrivateClientAPI) Value(nodeStr string, timeout float64) (float64, error) {\n\twt := TimeoutWeights(time.Duration(timeout * float64(time.Second)))\n\texpFactor := utils.ExpFactor(api.vt.StatsExpirer().LogOffset(mclock.Now()))\n\tif nodeStr == \"\" {\n\t\treturn api.vt.RtStats().Value(wt, expFactor), nil\n\t}\n\tif id, err := parseNodeStr(nodeStr); err == nil {\n\t\treturn api.vt.GetNode(id).RtStats().Value(wt, expFactor), nil\n\t} else {\n\t\treturn 0, err\n\t}\n}\n"
  },
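  {
    "path": "les/vflux/client/api_example_test.go",
    "content": "// NOTE: illustrative sketch added during editing; not part of the upstream\n// go-ethereum codebase. It shows how the vflux client API wraps a ValueTracker:\n// an empty node string selects the global statistics and Timeout reports\n// seconds as a float64. The ValueTracker parameters mirror the ones used by\n// NewServerPool in serverpool.go; the request name \"sketchreq\" is made up.\n\npackage client\n\nimport (\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/ethdb/memorydb\"\n)\n\nfunc ExamplePrivateClientAPI() {\n\trequestList := []RequestInfo{{Name: \"sketchreq\", InitAmount: 1, InitValue: 1}}\n\tvt := NewValueTracker(memorydb.New(), &mclock.System{}, requestList, time.Minute,\n\t\t1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000))\n\tdefer vt.Stop()\n\n\tapi := NewPrivateClientAPI(vt)\n\t// Suggested timeout (in seconds) for an expected 10% timeout rate, based on\n\t// the global response time distribution.\n\tif timeout, err := api.Timeout(\"\", 0.1); err == nil {\n\t\t_ = timeout\n\t}\n}\n"
  },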
  {
    "path": "les/vflux/client/fillset.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\n// FillSet tries to read nodes from an input iterator and add them to a node set by\n// setting the specified node state flag(s) until the size of the set reaches the target.\n// Note that other mechanisms (like other FillSet instances reading from different inputs)\n// can also set the same flag(s) and FillSet will always care about the total number of\n// nodes having those flags.\ntype FillSet struct {\n\tlock          sync.Mutex\n\tcond          *sync.Cond\n\tns            *nodestate.NodeStateMachine\n\tinput         enode.Iterator\n\tclosed        bool\n\tflags         nodestate.Flags\n\tcount, target int\n}\n\n// NewFillSet creates a new FillSet\nfunc NewFillSet(ns *nodestate.NodeStateMachine, input enode.Iterator, flags nodestate.Flags) *FillSet {\n\tfs := &FillSet{\n\t\tns:    ns,\n\t\tinput: input,\n\t\tflags: flags,\n\t}\n\tfs.cond = sync.NewCond(&fs.lock)\n\n\tns.SubscribeState(flags, func(n *enode.Node, oldState, newState nodestate.Flags) {\n\t\tfs.lock.Lock()\n\t\tif oldState.Equals(flags) {\n\t\t\tfs.count--\n\t\t}\n\t\tif newState.Equals(flags) {\n\t\t\tfs.count++\n\t\t}\n\t\tif fs.target > fs.count {\n\t\t\tfs.cond.Signal()\n\t\t}\n\t\tfs.lock.Unlock()\n\t})\n\n\tgo fs.readLoop()\n\treturn fs\n}\n\n// readLoop keeps reading nodes from the input and setting the specified flags for them\n// whenever the node set size is under the current target\nfunc (fs *FillSet) readLoop() {\n\tfor {\n\t\tfs.lock.Lock()\n\t\tfor fs.target <= fs.count && !fs.closed {\n\t\t\tfs.cond.Wait()\n\t\t}\n\n\t\tfs.lock.Unlock()\n\t\tif !fs.input.Next() {\n\t\t\treturn\n\t\t}\n\t\tfs.ns.SetState(fs.input.Node(), fs.flags, nodestate.Flags{}, 0)\n\t}\n}\n\n// SetTarget sets the current target for node set size. If the previous target was not\n// reached and FillSet was still waiting for the next node from the input then the next\n// incoming node will be added to the set regardless of the target. This ensures that\n// all nodes coming from the input are eventually added to the set.\nfunc (fs *FillSet) SetTarget(target int) {\n\tfs.lock.Lock()\n\tdefer fs.lock.Unlock()\n\n\tfs.target = target\n\tif fs.target > fs.count {\n\t\tfs.cond.Signal()\n\t}\n}\n\n// Close shuts FillSet down and closes the input iterator\nfunc (fs *FillSet) Close() {\n\tfs.lock.Lock()\n\tdefer fs.lock.Unlock()\n\n\tfs.closed = true\n\tfs.input.Close()\n\tfs.cond.Signal()\n}\n"
  },
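  {
    "path": "les/vflux/client/fillset_example_test.go",
    "content": "// NOTE: illustrative sketch added during editing; not part of the upstream\n// go-ethereum codebase. It shows how a FillSet might be wired up: nodes are\n// read from an input iterator and flagged until enough of them carry the flag.\n// The setup, the \"sketch\" flag name and the node IDs are made up.\n\npackage client\n\nimport (\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\nfunc ExampleFillSet() {\n\tsetup := &nodestate.Setup{}\n\tsfSketch := setup.NewFlag(\"sketch\")\n\tns := nodestate.NewNodeStateMachine(nil, nil, &mclock.Simulated{}, setup)\n\n\tnodes := make([]*enode.Node, 10)\n\tfor i := range nodes {\n\t\tnodes[i] = enode.SignNull(&enr.Record{}, enode.ID{byte(i + 1)})\n\t}\n\tfs := NewFillSet(ns, enode.CycleNodes(nodes), sfSketch)\n\tns.Start()\n\n\t// Keep reading nodes from the iterator until three of them have the flag.\n\t// Other mechanisms setting the same flag would also count towards the target.\n\tfs.SetTarget(3)\n\n\tfs.Close()\n\tns.Stop()\n}\n"
  },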
  {
    "path": "les/vflux/client/queueiterator.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\n// QueueIterator returns nodes from the specified selectable set in the same order as\n// they entered the set.\ntype QueueIterator struct {\n\tlock sync.Mutex\n\tcond *sync.Cond\n\n\tns           *nodestate.NodeStateMachine\n\tqueue        []*enode.Node\n\tnextNode     *enode.Node\n\twaitCallback func(bool)\n\tfifo, closed bool\n}\n\n// NewQueueIterator creates a new QueueIterator. Nodes are selectable if they have all the required\n// and none of the disabled flags set. When a node is selected the selectedFlag is set which also\n// disables further selectability until it is removed or times out.\nfunc NewQueueIterator(ns *nodestate.NodeStateMachine, requireFlags, disableFlags nodestate.Flags, fifo bool, waitCallback func(bool)) *QueueIterator {\n\tqi := &QueueIterator{\n\t\tns:           ns,\n\t\tfifo:         fifo,\n\t\twaitCallback: waitCallback,\n\t}\n\tqi.cond = sync.NewCond(&qi.lock)\n\n\tns.SubscribeState(requireFlags.Or(disableFlags), func(n *enode.Node, oldState, newState nodestate.Flags) {\n\t\toldMatch := oldState.HasAll(requireFlags) && oldState.HasNone(disableFlags)\n\t\tnewMatch := newState.HasAll(requireFlags) && newState.HasNone(disableFlags)\n\t\tif newMatch == oldMatch {\n\t\t\treturn\n\t\t}\n\n\t\tqi.lock.Lock()\n\t\tdefer qi.lock.Unlock()\n\n\t\tif newMatch {\n\t\t\tqi.queue = append(qi.queue, n)\n\t\t} else {\n\t\t\tid := n.ID()\n\t\t\tfor i, qn := range qi.queue {\n\t\t\t\tif qn.ID() == id {\n\t\t\t\t\tcopy(qi.queue[i:len(qi.queue)-1], qi.queue[i+1:])\n\t\t\t\t\tqi.queue = qi.queue[:len(qi.queue)-1]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tqi.cond.Signal()\n\t})\n\treturn qi\n}\n\n// Next moves to the next selectable node.\nfunc (qi *QueueIterator) Next() bool {\n\tqi.lock.Lock()\n\tif !qi.closed && len(qi.queue) == 0 {\n\t\tif qi.waitCallback != nil {\n\t\t\tqi.waitCallback(true)\n\t\t}\n\t\tfor !qi.closed && len(qi.queue) == 0 {\n\t\t\tqi.cond.Wait()\n\t\t}\n\t\tif qi.waitCallback != nil {\n\t\t\tqi.waitCallback(false)\n\t\t}\n\t}\n\tif qi.closed {\n\t\tqi.nextNode = nil\n\t\tqi.lock.Unlock()\n\t\treturn false\n\t}\n\t// Move to the next node in queue.\n\tif qi.fifo {\n\t\tqi.nextNode = qi.queue[0]\n\t\tcopy(qi.queue[:len(qi.queue)-1], qi.queue[1:])\n\t\tqi.queue = qi.queue[:len(qi.queue)-1]\n\t} else {\n\t\tqi.nextNode = qi.queue[len(qi.queue)-1]\n\t\tqi.queue = qi.queue[:len(qi.queue)-1]\n\t}\n\tqi.lock.Unlock()\n\treturn true\n}\n\n// Close ends the iterator.\nfunc (qi *QueueIterator) Close() {\n\tqi.lock.Lock()\n\tqi.closed = true\n\tqi.lock.Unlock()\n\tqi.cond.Signal()\n}\n\n// Node returns the 
current node.\nfunc (qi *QueueIterator) Node() *enode.Node {\n\tqi.lock.Lock()\n\tdefer qi.lock.Unlock()\n\n\treturn qi.nextNode\n}\n"
  },
  {
    "path": "les/vflux/client/queueiterator_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\nfunc testNode(i int) *enode.Node {\n\treturn enode.SignNull(new(enr.Record), testNodeID(i))\n}\n\nfunc TestQueueIteratorFIFO(t *testing.T) {\n\ttestQueueIterator(t, true)\n}\n\nfunc TestQueueIteratorLIFO(t *testing.T) {\n\ttestQueueIterator(t, false)\n}\n\nfunc testQueueIterator(t *testing.T, fifo bool) {\n\tns := nodestate.NewNodeStateMachine(nil, nil, &mclock.Simulated{}, testSetup)\n\tqi := NewQueueIterator(ns, sfTest2, sfTest3.Or(sfTest4), fifo, nil)\n\tns.Start()\n\tfor i := 1; i <= iterTestNodeCount; i++ {\n\t\tns.SetState(testNode(i), sfTest1, nodestate.Flags{}, 0)\n\t}\n\tnext := func() int {\n\t\tch := make(chan struct{})\n\t\tgo func() {\n\t\t\tqi.Next()\n\t\t\tclose(ch)\n\t\t}()\n\t\tselect {\n\t\tcase <-ch:\n\t\tcase <-time.After(time.Second * 5):\n\t\t\tt.Fatalf(\"Iterator.Next() timeout\")\n\t\t}\n\t\tnode := qi.Node()\n\t\tns.SetState(node, sfTest4, nodestate.Flags{}, 0)\n\t\treturn testNodeIndex(node.ID())\n\t}\n\texp := func(i int) {\n\t\tn := next()\n\t\tif n != i {\n\t\t\tt.Errorf(\"Wrong item returned by iterator (expected %d, got %d)\", i, n)\n\t\t}\n\t}\n\texplist := func(list []int) {\n\t\tfor i := range list {\n\t\t\tif fifo {\n\t\t\t\texp(list[i])\n\t\t\t} else {\n\t\t\t\texp(list[len(list)-1-i])\n\t\t\t}\n\t\t}\n\t}\n\n\tns.SetState(testNode(1), sfTest2, nodestate.Flags{}, 0)\n\tns.SetState(testNode(2), sfTest2, nodestate.Flags{}, 0)\n\tns.SetState(testNode(3), sfTest2, nodestate.Flags{}, 0)\n\texplist([]int{1, 2, 3})\n\tns.SetState(testNode(4), sfTest2, nodestate.Flags{}, 0)\n\tns.SetState(testNode(5), sfTest2, nodestate.Flags{}, 0)\n\tns.SetState(testNode(6), sfTest2, nodestate.Flags{}, 0)\n\tns.SetState(testNode(5), sfTest3, nodestate.Flags{}, 0)\n\texplist([]int{4, 6})\n\tns.SetState(testNode(1), nodestate.Flags{}, sfTest4, 0)\n\tns.SetState(testNode(2), nodestate.Flags{}, sfTest4, 0)\n\tns.SetState(testNode(3), nodestate.Flags{}, sfTest4, 0)\n\tns.SetState(testNode(2), sfTest3, nodestate.Flags{}, 0)\n\tns.SetState(testNode(2), nodestate.Flags{}, sfTest3, 0)\n\texplist([]int{1, 3, 2})\n\tns.Stop()\n}\n"
  },
  {
    "path": "les/vflux/client/requestbasket.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"io\"\n\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\nconst basketFactor = 1000000 // reference basket amount and value scale factor\n\n// referenceBasket keeps track of global request usage statistics and the usual prices\n// of each used request type relative to each other. The amounts in the basket are scaled\n// up by basketFactor because of the exponential expiration of long-term statistical data.\n// Values are scaled so that the sum of all amounts and the sum of all values are equal.\n//\n// reqValues represent the internal relative value estimates for each request type and are\n// calculated as value / amount. The average reqValue of all used requests is 1.\n// In other words: SUM(refBasket[type].amount * reqValue[type]) = SUM(refBasket[type].amount)\ntype referenceBasket struct {\n\tbasket    requestBasket\n\treqValues []float64 // contents are read only, new slice is created for each update\n}\n\n// serverBasket collects served request amount and value statistics for a single server.\n//\n// Values are gradually transferred to the global reference basket with a long time\n// constant so that each server basket represents long term usage and price statistics.\n// When the transferred part is added to the reference basket the values are scaled so\n// that their sum equals the total value calculated according to the previous reqValues.\n// The ratio of request values coming from the server basket represent the pricing of\n// the specific server and modify the global estimates with a weight proportional to\n// the amount of service provided by the server.\ntype serverBasket struct {\n\tbasket   requestBasket\n\trvFactor float64\n}\n\ntype (\n\t// requestBasket holds amounts and values for each request type.\n\t// These values are exponentially expired (see utils.ExpiredValue). The power of 2\n\t// exponent is applicable to all values within.\n\trequestBasket struct {\n\t\titems []basketItem\n\t\texp   uint64\n\t}\n\t// basketItem holds amount and value for a single request type. 
Value is the total\n\t// relative request value accumulated for served requests while amount is the counter\n\t// for each request type.\n\t// Note that these values are both scaled up by basketFactor because of the exponential\n\t// expiration.\n\tbasketItem struct {\n\t\tamount, value uint64\n\t}\n)\n\n// setExp sets the power of 2 exponent of the structure, scaling base values (the amounts\n// and request values) up or down if necessary.\nfunc (b *requestBasket) setExp(exp uint64) {\n\tif exp > b.exp {\n\t\tshift := exp - b.exp\n\t\tfor i, item := range b.items {\n\t\t\titem.amount >>= shift\n\t\t\titem.value >>= shift\n\t\t\tb.items[i] = item\n\t\t}\n\t\tb.exp = exp\n\t}\n\tif exp < b.exp {\n\t\tshift := b.exp - exp\n\t\tfor i, item := range b.items {\n\t\t\titem.amount <<= shift\n\t\t\titem.value <<= shift\n\t\t\tb.items[i] = item\n\t\t}\n\t\tb.exp = exp\n\t}\n}\n\n// init initializes a new server basket with the given service vector size (number of\n// different request types)\nfunc (s *serverBasket) init(size int) {\n\tif s.basket.items == nil {\n\t\ts.basket.items = make([]basketItem, size)\n\t}\n}\n\n// add adds the given type and amount of requests to the basket. Cost is calculated\n// according to the server's own cost table.\nfunc (s *serverBasket) add(reqType, reqAmount uint32, reqCost uint64, expFactor utils.ExpirationFactor) {\n\ts.basket.setExp(expFactor.Exp)\n\ti := &s.basket.items[reqType]\n\ti.amount += uint64(float64(uint64(reqAmount)*basketFactor) * expFactor.Factor)\n\ti.value += uint64(float64(reqCost) * s.rvFactor * expFactor.Factor)\n}\n\n// updateRvFactor updates the request value factor that scales server costs into the\n// local value dimensions.\nfunc (s *serverBasket) updateRvFactor(rvFactor float64) {\n\ts.rvFactor = rvFactor\n}\n\n// transfer decreases amounts and values in the basket with the given ratio and\n// moves the removed amounts into a new basket which is returned and can be added\n// to the global reference basket.\nfunc (s *serverBasket) transfer(ratio float64) requestBasket {\n\tres := requestBasket{\n\t\titems: make([]basketItem, len(s.basket.items)),\n\t\texp:   s.basket.exp,\n\t}\n\tfor i, v := range s.basket.items {\n\t\tta := uint64(float64(v.amount) * ratio)\n\t\ttv := uint64(float64(v.value) * ratio)\n\t\tif ta > v.amount {\n\t\t\tta = v.amount\n\t\t}\n\t\tif tv > v.value {\n\t\t\ttv = v.value\n\t\t}\n\t\ts.basket.items[i] = basketItem{v.amount - ta, v.value - tv}\n\t\tres.items[i] = basketItem{ta, tv}\n\t}\n\treturn res\n}\n\n// init initializes the reference basket with the given service vector size (number of\n// different request types)\nfunc (r *referenceBasket) init(size int) {\n\tr.reqValues = make([]float64, size)\n\tr.normalize()\n\tr.updateReqValues()\n}\n\n// add adds the transferred part of a server basket to the reference basket while scaling\n// value amounts so that their sum equals the total value calculated according to the\n// previous reqValues.\nfunc (r *referenceBasket) add(newBasket requestBasket) {\n\tr.basket.setExp(newBasket.exp)\n\t// scale newBasket to match service unit value\n\tvar (\n\t\ttotalCost  uint64\n\t\ttotalValue float64\n\t)\n\tfor i, v := range newBasket.items {\n\t\ttotalCost += v.value\n\t\ttotalValue += float64(v.amount) * r.reqValues[i]\n\t}\n\tif totalCost > 0 {\n\t\t// add to reference with scaled values\n\t\tscaleValues := totalValue / float64(totalCost)\n\t\tfor i, v := range newBasket.items {\n\t\t\tr.basket.items[i].amount += v.amount\n\t\t\tr.basket.items[i].value += 
uint64(float64(v.value) * scaleValues)\n\t\t}\n\t}\n\tr.updateReqValues()\n}\n\n// updateReqValues recalculates reqValues after adding transferred baskets. Note that\n// values should be normalized first.\nfunc (r *referenceBasket) updateReqValues() {\n\tr.reqValues = make([]float64, len(r.reqValues))\n\tfor i, b := range r.basket.items {\n\t\tif b.amount > 0 {\n\t\t\tr.reqValues[i] = float64(b.value) / float64(b.amount)\n\t\t} else {\n\t\t\tr.reqValues[i] = 0\n\t\t}\n\t}\n}\n\n// normalize ensures that the sum of values equals the sum of amounts in the basket.\nfunc (r *referenceBasket) normalize() {\n\tvar sumAmount, sumValue uint64\n\tfor _, b := range r.basket.items {\n\t\tsumAmount += b.amount\n\t\tsumValue += b.value\n\t}\n\tadd := float64(int64(sumAmount-sumValue)) / float64(sumValue)\n\tfor i, b := range r.basket.items {\n\t\tb.value += uint64(int64(float64(b.value) * add))\n\t\tr.basket.items[i] = b\n\t}\n}\n\n// reqValueFactor calculates the request value factor applicable to the server with\n// the given announced request cost list\nfunc (r *referenceBasket) reqValueFactor(costList []uint64) float64 {\n\tvar (\n\t\ttotalCost  float64\n\t\ttotalValue uint64\n\t)\n\tfor i, b := range r.basket.items {\n\t\ttotalCost += float64(costList[i]) * float64(b.amount) // use floats to avoid overflow\n\t\ttotalValue += b.value\n\t}\n\tif totalCost < 1 {\n\t\treturn 0\n\t}\n\treturn float64(totalValue) * basketFactor / totalCost\n}\n\n// EncodeRLP implements rlp.Encoder\nfunc (b *basketItem) EncodeRLP(w io.Writer) error {\n\treturn rlp.Encode(w, []interface{}{b.amount, b.value})\n}\n\n// DecodeRLP implements rlp.Decoder\nfunc (b *basketItem) DecodeRLP(s *rlp.Stream) error {\n\tvar item struct {\n\t\tAmount, Value uint64\n\t}\n\tif err := s.Decode(&item); err != nil {\n\t\treturn err\n\t}\n\tb.amount, b.value = item.Amount, item.Value\n\treturn nil\n}\n\n// EncodeRLP implements rlp.Encoder\nfunc (r *requestBasket) EncodeRLP(w io.Writer) error {\n\treturn rlp.Encode(w, []interface{}{r.items, r.exp})\n}\n\n// DecodeRLP implements rlp.Decoder\nfunc (r *requestBasket) DecodeRLP(s *rlp.Stream) error {\n\tvar enc struct {\n\t\tItems []basketItem\n\t\tExp   uint64\n\t}\n\tif err := s.Decode(&enc); err != nil {\n\t\treturn err\n\t}\n\tr.items, r.exp = enc.Items, enc.Exp\n\treturn nil\n}\n\n// convertMapping converts a basket loaded from the database into the current format.\n// If the available request types and their mapping into the service vector differ from\n// the one used when saving the basket then this function reorders old fields and fills\n// in previously unknown fields by scaling up amounts and values taken from the\n// initialization basket.\nfunc (r requestBasket) convertMapping(oldMapping, newMapping []string, initBasket requestBasket) requestBasket {\n\tnameMap := make(map[string]int)\n\tfor i, name := range oldMapping {\n\t\tnameMap[name] = i\n\t}\n\trc := requestBasket{items: make([]basketItem, len(newMapping))}\n\tvar scale, oldScale, newScale float64\n\tfor i, name := range newMapping {\n\t\tif ii, ok := nameMap[name]; ok {\n\t\t\trc.items[i] = r.items[ii]\n\t\t\toldScale += float64(initBasket.items[i].amount) * float64(initBasket.items[i].amount)\n\t\t\tnewScale += float64(rc.items[i].amount) * float64(initBasket.items[i].amount)\n\t\t}\n\t}\n\tif oldScale > 1e-10 {\n\t\tscale = newScale / oldScale\n\t} else {\n\t\tscale = 1\n\t}\n\tfor i, name := range newMapping {\n\t\tif _, ok := nameMap[name]; !ok {\n\t\t\trc.items[i].amount = uint64(float64(initBasket.items[i].amount) * 
scale)\n\t\t\trc.items[i].value = uint64(float64(initBasket.items[i].value) * scale)\n\t\t}\n\t}\n\treturn rc\n}\n"
  },
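  {
    "path": "les/vflux/client/requestbasket_setexp_test.go",
    "content": "// NOTE: illustrative test added during editing; not part of the upstream\n// go-ethereum codebase. It demonstrates the setExp contract documented in\n// requestbasket.go: basket contents are base values with a power-of-2\n// exponent, so raising the exponent by N shifts amounts and values right by\n// N bits and lowering it shifts them left.\n\npackage client\n\nimport \"testing\"\n\nfunc TestBasketSetExpScaling(t *testing.T) {\n\tb := requestBasket{items: []basketItem{{amount: 4 * basketFactor, value: 8 * basketFactor}}}\n\tb.setExp(2) // raising the exponent by 2 scales base values down by 4\n\tcheckU64(t, \"amount\", b.items[0].amount, basketFactor)\n\tcheckU64(t, \"value\", b.items[0].value, 2*basketFactor)\n\tb.setExp(0) // lowering it back scales them up again\n\tcheckU64(t, \"amount\", b.items[0].amount, 4*basketFactor)\n\tcheckU64(t, \"value\", b.items[0].value, 8*basketFactor)\n}\n"
  },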
  {
    "path": "les/vflux/client/requestbasket_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"math/rand\"\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n)\n\nfunc checkU64(t *testing.T, name string, value, exp uint64) {\n\tif value != exp {\n\t\tt.Errorf(\"Incorrect value for %s: got %d, expected %d\", name, value, exp)\n\t}\n}\n\nfunc checkF64(t *testing.T, name string, value, exp, tol float64) {\n\tif value < exp-tol || value > exp+tol {\n\t\tt.Errorf(\"Incorrect value for %s: got %f, expected %f\", name, value, exp)\n\t}\n}\n\nfunc TestServerBasket(t *testing.T) {\n\tvar s serverBasket\n\ts.init(2)\n\t// add some requests with different request value factors\n\ts.updateRvFactor(1)\n\tnoexp := utils.ExpirationFactor{Factor: 1}\n\ts.add(0, 1000, 10000, noexp)\n\ts.add(1, 3000, 60000, noexp)\n\ts.updateRvFactor(10)\n\ts.add(0, 4000, 4000, noexp)\n\ts.add(1, 2000, 4000, noexp)\n\ts.updateRvFactor(10)\n\t// check basket contents directly\n\tcheckU64(t, \"s.basket[0].amount\", s.basket.items[0].amount, 5000*basketFactor)\n\tcheckU64(t, \"s.basket[0].value\", s.basket.items[0].value, 50000)\n\tcheckU64(t, \"s.basket[1].amount\", s.basket.items[1].amount, 5000*basketFactor)\n\tcheckU64(t, \"s.basket[1].value\", s.basket.items[1].value, 100000)\n\t// transfer 50% of the contents of the basket\n\ttransfer1 := s.transfer(0.5)\n\tcheckU64(t, \"transfer1[0].amount\", transfer1.items[0].amount, 2500*basketFactor)\n\tcheckU64(t, \"transfer1[0].value\", transfer1.items[0].value, 25000)\n\tcheckU64(t, \"transfer1[1].amount\", transfer1.items[1].amount, 2500*basketFactor)\n\tcheckU64(t, \"transfer1[1].value\", transfer1.items[1].value, 50000)\n\t// add more requests\n\ts.updateRvFactor(100)\n\ts.add(0, 1000, 100, noexp)\n\t// transfer 25% of the contents of the basket\n\ttransfer2 := s.transfer(0.25)\n\tcheckU64(t, \"transfer2[0].amount\", transfer2.items[0].amount, (2500+1000)/4*basketFactor)\n\tcheckU64(t, \"transfer2[0].value\", transfer2.items[0].value, (25000+10000)/4)\n\tcheckU64(t, \"transfer2[1].amount\", transfer2.items[1].amount, 2500/4*basketFactor)\n\tcheckU64(t, \"transfer2[1].value\", transfer2.items[1].value, 50000/4)\n}\n\nfunc TestConvertMapping(t *testing.T) {\n\tb := requestBasket{items: []basketItem{{3, 3}, {1, 1}, {2, 2}}}\n\toldMap := []string{\"req3\", \"req1\", \"req2\"}\n\tnewMap := []string{\"req1\", \"req2\", \"req3\", \"req4\"}\n\tinit := requestBasket{items: []basketItem{{2, 2}, {4, 4}, {6, 6}, {8, 8}}}\n\tbc := b.convertMapping(oldMap, newMap, init)\n\tcheckU64(t, \"bc[0].amount\", bc.items[0].amount, 1)\n\tcheckU64(t, \"bc[1].amount\", bc.items[1].amount, 2)\n\tcheckU64(t, \"bc[2].amount\", bc.items[2].amount, 3)\n\tcheckU64(t, \"bc[3].amount\", bc.items[3].amount, 4) // 8 should be scaled down to 
4\n}\n\nfunc TestReqValueFactor(t *testing.T) {\n\tvar ref referenceBasket\n\tref.basket = requestBasket{items: make([]basketItem, 4)}\n\tfor i := range ref.basket.items {\n\t\tref.basket.items[i].amount = uint64(i+1) * basketFactor\n\t\tref.basket.items[i].value = uint64(i+1) * basketFactor\n\t}\n\tref.init(4)\n\trvf := ref.reqValueFactor([]uint64{1000, 2000, 3000, 4000})\n\t// expected value is (1000000+2000000+3000000+4000000) / (1*1000+2*2000+3*3000+4*4000) = 10000000/30000 = 333.333\n\tcheckF64(t, \"reqValueFactor\", rvf, 333.333, 1)\n}\n\nfunc TestNormalize(t *testing.T) {\n\tfor cycle := 0; cycle < 100; cycle += 1 {\n\t\t// Initialize data for testing\n\t\tvalueRange, lower := 1000000, 1000000\n\t\tref := referenceBasket{basket: requestBasket{items: make([]basketItem, 10)}}\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tref.basket.items[i].amount = uint64(rand.Intn(valueRange) + lower)\n\t\t\tref.basket.items[i].value = uint64(rand.Intn(valueRange) + lower)\n\t\t}\n\t\tref.normalize()\n\n\t\t// Check whether SUM(amount) ~= SUM(value)\n\t\tvar sumAmount, sumValue uint64\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tsumAmount += ref.basket.items[i].amount\n\t\t\tsumValue += ref.basket.items[i].value\n\t\t}\n\t\tvar epsilon = 0.01\n\t\tif float64(sumAmount)*(1+epsilon) < float64(sumValue) || float64(sumAmount)*(1-epsilon) > float64(sumValue) {\n\t\t\tt.Fatalf(\"Failed to normalize sumAmount: %d sumValue: %d\", sumAmount, sumValue)\n\t\t}\n\t}\n}\n\nfunc TestReqValueAdjustment(t *testing.T) {\n\tvar s1, s2 serverBasket\n\ts1.init(3)\n\ts2.init(3)\n\tcost1 := []uint64{30000, 60000, 90000}\n\tcost2 := []uint64{100000, 200000, 300000}\n\tvar ref referenceBasket\n\tref.basket = requestBasket{items: make([]basketItem, 3)}\n\tfor i := range ref.basket.items {\n\t\tref.basket.items[i].amount = 123 * basketFactor\n\t\tref.basket.items[i].value = 123 * basketFactor\n\t}\n\tref.init(3)\n\t// initial reqValues are expected to be {1, 1, 1}\n\tcheckF64(t, \"reqValues[0]\", ref.reqValues[0], 1, 0.01)\n\tcheckF64(t, \"reqValues[1]\", ref.reqValues[1], 1, 0.01)\n\tcheckF64(t, \"reqValues[2]\", ref.reqValues[2], 1, 0.01)\n\tvar logOffset utils.Fixed64\n\tfor period := 0; period < 1000; period++ {\n\t\texp := utils.ExpFactor(logOffset)\n\t\ts1.updateRvFactor(ref.reqValueFactor(cost1))\n\t\ts2.updateRvFactor(ref.reqValueFactor(cost2))\n\t\t// throw in random requests into each basket using their internal pricing\n\t\tfor i := 0; i < 1000; i++ {\n\t\t\treqType, reqAmount := uint32(rand.Intn(3)), uint32(rand.Intn(10)+1)\n\t\t\treqCost := uint64(reqAmount) * cost1[reqType]\n\t\t\ts1.add(reqType, reqAmount, reqCost, exp)\n\t\t\treqType, reqAmount = uint32(rand.Intn(3)), uint32(rand.Intn(10)+1)\n\t\t\treqCost = uint64(reqAmount) * cost2[reqType]\n\t\t\ts2.add(reqType, reqAmount, reqCost, exp)\n\t\t}\n\t\tref.add(s1.transfer(0.1))\n\t\tref.add(s2.transfer(0.1))\n\t\tref.normalize()\n\t\tref.updateReqValues()\n\t\tlogOffset += utils.Float64ToFixed64(0.1)\n\t}\n\tcheckF64(t, \"reqValues[0]\", ref.reqValues[0], 0.5, 0.01)\n\tcheckF64(t, \"reqValues[1]\", ref.reqValues[1], 1, 0.01)\n\tcheckF64(t, \"reqValues[2]\", ref.reqValues[2], 1.5, 0.01)\n}\n"
  },
  {
    "path": "les/vflux/client/serverpool.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"errors\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/metrics\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\nconst (\n\tminTimeout          = time.Millisecond * 500 // minimum request timeout suggested by the server pool\n\ttimeoutRefresh      = time.Second * 5        // recalculate timeout if older than this\n\tdialCost            = 10000                  // cost of a TCP dial (used for known node selection weight calculation)\n\tdialWaitStep        = 1.5                    // exponential multiplier of redial wait time when no value was provided by the server\n\tqueryCost           = 500                    // cost of a UDP pre-negotiation query\n\tqueryWaitStep       = 1.02                   // exponential multiplier of redial wait time when no value was provided by the server\n\twaitThreshold       = time.Hour * 2000       // drop node if waiting time is over the threshold\n\tnodeWeightMul       = 1000000                // multiplier constant for node weight calculation\n\tnodeWeightThreshold = 100                    // minimum weight for keeping a node in the the known (valuable) set\n\tminRedialWait       = 10                     // minimum redial wait time in seconds\n\tpreNegLimit         = 5                      // maximum number of simultaneous pre-negotiation queries\n\twarnQueryFails      = 20                     // number of consecutive UDP query failures before we print a warning\n\tmaxQueryFails       = 100                    // number of consecutive UDP query failures when then chance of skipping a query reaches 50%\n)\n\n// ServerPool provides a node iterator for dial candidates. 
The output is a mix of newly discovered\n// nodes, a weighted random selection of known (previously valuable) nodes and trusted/paid nodes.\ntype ServerPool struct {\n\tclock    mclock.Clock\n\tunixTime func() int64\n\tdb       ethdb.KeyValueStore\n\n\tns                  *nodestate.NodeStateMachine\n\tvt                  *ValueTracker\n\tmixer               *enode.FairMix\n\tmixSources          []enode.Iterator\n\tdialIterator        enode.Iterator\n\tvalidSchemes        enr.IdentityScheme\n\ttrustedURLs         []string\n\tfillSet             *FillSet\n\tstarted, queryFails uint32\n\n\ttimeoutLock      sync.RWMutex\n\ttimeout          time.Duration\n\ttimeWeights      ResponseTimeWeights\n\ttimeoutRefreshed mclock.AbsTime\n\n\tsuggestedTimeoutGauge, totalValueGauge metrics.Gauge\n\tsessionValueMeter                      metrics.Meter\n}\n\n// nodeHistory keeps track of dial costs which determine node weight together with the\n// service value calculated by ValueTracker.\ntype nodeHistory struct {\n\tdialCost                       utils.ExpiredValue\n\tredialWaitStart, redialWaitEnd int64 // unix time (seconds)\n}\n\ntype nodeHistoryEnc struct {\n\tDialCost                       utils.ExpiredValue\n\tRedialWaitStart, RedialWaitEnd uint64\n}\n\n// QueryFunc sends a pre-negotiation query and blocks until a response arrives or timeout occurs.\n// It returns 1 if the remote node has confirmed that connection is possible, 0 if not\n// possible and -1 if no response arrived (timeout).\ntype QueryFunc func(*enode.Node) int\n\nvar (\n\tclientSetup       = &nodestate.Setup{Version: 2}\n\tsfHasValue        = clientSetup.NewPersistentFlag(\"hasValue\")\n\tsfQuery           = clientSetup.NewFlag(\"query\")\n\tsfCanDial         = clientSetup.NewFlag(\"canDial\")\n\tsfDialing         = clientSetup.NewFlag(\"dialed\")\n\tsfWaitDialTimeout = clientSetup.NewFlag(\"dialTimeout\")\n\tsfConnected       = clientSetup.NewFlag(\"connected\")\n\tsfRedialWait      = clientSetup.NewFlag(\"redialWait\")\n\tsfAlwaysConnect   = clientSetup.NewFlag(\"alwaysConnect\")\n\tsfDialProcess     = nodestate.MergeFlags(sfQuery, sfCanDial, sfDialing, sfConnected, sfRedialWait)\n\n\tsfiNodeHistory = clientSetup.NewPersistentField(\"nodeHistory\", reflect.TypeOf(nodeHistory{}),\n\t\tfunc(field interface{}) ([]byte, error) {\n\t\t\tif n, ok := field.(nodeHistory); ok {\n\t\t\t\tne := nodeHistoryEnc{\n\t\t\t\t\tDialCost:        n.dialCost,\n\t\t\t\t\tRedialWaitStart: uint64(n.redialWaitStart),\n\t\t\t\t\tRedialWaitEnd:   uint64(n.redialWaitEnd),\n\t\t\t\t}\n\t\t\t\tenc, err := rlp.EncodeToBytes(&ne)\n\t\t\t\treturn enc, err\n\t\t\t}\n\t\t\treturn nil, errors.New(\"invalid field type\")\n\t\t},\n\t\tfunc(enc []byte) (interface{}, error) {\n\t\t\tvar ne nodeHistoryEnc\n\t\t\terr := rlp.DecodeBytes(enc, &ne)\n\t\t\tn := nodeHistory{\n\t\t\t\tdialCost:        ne.DialCost,\n\t\t\t\tredialWaitStart: int64(ne.RedialWaitStart),\n\t\t\t\tredialWaitEnd:   int64(ne.RedialWaitEnd),\n\t\t\t}\n\t\t\treturn n, err\n\t\t},\n\t)\n\tsfiNodeWeight     = clientSetup.NewField(\"nodeWeight\", reflect.TypeOf(uint64(0)))\n\tsfiConnectedStats = clientSetup.NewField(\"connectedStats\", reflect.TypeOf(ResponseTimeStats{}))\n\tsfiLocalAddress   = clientSetup.NewPersistentField(\"localAddress\", reflect.TypeOf(&enr.Record{}),\n\t\tfunc(field interface{}) ([]byte, error) {\n\t\t\tif enr, ok := field.(*enr.Record); ok {\n\t\t\t\tenc, err := rlp.EncodeToBytes(enr)\n\t\t\t\treturn enc, err\n\t\t\t}\n\t\t\treturn nil, errors.New(\"invalid field 
type\")\n\t\t},\n\t\tfunc(enc []byte) (interface{}, error) {\n\t\t\tvar enr enr.Record\n\t\t\tif err := rlp.DecodeBytes(enc, &enr); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn &enr, nil\n\t\t},\n\t)\n)\n\n// NewServerPool creates a new server pool\nfunc NewServerPool(db ethdb.KeyValueStore, dbKey []byte, mixTimeout time.Duration, query QueryFunc, clock mclock.Clock, trustedURLs []string, requestList []RequestInfo) (*ServerPool, enode.Iterator) {\n\ts := &ServerPool{\n\t\tdb:           db,\n\t\tclock:        clock,\n\t\tunixTime:     func() int64 { return time.Now().Unix() },\n\t\tvalidSchemes: enode.ValidSchemes,\n\t\ttrustedURLs:  trustedURLs,\n\t\tvt:           NewValueTracker(db, &mclock.System{}, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000)),\n\t\tns:           nodestate.NewNodeStateMachine(db, []byte(string(dbKey)+\"ns:\"), clock, clientSetup),\n\t}\n\ts.recalTimeout()\n\ts.mixer = enode.NewFairMix(mixTimeout)\n\tknownSelector := NewWrsIterator(s.ns, sfHasValue, sfDialProcess, sfiNodeWeight)\n\talwaysConnect := NewQueueIterator(s.ns, sfAlwaysConnect, sfDialProcess, true, nil)\n\ts.mixSources = append(s.mixSources, knownSelector)\n\ts.mixSources = append(s.mixSources, alwaysConnect)\n\n\ts.dialIterator = s.mixer\n\tif query != nil {\n\t\ts.dialIterator = s.addPreNegFilter(s.dialIterator, query)\n\t}\n\n\ts.ns.SubscribeState(nodestate.MergeFlags(sfWaitDialTimeout, sfConnected), func(n *enode.Node, oldState, newState nodestate.Flags) {\n\t\tif oldState.Equals(sfWaitDialTimeout) && newState.IsEmpty() {\n\t\t\t// dial timeout, no connection\n\t\t\ts.setRedialWait(n, dialCost, dialWaitStep)\n\t\t\ts.ns.SetStateSub(n, nodestate.Flags{}, sfDialing, 0)\n\t\t}\n\t})\n\n\treturn s, &serverPoolIterator{\n\t\tdialIterator: s.dialIterator,\n\t\tnextFn: func(node *enode.Node) {\n\t\t\ts.ns.Operation(func() {\n\t\t\t\ts.ns.SetStateSub(node, sfDialing, sfCanDial, 0)\n\t\t\t\ts.ns.SetStateSub(node, sfWaitDialTimeout, nodestate.Flags{}, time.Second*10)\n\t\t\t})\n\t\t},\n\t\tnodeFn: s.DialNode,\n\t}\n}\n\ntype serverPoolIterator struct {\n\tdialIterator enode.Iterator\n\tnextFn       func(*enode.Node)\n\tnodeFn       func(*enode.Node) *enode.Node\n}\n\n// Next implements enode.Iterator\nfunc (s *serverPoolIterator) Next() bool {\n\tif s.dialIterator.Next() {\n\t\ts.nextFn(s.dialIterator.Node())\n\t\treturn true\n\t}\n\treturn false\n}\n\n// Node implements enode.Iterator\nfunc (s *serverPoolIterator) Node() *enode.Node {\n\treturn s.nodeFn(s.dialIterator.Node())\n}\n\n// Close implements enode.Iterator\nfunc (s *serverPoolIterator) Close() {\n\ts.dialIterator.Close()\n}\n\n// AddMetrics adds metrics to the server pool. 
Should be called before Start().\nfunc (s *ServerPool) AddMetrics(\n\tsuggestedTimeoutGauge, totalValueGauge, serverSelectableGauge, serverConnectedGauge metrics.Gauge,\n\tsessionValueMeter, serverDialedMeter metrics.Meter) {\n\n\ts.suggestedTimeoutGauge = suggestedTimeoutGauge\n\ts.totalValueGauge = totalValueGauge\n\ts.sessionValueMeter = sessionValueMeter\n\tif serverSelectableGauge != nil {\n\t\ts.ns.AddLogMetrics(sfHasValue, sfDialProcess, \"selectable\", nil, nil, serverSelectableGauge)\n\t}\n\tif serverDialedMeter != nil {\n\t\ts.ns.AddLogMetrics(sfDialing, nodestate.Flags{}, \"dialed\", serverDialedMeter, nil, nil)\n\t}\n\tif serverConnectedGauge != nil {\n\t\ts.ns.AddLogMetrics(sfConnected, nodestate.Flags{}, \"connected\", nil, nil, serverConnectedGauge)\n\t}\n}\n\n// AddSource adds a node discovery source to the server pool (should be called before start)\nfunc (s *ServerPool) AddSource(source enode.Iterator) {\n\tif source != nil {\n\t\ts.mixSources = append(s.mixSources, source)\n\t}\n}\n\n// addPreNegFilter installs a node filter mechanism that performs a pre-negotiation query.\n// Nodes that are filtered out and do not appear on the output iterator are put back\n// into redialWait state.\nfunc (s *ServerPool) addPreNegFilter(input enode.Iterator, query QueryFunc) enode.Iterator {\n\ts.fillSet = NewFillSet(s.ns, input, sfQuery)\n\ts.ns.SubscribeState(sfDialProcess, func(n *enode.Node, oldState, newState nodestate.Flags) {\n\t\tif !newState.Equals(sfQuery) {\n\t\t\tif newState.HasAll(sfQuery) {\n\t\t\t\t// remove query flag if the node is already somewhere in the dial process\n\t\t\t\ts.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfails := atomic.LoadUint32(&s.queryFails)\n\t\tfailMax := fails\n\t\tif failMax > maxQueryFails {\n\t\t\tfailMax = maxQueryFails\n\t\t}\n\t\tif rand.Intn(maxQueryFails*2) < int(failMax) {\n\t\t\t// skip pre-negotiation with increasing chance, max 50%\n\t\t\t// this ensures that the client can operate even if UDP is not working at all\n\t\t\ts.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10)\n\t\t\t// set canDial before resetting queried so that FillSet will not read more\n\t\t\t// candidates unnecessarily\n\t\t\ts.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)\n\t\t\treturn\n\t\t}\n\t\tgo func() {\n\t\t\tq := query(n)\n\t\t\tif q == -1 {\n\t\t\t\tatomic.AddUint32(&s.queryFails, 1)\n\t\t\t\tfails++\n\t\t\t\tif fails%warnQueryFails == 0 {\n\t\t\t\t\t// warn if a large number of consecutive queries have failed\n\t\t\t\t\tlog.Warn(\"UDP connection queries failed\", \"count\", fails)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tatomic.StoreUint32(&s.queryFails, 0)\n\t\t\t}\n\t\t\ts.ns.Operation(func() {\n\t\t\t\t// we are no longer running in the operation that the callback belongs to, start a new one because of setRedialWait\n\t\t\t\tif q == 1 {\n\t\t\t\t\ts.ns.SetStateSub(n, sfCanDial, nodestate.Flags{}, time.Second*10)\n\t\t\t\t} else {\n\t\t\t\t\ts.setRedialWait(n, queryCost, queryWaitStep)\n\t\t\t\t}\n\t\t\t\ts.ns.SetStateSub(n, nodestate.Flags{}, sfQuery, 0)\n\t\t\t})\n\t\t}()\n\t})\n\treturn NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) {\n\t\tif waiting {\n\t\t\ts.fillSet.SetTarget(preNegLimit)\n\t\t} else {\n\t\t\ts.fillSet.SetTarget(0)\n\t\t}\n\t})\n}\n\n// Start starts the server pool. 
Note that NodeStateMachine should be started first.\nfunc (s *ServerPool) Start() {\n\ts.ns.Start()\n\tfor _, iter := range s.mixSources {\n\t\t// add sources to mixer at startup because the mixer instantly tries to read them\n\t\t// which should only happen after NodeStateMachine has been started\n\t\ts.mixer.AddSource(iter)\n\t}\n\tfor _, url := range s.trustedURLs {\n\t\tif node, err := enode.Parse(s.validSchemes, url); err == nil {\n\t\t\ts.ns.SetState(node, sfAlwaysConnect, nodestate.Flags{}, 0)\n\t\t} else {\n\t\t\tlog.Error(\"Invalid trusted server URL\", \"url\", url, \"error\", err)\n\t\t}\n\t}\n\tunixTime := s.unixTime()\n\ts.ns.Operation(func() {\n\t\ts.ns.ForEach(sfHasValue, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {\n\t\t\ts.calculateWeight(node)\n\t\t\tif n, ok := s.ns.GetField(node, sfiNodeHistory).(nodeHistory); ok && n.redialWaitEnd > unixTime {\n\t\t\t\twait := n.redialWaitEnd - unixTime\n\t\t\t\tlastWait := n.redialWaitEnd - n.redialWaitStart\n\t\t\t\tif wait > lastWait {\n\t\t\t\t\t// if the time until expiration is larger than the last suggested\n\t\t\t\t\t// waiting time then the system clock was probably adjusted\n\t\t\t\t\twait = lastWait\n\t\t\t\t}\n\t\t\t\ts.ns.SetStateSub(node, sfRedialWait, nodestate.Flags{}, time.Duration(wait)*time.Second)\n\t\t\t}\n\t\t})\n\t})\n\tatomic.StoreUint32(&s.started, 1)\n}\n\n// Stop stops the server pool\nfunc (s *ServerPool) Stop() {\n\tif s.fillSet != nil {\n\t\ts.fillSet.Close()\n\t}\n\ts.ns.Operation(func() {\n\t\ts.ns.ForEach(sfConnected, nodestate.Flags{}, func(n *enode.Node, state nodestate.Flags) {\n\t\t\t// recalculate weight of connected nodes in order to update hasValue flag if necessary\n\t\t\ts.calculateWeight(n)\n\t\t})\n\t})\n\ts.ns.Stop()\n\ts.vt.Stop()\n}\n\n// RegisterNode implements serverPeerSubscriber\nfunc (s *ServerPool) RegisterNode(node *enode.Node) (*NodeValueTracker, error) {\n\tif atomic.LoadUint32(&s.started) == 0 {\n\t\treturn nil, errors.New(\"server pool not started yet\")\n\t}\n\tnvt := s.vt.Register(node.ID())\n\ts.ns.Operation(func() {\n\t\ts.ns.SetStateSub(node, sfConnected, sfDialing.Or(sfWaitDialTimeout), 0)\n\t\ts.ns.SetFieldSub(node, sfiConnectedStats, nvt.RtStats())\n\t\tif node.IP().IsLoopback() {\n\t\t\ts.ns.SetFieldSub(node, sfiLocalAddress, node.Record())\n\t\t}\n\t})\n\treturn nvt, nil\n}\n\n// UnregisterNode implements serverPeerSubscriber\nfunc (s *ServerPool) UnregisterNode(node *enode.Node) {\n\ts.ns.Operation(func() {\n\t\ts.setRedialWait(node, dialCost, dialWaitStep)\n\t\ts.ns.SetStateSub(node, nodestate.Flags{}, sfConnected, 0)\n\t\ts.ns.SetFieldSub(node, sfiConnectedStats, nil)\n\t})\n\ts.vt.Unregister(node.ID())\n}\n\n// recalTimeout calculates the current recommended timeout. This value is used by\n// the client as a \"soft timeout\" value. It also affects the service value calculation\n// of individual nodes.\nfunc (s *ServerPool) recalTimeout() {\n\t// Use cached result if possible, avoid recalculating too frequently.\n\ts.timeoutLock.RLock()\n\trefreshed := s.timeoutRefreshed\n\ts.timeoutLock.RUnlock()\n\tnow := s.clock.Now()\n\tif refreshed != 0 && time.Duration(now-refreshed) < timeoutRefresh {\n\t\treturn\n\t}\n\t// Cached result is stale, recalculate a new one.\n\trts := s.vt.RtStats()\n\n\t// Add a fake statistic here. It is an easy way to initialize with some\n\t// conservative values when the database is new. 
As soon as we have a\n\t// considerable amount of real stats this small value won't matter.\n\trts.Add(time.Second*2, 10, s.vt.StatsExpFactor())\n\n\t// Use either 10% failure rate timeout or twice the median response time\n\t// as the recommended timeout.\n\ttimeout := minTimeout\n\tif t := rts.Timeout(0.1); t > timeout {\n\t\ttimeout = t\n\t}\n\tif t := rts.Timeout(0.5) * 2; t > timeout {\n\t\ttimeout = t\n\t}\n\ts.timeoutLock.Lock()\n\tif s.timeout != timeout {\n\t\ts.timeout = timeout\n\t\ts.timeWeights = TimeoutWeights(s.timeout)\n\n\t\tif s.suggestedTimeoutGauge != nil {\n\t\t\ts.suggestedTimeoutGauge.Update(int64(s.timeout / time.Millisecond))\n\t\t}\n\t\tif s.totalValueGauge != nil {\n\t\t\ts.totalValueGauge.Update(int64(rts.Value(s.timeWeights, s.vt.StatsExpFactor())))\n\t\t}\n\t}\n\ts.timeoutRefreshed = now\n\ts.timeoutLock.Unlock()\n}\n\n// GetTimeout returns the recommended request timeout.\nfunc (s *ServerPool) GetTimeout() time.Duration {\n\ts.recalTimeout()\n\ts.timeoutLock.RLock()\n\tdefer s.timeoutLock.RUnlock()\n\treturn s.timeout\n}\n\n// getTimeoutAndWeight returns the recommended request timeout as well as the\n// response time weight which is necessary to calculate service value.\nfunc (s *ServerPool) getTimeoutAndWeight() (time.Duration, ResponseTimeWeights) {\n\ts.recalTimeout()\n\ts.timeoutLock.RLock()\n\tdefer s.timeoutLock.RUnlock()\n\treturn s.timeout, s.timeWeights\n}\n\n// addDialCost adds the given amount of dial cost to the node history and returns the current\n// amount of total dial cost\nfunc (s *ServerPool) addDialCost(n *nodeHistory, amount int64) uint64 {\n\tlogOffset := s.vt.StatsExpirer().LogOffset(s.clock.Now())\n\tif amount > 0 {\n\t\tn.dialCost.Add(amount, logOffset)\n\t}\n\ttotalDialCost := n.dialCost.Value(logOffset)\n\tif totalDialCost < dialCost {\n\t\ttotalDialCost = dialCost\n\t}\n\treturn totalDialCost\n}\n\n// serviceValue returns the service value accumulated in this session and in total\nfunc (s *ServerPool) serviceValue(node *enode.Node) (sessionValue, totalValue float64) {\n\tnvt := s.vt.GetNode(node.ID())\n\tif nvt == nil {\n\t\treturn 0, 0\n\t}\n\tcurrentStats := nvt.RtStats()\n\t_, timeWeights := s.getTimeoutAndWeight()\n\texpFactor := s.vt.StatsExpFactor()\n\n\ttotalValue = currentStats.Value(timeWeights, expFactor)\n\tif connStats, ok := s.ns.GetField(node, sfiConnectedStats).(ResponseTimeStats); ok {\n\t\tdiff := currentStats\n\t\tdiff.SubStats(&connStats)\n\t\tsessionValue = diff.Value(timeWeights, expFactor)\n\t\tif s.sessionValueMeter != nil {\n\t\t\ts.sessionValueMeter.Mark(int64(sessionValue))\n\t\t}\n\t}\n\treturn\n}\n\n// updateWeight calculates the node weight and updates the nodeWeight field and the\n// hasValue flag. 
It also saves the node state if necessary.\n// Note: this function should run inside a NodeStateMachine operation\nfunc (s *ServerPool) updateWeight(node *enode.Node, totalValue float64, totalDialCost uint64) {\n\tweight := uint64(totalValue * nodeWeightMul / float64(totalDialCost))\n\tif weight >= nodeWeightThreshold {\n\t\ts.ns.SetStateSub(node, sfHasValue, nodestate.Flags{}, 0)\n\t\ts.ns.SetFieldSub(node, sfiNodeWeight, weight)\n\t} else {\n\t\ts.ns.SetStateSub(node, nodestate.Flags{}, sfHasValue, 0)\n\t\ts.ns.SetFieldSub(node, sfiNodeWeight, nil)\n\t\ts.ns.SetFieldSub(node, sfiNodeHistory, nil)\n\t\ts.ns.SetFieldSub(node, sfiLocalAddress, nil)\n\t}\n\ts.ns.Persist(node) // saved if node history or hasValue changed\n}\n\n// setRedialWait calculates and sets the redialWait timeout based on the service value\n// and dial cost accumulated during the last session/attempt and in total.\n// The waiting time is raised exponentially if no service value has been received in order\n// to prevent dialing an unresponsive node frequently for a very long time just because it\n// was useful in the past. It can still be occasionally dialed though and once it provides\n// a significant amount of service value again its waiting time is quickly reduced or reset\n// to the minimum.\n// Note: node weight is also recalculated and updated by this function.\n// Note 2: this function should run inside a NodeStateMachine operation\nfunc (s *ServerPool) setRedialWait(node *enode.Node, addDialCost int64, waitStep float64) {\n\tn, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory)\n\tsessionValue, totalValue := s.serviceValue(node)\n\ttotalDialCost := s.addDialCost(&n, addDialCost)\n\n\t// if the current dial session has yielded at least the average value/dial cost ratio\n\t// then the waiting time should be reset to the minimum. If the session value\n\t// is below average but still positive then timeout is limited to the ratio of\n\t// average / current service value multiplied by the minimum timeout. If the attempt\n\t// was unsuccessful then timeout is raised exponentially without limitation.\n\t// Note: dialCost is used in the formula below even if dial was not attempted at all\n\t// because the pre-negotiation query did not return a positive result. 
In this case\n\t// the ratio has no meaning anyway and waitFactor is always raised, though in smaller\n\t// steps because queries are cheaper and therefore we can allow more failed attempts.\n\tunixTime := s.unixTime()\n\tplannedTimeout := float64(n.redialWaitEnd - n.redialWaitStart) // last planned redialWait timeout\n\tvar actualWait float64                                         // actual waiting time elapsed\n\tif unixTime > n.redialWaitEnd {\n\t\t// the planned timeout has elapsed\n\t\tactualWait = plannedTimeout\n\t} else {\n\t\t// if the node was redialed earlier then we do not raise the planned timeout\n\t\t// exponentially because that could lead to the timeout rising very high in\n\t\t// a short amount of time\n\t\t// Note that in case of an early redial actualWait also includes the dial\n\t\t// timeout or connection time of the last attempt but it still serves its\n\t\t// purpose of preventing the timeout rising quicker than linearly as a function\n\t\t// of total time elapsed without a successful connection.\n\t\tactualWait = float64(unixTime - n.redialWaitStart)\n\t}\n\t// raise timeout exponentially if the last planned timeout has elapsed\n\t// (use at least the last planned timeout otherwise)\n\tnextTimeout := actualWait * waitStep\n\tif plannedTimeout > nextTimeout {\n\t\tnextTimeout = plannedTimeout\n\t}\n\t// we reduce the waiting time if the server has provided service value during the\n\t// connection (but never under the minimum)\n\ta := totalValue * dialCost * float64(minRedialWait)\n\tb := float64(totalDialCost) * sessionValue\n\tif a < b*nextTimeout {\n\t\tnextTimeout = a / b\n\t}\n\tif nextTimeout < minRedialWait {\n\t\tnextTimeout = minRedialWait\n\t}\n\twait := time.Duration(float64(time.Second) * nextTimeout)\n\tif wait < waitThreshold {\n\t\tn.redialWaitStart = unixTime\n\t\tn.redialWaitEnd = unixTime + int64(nextTimeout)\n\t\ts.ns.SetFieldSub(node, sfiNodeHistory, n)\n\t\ts.ns.SetStateSub(node, sfRedialWait, nodestate.Flags{}, wait)\n\t\ts.updateWeight(node, totalValue, totalDialCost)\n\t} else {\n\t\t// discard known node statistics if waiting time is very long because the node\n\t\t// hasn't been responsive for a very long time\n\t\ts.ns.SetFieldSub(node, sfiNodeHistory, nil)\n\t\ts.ns.SetFieldSub(node, sfiNodeWeight, nil)\n\t\ts.ns.SetStateSub(node, nodestate.Flags{}, sfHasValue, 0)\n\t}\n}\n\n// calculateWeight calculates and sets the node weight without altering the node history.\n// This function should be called during startup and shutdown only, otherwise setRedialWait\n// will keep the weights updated as the underlying statistics are adjusted.\n// Note: this function should run inside a NodeStateMachine operation\nfunc (s *ServerPool) calculateWeight(node *enode.Node) {\n\tn, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory)\n\t_, totalValue := s.serviceValue(node)\n\ttotalDialCost := s.addDialCost(&n, 0)\n\ts.updateWeight(node, totalValue, totalDialCost)\n}\n\n// API returns the vflux client API\nfunc (s *ServerPool) API() *PrivateClientAPI {\n\treturn NewPrivateClientAPI(s.vt)\n}\n\ntype dummyIdentity enode.ID\n\nfunc (id dummyIdentity) Verify(r *enr.Record, sig []byte) error { return nil }\nfunc (id dummyIdentity) NodeAddr(r *enr.Record) []byte          { return id[:] }\n\n// DialNode replaces the given enode with a locally generated one containing the ENR\n// stored in the sfiLocalAddress field if present. 
This workaround ensures that nodes\n// on the local network can be dialed at the local address if a connection has been\n// successfully established previously.\n// Note that NodeStateMachine always remembers the enode with the latest version of\n// the remote signed ENR. ENR filtering should be performed on that version while\n// dialNode should be used for dialing the node over TCP or UDP.\nfunc (s *ServerPool) DialNode(n *enode.Node) *enode.Node {\n\tif enr, ok := s.ns.GetField(n, sfiLocalAddress).(*enr.Record); ok {\n\t\tn, _ := enode.New(dummyIdentity(n.ID()), enr)\n\t\treturn n\n\t}\n\treturn n\n}\n\n// Persist immediately stores the state of a node in the node database\nfunc (s *ServerPool) Persist(n *enode.Node) {\n\ts.ns.Persist(n)\n}\n"
  },
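  {
    "path": "les/vflux/client/serverpool_example_test.go",
    "content": "// NOTE: illustrative sketch added during editing; not part of the upstream\n// go-ethereum codebase. It shows minimal ServerPool wiring without discovery\n// sources or a pre-negotiation query; the database key \"sp:\" and the request\n// name \"sketchreq\" are made up (compare serverpool_test.go for a full setup).\n\npackage client\n\nimport (\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/ethdb/memorydb\"\n)\n\nfunc ExampleServerPool() {\n\trequestList := []RequestInfo{{Name: \"sketchreq\", InitAmount: 1, InitValue: 1}}\n\tsp, dialIterator := NewServerPool(memorydb.New(), []byte(\"sp:\"), 0, nil, &mclock.System{}, nil, requestList)\n\tsp.Start()\n\n\t// GetTimeout returns the recommended soft timeout; on a fresh database it is\n\t// derived from the conservative fake statistic added in recalTimeout.\n\t_ = sp.GetTimeout()\n\n\tsp.Stop()\n\tdialIterator.Close()\n}\n"
  },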
  {
    "path": "les/vflux/client/serverpool_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"math/rand\"\n\t\"strconv\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/ethdb/memorydb\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n)\n\nconst (\n\tspTestNodes  = 1000\n\tspTestTarget = 5\n\tspTestLength = 10000\n\tspMinTotal   = 40000\n\tspMaxTotal   = 50000\n)\n\nfunc testNodeID(i int) enode.ID {\n\treturn enode.ID{42, byte(i % 256), byte(i / 256)}\n}\n\nfunc testNodeIndex(id enode.ID) int {\n\tif id[0] != 42 {\n\t\treturn -1\n\t}\n\treturn int(id[1]) + int(id[2])*256\n}\n\ntype ServerPoolTest struct {\n\tdb                   ethdb.KeyValueStore\n\tclock                *mclock.Simulated\n\tquit                 chan struct{}\n\tpreNeg, preNegFail   bool\n\tvt                   *ValueTracker\n\tsp                   *ServerPool\n\tspi                  enode.Iterator\n\tinput                enode.Iterator\n\ttestNodes            []spTestNode\n\ttrusted              []string\n\twaitCount, waitEnded int32\n\n\tcycle, conn, servedConn  int\n\tserviceCycles, dialCount int\n\tdisconnect               map[int][]int\n}\n\ntype spTestNode struct {\n\tconnectCycles, waitCycles int\n\tnextConnCycle, totalConn  int\n\tconnected, service        bool\n\tnode                      *enode.Node\n}\n\nfunc newServerPoolTest(preNeg, preNegFail bool) *ServerPoolTest {\n\tnodes := make([]*enode.Node, spTestNodes)\n\tfor i := range nodes {\n\t\tnodes[i] = enode.SignNull(&enr.Record{}, testNodeID(i))\n\t}\n\treturn &ServerPoolTest{\n\t\tclock:      &mclock.Simulated{},\n\t\tdb:         memorydb.New(),\n\t\tinput:      enode.CycleNodes(nodes),\n\t\ttestNodes:  make([]spTestNode, spTestNodes),\n\t\tpreNeg:     preNeg,\n\t\tpreNegFail: preNegFail,\n\t}\n}\n\nfunc (s *ServerPoolTest) beginWait() {\n\t// ensure that dialIterator and the maximal number of pre-neg queries are not all stuck in a waiting state\n\tfor atomic.AddInt32(&s.waitCount, 1) > preNegLimit {\n\t\tatomic.AddInt32(&s.waitCount, -1)\n\t\ts.clock.Run(time.Second)\n\t}\n}\n\nfunc (s *ServerPoolTest) endWait() {\n\tatomic.AddInt32(&s.waitCount, -1)\n\tatomic.AddInt32(&s.waitEnded, 1)\n}\n\nfunc (s *ServerPoolTest) addTrusted(i int) {\n\ts.trusted = append(s.trusted, enode.SignNull(&enr.Record{}, testNodeID(i)).String())\n}\n\nfunc (s *ServerPoolTest) start() {\n\tvar testQuery QueryFunc\n\tif s.preNeg {\n\t\ttestQuery = func(node *enode.Node) int {\n\t\t\tidx := testNodeIndex(node.ID())\n\t\t\tn := &s.testNodes[idx]\n\t\t\tcanConnect := !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle\n\t\t\tif s.preNegFail 
{\n\t\t\t\t// simulate a scenario where UDP queries never work\n\t\t\t\ts.beginWait()\n\t\t\t\ts.clock.Sleep(time.Second * 5)\n\t\t\t\ts.endWait()\n\t\t\t\treturn -1\n\t\t\t}\n\t\t\tswitch idx % 3 {\n\t\t\tcase 0:\n\t\t\t\t// pre-neg returns true only if connection is possible\n\t\t\t\tif canConnect {\n\t\t\t\t\treturn 1\n\t\t\t\t}\n\t\t\t\treturn 0\n\t\t\tcase 1:\n\t\t\t\t// pre-neg returns true but connection might still fail\n\t\t\t\treturn 1\n\t\t\tcase 2:\n\t\t\t\t// pre-neg returns true if connection is possible, otherwise timeout (node unresponsive)\n\t\t\t\tif canConnect {\n\t\t\t\t\treturn 1\n\t\t\t\t}\n\t\t\t\ts.beginWait()\n\t\t\t\ts.clock.Sleep(time.Second * 5)\n\t\t\t\ts.endWait()\n\t\t\t\treturn -1\n\t\t\t}\n\t\t\treturn -1\n\t\t}\n\t}\n\n\trequestList := make([]RequestInfo, testReqTypes)\n\tfor i := range requestList {\n\t\trequestList[i] = RequestInfo{Name: \"testreq\" + strconv.Itoa(i), InitAmount: 1, InitValue: 1}\n\t}\n\n\ts.sp, s.spi = NewServerPool(s.db, []byte(\"sp:\"), 0, testQuery, s.clock, s.trusted, requestList)\n\ts.sp.AddSource(s.input)\n\ts.sp.validSchemes = enode.ValidSchemesForTesting\n\ts.sp.unixTime = func() int64 { return int64(s.clock.Now()) / int64(time.Second) }\n\ts.disconnect = make(map[int][]int)\n\ts.sp.Start()\n\ts.quit = make(chan struct{})\n\tgo func() {\n\t\tlast := int32(-1)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(time.Millisecond * 100):\n\t\t\t\tc := atomic.LoadInt32(&s.waitEnded)\n\t\t\t\tif c == last {\n\t\t\t\t\t// advance clock if test is stuck (might happen in rare cases)\n\t\t\t\t\ts.clock.Run(time.Second)\n\t\t\t\t}\n\t\t\t\tlast = c\n\t\t\tcase <-s.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (s *ServerPoolTest) stop() {\n\tclose(s.quit)\n\ts.sp.Stop()\n\ts.spi.Close()\n\tfor i := range s.testNodes {\n\t\tn := &s.testNodes[i]\n\t\tif n.connected {\n\t\t\tn.totalConn += s.cycle\n\t\t}\n\t\tn.connected = false\n\t\tn.node = nil\n\t\tn.nextConnCycle = 0\n\t}\n\ts.conn, s.servedConn = 0, 0\n}\n\nfunc (s *ServerPoolTest) run() {\n\tfor count := spTestLength; count > 0; count-- {\n\t\tif dcList := s.disconnect[s.cycle]; dcList != nil {\n\t\t\tfor _, idx := range dcList {\n\t\t\t\tn := &s.testNodes[idx]\n\t\t\t\ts.sp.UnregisterNode(n.node)\n\t\t\t\tn.totalConn += s.cycle\n\t\t\t\tn.connected = false\n\t\t\t\tn.node = nil\n\t\t\t\ts.conn--\n\t\t\t\tif n.service {\n\t\t\t\t\ts.servedConn--\n\t\t\t\t}\n\t\t\t\tn.nextConnCycle = s.cycle + n.waitCycles\n\t\t\t}\n\t\t\tdelete(s.disconnect, s.cycle)\n\t\t}\n\t\tif s.conn < spTestTarget {\n\t\t\ts.dialCount++\n\t\t\ts.beginWait()\n\t\t\ts.spi.Next()\n\t\t\ts.endWait()\n\t\t\tdial := s.spi.Node()\n\t\t\tid := dial.ID()\n\t\t\tidx := testNodeIndex(id)\n\t\t\tn := &s.testNodes[idx]\n\t\t\tif !n.connected && n.connectCycles != 0 && s.cycle >= n.nextConnCycle {\n\t\t\t\ts.conn++\n\t\t\t\tif n.service {\n\t\t\t\t\ts.servedConn++\n\t\t\t\t}\n\t\t\t\tn.totalConn -= s.cycle\n\t\t\t\tn.connected = true\n\t\t\t\tdc := s.cycle + n.connectCycles\n\t\t\t\ts.disconnect[dc] = append(s.disconnect[dc], idx)\n\t\t\t\tn.node = dial\n\t\t\t\tnv, _ := s.sp.RegisterNode(n.node)\n\t\t\t\tif n.service {\n\t\t\t\t\tnv.Served([]ServedRequest{{ReqType: 0, Amount: 100}}, 0)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ts.serviceCycles += s.servedConn\n\t\ts.clock.Run(time.Second)\n\t\ts.cycle++\n\t}\n}\n\nfunc (s *ServerPoolTest) setNodes(count, conn, wait int, service, trusted bool) (res []int) {\n\tfor ; count > 0; count-- {\n\t\tidx := rand.Intn(spTestNodes)\n\t\tfor s.testNodes[idx].connectCycles != 0 || 
s.testNodes[idx].connected {\n\t\t\tidx = rand.Intn(spTestNodes)\n\t\t}\n\t\tres = append(res, idx)\n\t\ts.testNodes[idx] = spTestNode{\n\t\t\tconnectCycles: conn,\n\t\t\twaitCycles:    wait,\n\t\t\tservice:       service,\n\t\t}\n\t\tif trusted {\n\t\t\ts.addTrusted(idx)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (s *ServerPoolTest) resetNodes() {\n\tfor i, n := range s.testNodes {\n\t\tif n.connected {\n\t\t\tn.totalConn += s.cycle\n\t\t\ts.sp.UnregisterNode(n.node)\n\t\t}\n\t\ts.testNodes[i] = spTestNode{totalConn: n.totalConn}\n\t}\n\ts.conn, s.servedConn = 0, 0\n\ts.disconnect = make(map[int][]int)\n\ts.trusted = nil\n}\n\nfunc (s *ServerPoolTest) checkNodes(t *testing.T, nodes []int) {\n\tvar sum int\n\tfor _, idx := range nodes {\n\t\tn := &s.testNodes[idx]\n\t\tif n.connected {\n\t\t\tn.totalConn += s.cycle\n\t\t}\n\t\tsum += n.totalConn\n\t\tn.totalConn = 0\n\t\tif n.connected {\n\t\t\tn.totalConn -= s.cycle\n\t\t}\n\t}\n\tif sum < spMinTotal || sum > spMaxTotal {\n\t\tt.Errorf(\"Total connection amount %d outside expected range %d to %d\", sum, spMinTotal, spMaxTotal)\n\t}\n}\n\nfunc TestServerPool(t *testing.T)               { testServerPool(t, false, false) }\nfunc TestServerPoolWithPreNeg(t *testing.T)     { testServerPool(t, true, false) }\nfunc TestServerPoolWithPreNegFail(t *testing.T) { testServerPool(t, true, true) }\nfunc testServerPool(t *testing.T, preNeg, fail bool) {\n\ts := newServerPoolTest(preNeg, fail)\n\tnodes := s.setNodes(100, 200, 200, true, false)\n\ts.setNodes(100, 20, 20, false, false)\n\ts.start()\n\ts.run()\n\ts.stop()\n\ts.checkNodes(t, nodes)\n}\n\nfunc TestServerPoolChangedNodes(t *testing.T)           { testServerPoolChangedNodes(t, false) }\nfunc TestServerPoolChangedNodesWithPreNeg(t *testing.T) { testServerPoolChangedNodes(t, true) }\nfunc testServerPoolChangedNodes(t *testing.T, preNeg bool) {\n\ts := newServerPoolTest(preNeg, false)\n\tnodes := s.setNodes(100, 200, 200, true, false)\n\ts.setNodes(100, 20, 20, false, false)\n\ts.start()\n\ts.run()\n\ts.checkNodes(t, nodes)\n\tfor i := 0; i < 3; i++ {\n\t\ts.resetNodes()\n\t\tnodes := s.setNodes(100, 200, 200, true, false)\n\t\ts.setNodes(100, 20, 20, false, false)\n\t\ts.run()\n\t\ts.checkNodes(t, nodes)\n\t}\n\ts.stop()\n}\n\nfunc TestServerPoolRestartNoDiscovery(t *testing.T) { testServerPoolRestartNoDiscovery(t, false) }\nfunc TestServerPoolRestartNoDiscoveryWithPreNeg(t *testing.T) {\n\ttestServerPoolRestartNoDiscovery(t, true)\n}\nfunc testServerPoolRestartNoDiscovery(t *testing.T, preNeg bool) {\n\ts := newServerPoolTest(preNeg, false)\n\tnodes := s.setNodes(100, 200, 200, true, false)\n\ts.setNodes(100, 20, 20, false, false)\n\ts.start()\n\ts.run()\n\ts.stop()\n\ts.checkNodes(t, nodes)\n\ts.input = nil\n\ts.start()\n\ts.run()\n\ts.stop()\n\ts.checkNodes(t, nodes)\n}\n\nfunc TestServerPoolTrustedNoDiscovery(t *testing.T) { testServerPoolTrustedNoDiscovery(t, false) }\nfunc TestServerPoolTrustedNoDiscoveryWithPreNeg(t *testing.T) {\n\ttestServerPoolTrustedNoDiscovery(t, true)\n}\nfunc testServerPoolTrustedNoDiscovery(t *testing.T, preNeg bool) {\n\ts := newServerPoolTest(preNeg, false)\n\ttrusted := s.setNodes(200, 200, 200, true, true)\n\ts.input = nil\n\ts.start()\n\ts.run()\n\ts.stop()\n\ts.checkNodes(t, trusted)\n}\n"
  },
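The test harness above drives the pre-negotiation hook through a 1 / 0 / -1 return convention (answered positively / answered negatively / failed or timed out). A minimal sketch of a query function following that convention; `udpPing` is a hypothetical helper standing in for the real UDP capacity query, so only the return-value convention is taken from the tests:

```go
package example

import (
	"errors"
	"time"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

var errTimeout = errors.New("query timeout")

// udpPing is a hypothetical stand-in for the real UDP pre-negotiation query;
// only the return-value convention below is taken from the tests above.
func udpPing(n *enode.Node, timeout time.Duration) (accepted bool, err error) {
	return false, errTimeout // placeholder body
}

// preNegQuery maps the query outcome onto the convention exercised by
// testQuery: 1 = worth dialing, 0 = node refused, -1 = no usable answer.
func preNegQuery(n *enode.Node) int {
	accepted, err := udpPing(n, 5*time.Second)
	switch {
	case err != nil:
		return -1
	case accepted:
		return 1
	default:
		return 0
	}
}
```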
  {
    "path": "les/vflux/client/timestats.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"io\"\n\t\"math\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\nconst (\n\tminResponseTime   = time.Millisecond * 50\n\tmaxResponseTime   = time.Second * 10\n\ttimeStatLength    = 32\n\tweightScaleFactor = 1000000\n)\n\n// ResponseTimeStats is the response time distribution of a set of answered requests,\n// weighted with request value, either served by a single server or aggregated for\n// multiple servers.\n// It it a fixed length (timeStatLength) distribution vector with linear interpolation.\n// The X axis (the time values) are not linear, they should be transformed with\n// TimeToStatScale and StatScaleToTime.\ntype (\n\tResponseTimeStats struct {\n\t\tstats [timeStatLength]uint64\n\t\texp   uint64\n\t}\n\tResponseTimeWeights [timeStatLength]float64\n)\n\nvar timeStatsLogFactor = (timeStatLength - 1) / (math.Log(float64(maxResponseTime)/float64(minResponseTime)) + 1)\n\n// TimeToStatScale converts a response time to a distribution vector index. The index\n// is represented by a float64 so that linear interpolation can be applied.\nfunc TimeToStatScale(d time.Duration) float64 {\n\tif d < 0 {\n\t\treturn 0\n\t}\n\tr := float64(d) / float64(minResponseTime)\n\tif r > 1 {\n\t\tr = math.Log(r) + 1\n\t}\n\tr *= timeStatsLogFactor\n\tif r > timeStatLength-1 {\n\t\treturn timeStatLength - 1\n\t}\n\treturn r\n}\n\n// StatScaleToTime converts a distribution vector index to a response time. The index\n// is represented by a float64 so that linear interpolation can be applied.\nfunc StatScaleToTime(r float64) time.Duration {\n\tr /= timeStatsLogFactor\n\tif r > 1 {\n\t\tr = math.Exp(r - 1)\n\t}\n\treturn time.Duration(r * float64(minResponseTime))\n}\n\n// TimeoutWeights calculates the weight function used for calculating service value\n// based on the response time distribution of the received service.\n// It is based on the request timeout value of the system. 
It consists of a half cosine\n// function starting with 1, crossing zero at timeout and reaching -1 at 2*timeout.\n// After 2*timeout the weight is constant -1.\nfunc TimeoutWeights(timeout time.Duration) (res ResponseTimeWeights) {\n\tfor i := range res {\n\t\tt := StatScaleToTime(float64(i))\n\t\tif t < 2*timeout {\n\t\t\tres[i] = math.Cos(math.Pi / 2 * float64(t) / float64(timeout))\n\t\t} else {\n\t\t\tres[i] = -1\n\t\t}\n\t}\n\treturn\n}\n\n// EncodeRLP implements rlp.Encoder\nfunc (rt *ResponseTimeStats) EncodeRLP(w io.Writer) error {\n\tenc := struct {\n\t\tStats [timeStatLength]uint64\n\t\tExp   uint64\n\t}{rt.stats, rt.exp}\n\treturn rlp.Encode(w, &enc)\n}\n\n// DecodeRLP implements rlp.Decoder\nfunc (rt *ResponseTimeStats) DecodeRLP(s *rlp.Stream) error {\n\tvar enc struct {\n\t\tStats [timeStatLength]uint64\n\t\tExp   uint64\n\t}\n\tif err := s.Decode(&enc); err != nil {\n\t\treturn err\n\t}\n\trt.stats, rt.exp = enc.Stats, enc.Exp\n\treturn nil\n}\n\n// Add adds a new response time with the given weight to the distribution.\nfunc (rt *ResponseTimeStats) Add(respTime time.Duration, weight float64, expFactor utils.ExpirationFactor) {\n\trt.setExp(expFactor.Exp)\n\tweight *= expFactor.Factor * weightScaleFactor\n\tr := TimeToStatScale(respTime)\n\ti := int(r)\n\tr -= float64(i)\n\trt.stats[i] += uint64(weight * (1 - r))\n\tif i < timeStatLength-1 {\n\t\trt.stats[i+1] += uint64(weight * r)\n\t}\n}\n\n// setExp sets the power of 2 exponent of the structure, scaling base values (the vector\n// itself) up or down if necessary.\nfunc (rt *ResponseTimeStats) setExp(exp uint64) {\n\tif exp > rt.exp {\n\t\tshift := exp - rt.exp\n\t\tfor i, v := range rt.stats {\n\t\t\trt.stats[i] = v >> shift\n\t\t}\n\t\trt.exp = exp\n\t}\n\tif exp < rt.exp {\n\t\tshift := rt.exp - exp\n\t\tfor i, v := range rt.stats {\n\t\t\trt.stats[i] = v << shift\n\t\t}\n\t\trt.exp = exp\n\t}\n}\n\n// Value calculates the total service value based on the given distribution, using the\n// specified weight function.\nfunc (rt ResponseTimeStats) Value(weights ResponseTimeWeights, expFactor utils.ExpirationFactor) float64 {\n\tvar v float64\n\tfor i, s := range rt.stats {\n\t\tv += float64(s) * weights[i]\n\t}\n\tif v < 0 {\n\t\treturn 0\n\t}\n\treturn expFactor.Value(v, rt.exp) / weightScaleFactor\n}\n\n// AddStats adds the given ResponseTimeStats to the current one.\nfunc (rt *ResponseTimeStats) AddStats(s *ResponseTimeStats) {\n\trt.setExp(s.exp)\n\tfor i, v := range s.stats {\n\t\trt.stats[i] += v\n\t}\n}\n\n// SubStats subtracts the given ResponseTimeStats from the current one.\nfunc (rt *ResponseTimeStats) SubStats(s *ResponseTimeStats) {\n\trt.setExp(s.exp)\n\tfor i, v := range s.stats {\n\t\tif v < rt.stats[i] {\n\t\t\trt.stats[i] -= v\n\t\t} else {\n\t\t\trt.stats[i] = 0\n\t\t}\n\t}\n}\n\n// Timeout suggests a timeout value based on the previous distribution. 
The parameter\n// is the desired rate of timeouts assuming a similar distribution in the future.\n// Note that the actual timeout should have a sensible minimum bound so that operating\n// under ideal working conditions for a long time (for example, using a local server\n// with very low response times) will not make it very hard for the system to accommodate\n// longer response times in the future.\nfunc (rt ResponseTimeStats) Timeout(failRatio float64) time.Duration {\n\tvar sum uint64\n\tfor _, v := range rt.stats {\n\t\tsum += v\n\t}\n\ts := uint64(float64(sum) * failRatio)\n\ti := timeStatLength - 1\n\tfor i > 0 && s >= rt.stats[i] {\n\t\ts -= rt.stats[i]\n\t\ti--\n\t}\n\tr := float64(i) + 0.5\n\tif rt.stats[i] > 0 {\n\t\tr -= float64(s) / float64(rt.stats[i])\n\t}\n\tif r < 0 {\n\t\tr = 0\n\t}\n\tth := StatScaleToTime(r)\n\tif th > maxResponseTime {\n\t\tth = maxResponseTime\n\t}\n\treturn th\n}\n\n// RtDistribution represents a distribution as a series of (X, Y) chart coordinates,\n// where the X axis is the response time in seconds while the Y axis is the amount of\n// service value received with a response time close to the X coordinate.\ntype RtDistribution [timeStatLength][2]float64\n\n// Distribution returns a RtDistribution, optionally normalized to a sum of 1.\nfunc (rt ResponseTimeStats) Distribution(normalized bool, expFactor utils.ExpirationFactor) (res RtDistribution) {\n\tvar mul float64\n\tif normalized {\n\t\tvar sum uint64\n\t\tfor _, v := range rt.stats {\n\t\t\tsum += v\n\t\t}\n\t\tif sum > 0 {\n\t\t\tmul = 1 / float64(sum)\n\t\t}\n\t} else {\n\t\tmul = expFactor.Value(float64(1)/weightScaleFactor, rt.exp)\n\t}\n\tfor i, v := range rt.stats {\n\t\tres[i][0] = float64(StatScaleToTime(float64(i))) / float64(time.Second)\n\t\tres[i][1] = float64(v) * mul\n\t}\n\treturn\n}\n"
  },
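TimeToStatScale maps response times onto the 32-entry vector linearly up to minResponseTime and logarithmically above it, clamping at the last index. A self-contained sketch (constants copied from the file above, logic mirroring TimeToStatScale) that prints a few sample mappings:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// Constants copied from timestats.go above.
const (
	minResponseTime = 50 * time.Millisecond
	maxResponseTime = 10 * time.Second
	timeStatLength  = 32
)

var logFactor = (timeStatLength - 1) / (math.Log(float64(maxResponseTime)/float64(minResponseTime)) + 1)

// timeToScale mirrors TimeToStatScale: linear below minResponseTime,
// logarithmic above it, clamped to the last vector index.
func timeToScale(d time.Duration) float64 {
	if d < 0 {
		return 0
	}
	r := float64(d) / float64(minResponseTime)
	if r > 1 {
		r = math.Log(r) + 1
	}
	r *= logFactor
	if r > timeStatLength-1 {
		return timeStatLength - 1
	}
	return r
}

func main() {
	for _, d := range []time.Duration{time.Millisecond, 50 * time.Millisecond, time.Second, 10 * time.Second} {
		fmt.Printf("%12v -> index %5.2f\n", d, timeToScale(d))
	}
}
```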
  {
    "path": "les/vflux/client/timestats_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n)\n\nfunc TestTransition(t *testing.T) {\n\tvar epsilon = 0.01\n\tvar cases = []time.Duration{\n\t\ttime.Millisecond, minResponseTime,\n\t\ttime.Second, time.Second * 5, maxResponseTime,\n\t}\n\tfor _, c := range cases {\n\t\tgot := StatScaleToTime(TimeToStatScale(c))\n\t\tif float64(got)*(1+epsilon) < float64(c) || float64(got)*(1-epsilon) > float64(c) {\n\t\t\tt.Fatalf(\"Failed to transition back\")\n\t\t}\n\t}\n\t// If the time is too large(exceeds the max response time.\n\tgot := StatScaleToTime(TimeToStatScale(2 * maxResponseTime))\n\tif float64(got)*(1+epsilon) < float64(maxResponseTime) || float64(got)*(1-epsilon) > float64(maxResponseTime) {\n\t\tt.Fatalf(\"Failed to transition back\")\n\t}\n}\n\nvar maxResponseWeights = TimeoutWeights(maxResponseTime)\n\nfunc TestValue(t *testing.T) {\n\tnoexp := utils.ExpirationFactor{Factor: 1}\n\tfor i := 0; i < 1000; i++ {\n\t\tmax := minResponseTime + time.Duration(rand.Int63n(int64(maxResponseTime-minResponseTime)))\n\t\tmin := minResponseTime + time.Duration(rand.Int63n(int64(max-minResponseTime)))\n\t\ttimeout := max/2 + time.Duration(rand.Int63n(int64(maxResponseTime-max/2)))\n\t\ts := makeRangeStats(min, max, 1000, noexp)\n\t\tvalue := s.Value(TimeoutWeights(timeout), noexp)\n\t\t// calculate the average weight (the average of the given range of the half cosine\n\t\t// weight function).\n\t\tminx := math.Pi / 2 * float64(min) / float64(timeout)\n\t\tmaxx := math.Pi / 2 * float64(max) / float64(timeout)\n\t\tavgWeight := (math.Sin(maxx) - math.Sin(minx)) / (maxx - minx)\n\t\texpv := 1000 * avgWeight\n\t\tif expv < 0 {\n\t\t\texpv = 0\n\t\t}\n\t\tif value < expv-10 || value > expv+10 {\n\t\t\tt.Errorf(\"Value failed (expected %v, got %v)\", expv, value)\n\t\t}\n\t}\n}\n\nfunc TestAddSubExpire(t *testing.T) {\n\tvar (\n\t\tsum1, sum2                 ResponseTimeStats\n\t\tsum1ValueExp, sum2ValueExp float64\n\t\tlogOffset                  utils.Fixed64\n\t)\n\tfor i := 0; i < 1000; i++ {\n\t\texp := utils.ExpFactor(logOffset)\n\t\tmax := minResponseTime + time.Duration(rand.Int63n(int64(maxResponseTime-minResponseTime)))\n\t\tmin := minResponseTime + time.Duration(rand.Int63n(int64(max-minResponseTime)))\n\t\ts := makeRangeStats(min, max, 1000, exp)\n\t\tvalue := s.Value(maxResponseWeights, exp)\n\t\tsum1.AddStats(&s)\n\t\tsum1ValueExp += value\n\t\tif rand.Intn(2) == 1 {\n\t\t\tsum2.AddStats(&s)\n\t\t\tsum2ValueExp += value\n\t\t}\n\t\tlogOffset += utils.Float64ToFixed64(0.001 / math.Log(2))\n\t\tsum1ValueExp -= sum1ValueExp * 0.001\n\t\tsum2ValueExp -= sum2ValueExp * 0.001\n\t}\n\texp := 
utils.ExpFactor(logOffset)\n\tsum1Value := sum1.Value(maxResponseWeights, exp)\n\tif sum1Value < sum1ValueExp*0.99 || sum1Value > sum1ValueExp*1.01 {\n\t\tt.Errorf(\"sum1Value failed (expected %v, got %v)\", sum1ValueExp, sum1Value)\n\t}\n\tsum2Value := sum2.Value(maxResponseWeights, exp)\n\tif sum2Value < sum2ValueExp*0.99 || sum2Value > sum2ValueExp*1.01 {\n\t\tt.Errorf(\"sum2Value failed (expected %v, got %v)\", sum2ValueExp, sum2Value)\n\t}\n\tdiff := sum1\n\tdiff.SubStats(&sum2)\n\tdiffValue := diff.Value(maxResponseWeights, exp)\n\tdiffValueExp := sum1ValueExp - sum2ValueExp\n\tif diffValue < diffValueExp*0.99 || diffValue > diffValueExp*1.01 {\n\t\tt.Errorf(\"diffValue failed (expected %v, got %v)\", diffValueExp, diffValue)\n\t}\n}\n\nfunc TestTimeout(t *testing.T) {\n\ttestTimeoutRange(t, 0, time.Second)\n\ttestTimeoutRange(t, time.Second, time.Second*2)\n\ttestTimeoutRange(t, time.Second, maxResponseTime)\n}\n\nfunc testTimeoutRange(t *testing.T, min, max time.Duration) {\n\ts := makeRangeStats(min, max, 1000, utils.ExpirationFactor{Factor: 1})\n\tfor i := 2; i < 9; i++ {\n\t\tto := s.Timeout(float64(i) / 10)\n\t\texp := max - (max-min)*time.Duration(i)/10\n\t\ttol := (max - min) / 50\n\t\tif to < exp-tol || to > exp+tol {\n\t\t\tt.Errorf(\"Timeout failed (expected %v, got %v)\", exp, to)\n\t\t}\n\t}\n}\n\nfunc makeRangeStats(min, max time.Duration, amount float64, exp utils.ExpirationFactor) ResponseTimeStats {\n\tvar s ResponseTimeStats\n\tamount /= 1000\n\tfor i := 0; i < 1000; i++ {\n\t\ts.Add(min+(max-min)*time.Duration(i)/999, amount, exp)\n\t}\n\treturn s\n}\n"
  },
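The avgWeight formula in TestValue is the analytic mean of the half-cosine weight over the sampled response-time range; substituting x = (π/2)·t/timeout turns the average into an elementary integral:

```latex
\bar{w} \;=\; \frac{1}{x_{\max}-x_{\min}} \int_{x_{\min}}^{x_{\max}} \cos x \, dx
       \;=\; \frac{\sin x_{\max} - \sin x_{\min}}{x_{\max}-x_{\min}},
\qquad x_{\min} = \frac{\pi}{2}\,\frac{t_{\min}}{\text{timeout}},\;
       x_{\max} = \frac{\pi}{2}\,\frac{t_{\max}}{\text{timeout}}
```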
  {
    "path": "les/vflux/client/valuetracker.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\nconst (\n\tvtVersion  = 1 // database encoding format for ValueTracker\n\tnvtVersion = 1 // database encoding format for NodeValueTracker\n)\n\nvar (\n\tvtKey     = []byte(\"vt:\")\n\tvtNodeKey = []byte(\"vtNode:\")\n)\n\n// NodeValueTracker collects service value statistics for a specific server node\ntype NodeValueTracker struct {\n\tlock sync.Mutex\n\n\tvt                   *ValueTracker\n\trtStats, lastRtStats ResponseTimeStats\n\tlastTransfer         mclock.AbsTime\n\tbasket               serverBasket\n\treqCosts             []uint64\n\treqValues            *[]float64\n}\n\n// UpdateCosts updates the node value tracker's request cost table\nfunc (nv *NodeValueTracker) UpdateCosts(reqCosts []uint64) {\n\tnv.vt.lock.Lock()\n\tdefer nv.vt.lock.Unlock()\n\n\tnv.updateCosts(reqCosts, &nv.vt.refBasket.reqValues, nv.vt.refBasket.reqValueFactor(reqCosts))\n}\n\n// updateCosts updates the request cost table of the server. The request value factor\n// is also updated based on the given cost table and the current reference basket.\n// Note that the contents of the referenced reqValues slice will not change; a new\n// reference is passed if the values are updated by ValueTracker.\nfunc (nv *NodeValueTracker) updateCosts(reqCosts []uint64, reqValues *[]float64, rvFactor float64) {\n\tnv.lock.Lock()\n\tdefer nv.lock.Unlock()\n\n\tnv.reqCosts = reqCosts\n\tnv.reqValues = reqValues\n\tnv.basket.updateRvFactor(rvFactor)\n}\n\n// transferStats returns request basket and response time statistics that should be\n// added to the global statistics. 
The contents of the server's own request basket are\n// gradually transferred to the main reference basket and removed from the server basket\n// with the specified transfer rate.\n// The response time statistics are retained at both places and therefore the global\n// distribution is always the sum of the individual server distributions.\nfunc (nv *NodeValueTracker) transferStats(now mclock.AbsTime, transferRate float64) (requestBasket, ResponseTimeStats) {\n\tnv.lock.Lock()\n\tdefer nv.lock.Unlock()\n\n\tdt := now - nv.lastTransfer\n\tnv.lastTransfer = now\n\tif dt < 0 {\n\t\tdt = 0\n\t}\n\trecentRtStats := nv.rtStats\n\trecentRtStats.SubStats(&nv.lastRtStats)\n\tnv.lastRtStats = nv.rtStats\n\treturn nv.basket.transfer(-math.Expm1(-transferRate * float64(dt))), recentRtStats\n}\n\ntype ServedRequest struct {\n\tReqType, Amount uint32\n}\n\n// Served adds a served request to the node's statistics. An actual request may be composed\n// of one or more request types (service vector indices).\nfunc (nv *NodeValueTracker) Served(reqs []ServedRequest, respTime time.Duration) {\n\tnv.vt.statsExpLock.RLock()\n\texpFactor := nv.vt.statsExpFactor\n\tnv.vt.statsExpLock.RUnlock()\n\n\tnv.lock.Lock()\n\tdefer nv.lock.Unlock()\n\n\tvar value float64\n\tfor _, r := range reqs {\n\t\tnv.basket.add(r.ReqType, r.Amount, nv.reqCosts[r.ReqType]*uint64(r.Amount), expFactor)\n\t\tvalue += (*nv.reqValues)[r.ReqType] * float64(r.Amount)\n\t}\n\tnv.rtStats.Add(respTime, value, expFactor)\n}\n\n// RtStats returns the node's own response time distribution statistics\nfunc (nv *NodeValueTracker) RtStats() ResponseTimeStats {\n\tnv.lock.Lock()\n\tdefer nv.lock.Unlock()\n\n\treturn nv.rtStats\n}\n\n// ValueTracker coordinates service value calculation for individual servers and updates\n// global statistics\ntype ValueTracker struct {\n\tclock        mclock.Clock\n\tlock         sync.Mutex\n\tquit         chan chan struct{}\n\tdb           ethdb.KeyValueStore\n\tconnected    map[enode.ID]*NodeValueTracker\n\treqTypeCount int\n\n\trefBasket      referenceBasket\n\tmappings       [][]string\n\tcurrentMapping int\n\tinitRefBasket  requestBasket\n\trtStats        ResponseTimeStats\n\n\ttransferRate                 float64\n\tstatsExpLock                 sync.RWMutex\n\tstatsExpRate, offlineExpRate float64\n\tstatsExpirer                 utils.Expirer\n\tstatsExpFactor               utils.ExpirationFactor\n}\n\ntype valueTrackerEncV1 struct {\n\tMappings           [][]string\n\tRefBasketMapping   uint\n\tRefBasket          requestBasket\n\tRtStats            ResponseTimeStats\n\tExpOffset, SavedAt uint64\n}\n\ntype nodeValueTrackerEncV1 struct {\n\tRtStats             ResponseTimeStats\n\tServerBasketMapping uint\n\tServerBasket        requestBasket\n}\n\n// RequestInfo is an initializer structure for the service vector.\ntype RequestInfo struct {\n\t// Name identifies the request type and is used for re-mapping the service vector if necessary\n\tName string\n\t// InitAmount and InitValue are used to initialize the reference basket\n\tInitAmount, InitValue float64\n}\n\n// NewValueTracker creates a new ValueTracker and loads its previously saved state from\n// the database if possible.\nfunc NewValueTracker(db ethdb.KeyValueStore, clock mclock.Clock, reqInfo []RequestInfo, updatePeriod time.Duration, transferRate, statsExpRate, offlineExpRate float64) *ValueTracker {\n\tnow := clock.Now()\n\n\tinitRefBasket := requestBasket{items: make([]basketItem, len(reqInfo))}\n\tmapping := make([]string, len(reqInfo))\n\n\tvar 
sumAmount, sumValue float64\n\tfor _, req := range reqInfo {\n\t\tsumAmount += req.InitAmount\n\t\tsumValue += req.InitAmount * req.InitValue\n\t}\n\tscaleValues := sumAmount * basketFactor / sumValue\n\tfor i, req := range reqInfo {\n\t\tmapping[i] = req.Name\n\t\tinitRefBasket.items[i].amount = uint64(req.InitAmount * basketFactor)\n\t\tinitRefBasket.items[i].value = uint64(req.InitAmount * req.InitValue * scaleValues)\n\t}\n\n\tvt := &ValueTracker{\n\t\tclock:          clock,\n\t\tconnected:      make(map[enode.ID]*NodeValueTracker),\n\t\tquit:           make(chan chan struct{}),\n\t\tdb:             db,\n\t\treqTypeCount:   len(initRefBasket.items),\n\t\tinitRefBasket:  initRefBasket,\n\t\ttransferRate:   transferRate,\n\t\tstatsExpRate:   statsExpRate,\n\t\tofflineExpRate: offlineExpRate,\n\t}\n\tif vt.loadFromDb(mapping) != nil {\n\t\t// previous state not saved or invalid, init with default values\n\t\tvt.refBasket.basket = initRefBasket\n\t\tvt.mappings = [][]string{mapping}\n\t\tvt.currentMapping = 0\n\t}\n\tvt.statsExpirer.SetRate(now, statsExpRate)\n\tvt.refBasket.init(vt.reqTypeCount)\n\tvt.periodicUpdate()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-clock.After(updatePeriod):\n\t\t\t\tvt.lock.Lock()\n\t\t\t\tvt.periodicUpdate()\n\t\t\t\tvt.lock.Unlock()\n\t\t\tcase quit := <-vt.quit:\n\t\t\t\tclose(quit)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn vt\n}\n\n// StatsExpirer returns the statistics expirer so that other values can be expired\n// with the same rate as the service value statistics.\nfunc (vt *ValueTracker) StatsExpirer() *utils.Expirer {\n\treturn &vt.statsExpirer\n}\n\n// StatsExpFactor returns the current expiration factor so that other values can be expired\n// with the same rate as the service value statistics.\nfunc (vt *ValueTracker) StatsExpFactor() utils.ExpirationFactor {\n\tvt.statsExpLock.RLock()\n\tdefer vt.statsExpLock.RUnlock()\n\n\treturn vt.statsExpFactor\n}\n\n// loadFromDb loads the value tracker's state from the database and converts saved\n// request basket index mapping if it does not match the specified index to name mapping.\nfunc (vt *ValueTracker) loadFromDb(mapping []string) error {\n\tenc, err := vt.db.Get(vtKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr := bytes.NewReader(enc)\n\tvar version uint\n\tif err := rlp.Decode(r, &version); err != nil {\n\t\tlog.Error(\"Decoding value tracker state failed\", \"err\", err)\n\t\treturn err\n\t}\n\tif version != vtVersion {\n\t\tlog.Error(\"Unknown ValueTracker version\", \"stored\", version, \"current\", vtVersion)\n\t\treturn fmt.Errorf(\"Unknown ValueTracker version %d (current version is %d)\", version, vtVersion)\n\t}\n\tvar vte valueTrackerEncV1\n\tif err := rlp.Decode(r, &vte); err != nil {\n\t\tlog.Error(\"Decoding value tracker state failed\", \"err\", err)\n\t\treturn err\n\t}\n\tlogOffset := utils.Fixed64(vte.ExpOffset)\n\tdt := time.Now().UnixNano() - int64(vte.SavedAt)\n\tif dt > 0 {\n\t\tlogOffset += utils.Float64ToFixed64(float64(dt) * vt.offlineExpRate / math.Log(2))\n\t}\n\tvt.statsExpirer.SetLogOffset(vt.clock.Now(), logOffset)\n\tvt.rtStats = vte.RtStats\n\tvt.mappings = vte.Mappings\n\tvt.currentMapping = -1\nloop:\n\tfor i, m := range vt.mappings {\n\t\tif len(m) != len(mapping) {\n\t\t\tcontinue loop\n\t\t}\n\t\tfor j, s := range mapping {\n\t\t\tif m[j] != s {\n\t\t\t\tcontinue loop\n\t\t\t}\n\t\t}\n\t\tvt.currentMapping = i\n\t\tbreak\n\t}\n\tif vt.currentMapping == -1 {\n\t\tvt.currentMapping = len(vt.mappings)\n\t\tvt.mappings = append(vt.mappings, 
mapping)\n\t}\n\tif int(vte.RefBasketMapping) == vt.currentMapping {\n\t\tvt.refBasket.basket = vte.RefBasket\n\t} else {\n\t\tif vte.RefBasketMapping >= uint(len(vt.mappings)) {\n\t\t\tlog.Error(\"Unknown request basket mapping\", \"stored\", vte.RefBasketMapping, \"current\", vt.currentMapping)\n\t\t\treturn fmt.Errorf(\"Unknown request basket mapping %d (current version is %d)\", vte.RefBasketMapping, vt.currentMapping)\n\t\t}\n\t\tvt.refBasket.basket = vte.RefBasket.convertMapping(vt.mappings[vte.RefBasketMapping], mapping, vt.initRefBasket)\n\t}\n\treturn nil\n}\n\n// saveToDb saves the value tracker's state to the database\nfunc (vt *ValueTracker) saveToDb() {\n\tvte := valueTrackerEncV1{\n\t\tMappings:         vt.mappings,\n\t\tRefBasketMapping: uint(vt.currentMapping),\n\t\tRefBasket:        vt.refBasket.basket,\n\t\tRtStats:          vt.rtStats,\n\t\tExpOffset:        uint64(vt.statsExpirer.LogOffset(vt.clock.Now())),\n\t\tSavedAt:          uint64(time.Now().UnixNano()),\n\t}\n\tenc1, err := rlp.EncodeToBytes(uint(vtVersion))\n\tif err != nil {\n\t\tlog.Error(\"Encoding value tracker state failed\", \"err\", err)\n\t\treturn\n\t}\n\tenc2, err := rlp.EncodeToBytes(&vte)\n\tif err != nil {\n\t\tlog.Error(\"Encoding value tracker state failed\", \"err\", err)\n\t\treturn\n\t}\n\tif err := vt.db.Put(vtKey, append(enc1, enc2...)); err != nil {\n\t\tlog.Error(\"Saving value tracker state failed\", \"err\", err)\n\t}\n}\n\n// Stop saves the value tracker's state and each loaded node's individual state and\n// returns after shutting the internal goroutines down.\nfunc (vt *ValueTracker) Stop() {\n\tquit := make(chan struct{})\n\tvt.quit <- quit\n\t<-quit\n\tvt.lock.Lock()\n\tvt.periodicUpdate()\n\tfor id, nv := range vt.connected {\n\t\tvt.saveNode(id, nv)\n\t}\n\tvt.connected = nil\n\tvt.saveToDb()\n\tvt.lock.Unlock()\n}\n\n// Register adds a server node to the value tracker\nfunc (vt *ValueTracker) Register(id enode.ID) *NodeValueTracker {\n\tvt.lock.Lock()\n\tdefer vt.lock.Unlock()\n\n\tif vt.connected == nil {\n\t\t// ValueTracker has already been stopped\n\t\treturn nil\n\t}\n\tnv := vt.loadOrNewNode(id)\n\treqTypeCount := len(vt.refBasket.reqValues)\n\tnv.reqCosts = make([]uint64, reqTypeCount)\n\tnv.lastTransfer = vt.clock.Now()\n\tnv.reqValues = &vt.refBasket.reqValues\n\tnv.basket.init(reqTypeCount)\n\n\tvt.connected[id] = nv\n\treturn nv\n}\n\n// Unregister removes a server node from the value tracker\nfunc (vt *ValueTracker) Unregister(id enode.ID) {\n\tvt.lock.Lock()\n\tdefer vt.lock.Unlock()\n\n\tif nv := vt.connected[id]; nv != nil {\n\t\tvt.saveNode(id, nv)\n\t\tdelete(vt.connected, id)\n\t}\n}\n\n// GetNode returns an individual server node's value tracker. If it did not exist before\n// then a new node is created.\nfunc (vt *ValueTracker) GetNode(id enode.ID) *NodeValueTracker {\n\tvt.lock.Lock()\n\tdefer vt.lock.Unlock()\n\n\treturn vt.loadOrNewNode(id)\n}\n\n// loadOrNewNode returns an individual server node's value tracker. 
If it did not exist before\n// then a new node is created.\nfunc (vt *ValueTracker) loadOrNewNode(id enode.ID) *NodeValueTracker {\n\tif nv, ok := vt.connected[id]; ok {\n\t\treturn nv\n\t}\n\tnv := &NodeValueTracker{vt: vt, lastTransfer: vt.clock.Now()}\n\tenc, err := vt.db.Get(append(vtNodeKey, id[:]...))\n\tif err != nil {\n\t\treturn nv\n\t}\n\tr := bytes.NewReader(enc)\n\tvar version uint\n\tif err := rlp.Decode(r, &version); err != nil {\n\t\tlog.Error(\"Failed to decode node value tracker\", \"id\", id, \"err\", err)\n\t\treturn nv\n\t}\n\tif version != nvtVersion {\n\t\tlog.Error(\"Unknown NodeValueTracker version\", \"stored\", version, \"current\", nvtVersion)\n\t\treturn nv\n\t}\n\tvar nve nodeValueTrackerEncV1\n\tif err := rlp.Decode(r, &nve); err != nil {\n\t\tlog.Error(\"Failed to decode node value tracker\", \"id\", id, \"err\", err)\n\t\treturn nv\n\t}\n\tnv.rtStats = nve.RtStats\n\tnv.lastRtStats = nve.RtStats\n\tif int(nve.ServerBasketMapping) == vt.currentMapping {\n\t\tnv.basket.basket = nve.ServerBasket\n\t} else {\n\t\tif nve.ServerBasketMapping >= uint(len(vt.mappings)) {\n\t\t\tlog.Error(\"Unknown request basket mapping\", \"stored\", nve.ServerBasketMapping, \"current\", vt.currentMapping)\n\t\t\treturn nv\n\t\t}\n\t\tnv.basket.basket = nve.ServerBasket.convertMapping(vt.mappings[nve.ServerBasketMapping], vt.mappings[vt.currentMapping], vt.initRefBasket)\n\t}\n\treturn nv\n}\n\n// saveNode saves a server node's value tracker to the database\nfunc (vt *ValueTracker) saveNode(id enode.ID, nv *NodeValueTracker) {\n\trecentRtStats := nv.rtStats\n\trecentRtStats.SubStats(&nv.lastRtStats)\n\tvt.rtStats.AddStats(&recentRtStats)\n\tnv.lastRtStats = nv.rtStats\n\n\tnve := nodeValueTrackerEncV1{\n\t\tRtStats:             nv.rtStats,\n\t\tServerBasketMapping: uint(vt.currentMapping),\n\t\tServerBasket:        nv.basket.basket,\n\t}\n\tenc1, err := rlp.EncodeToBytes(uint(nvtVersion))\n\tif err != nil {\n\t\tlog.Error(\"Failed to encode service value information\", \"id\", id, \"err\", err)\n\t\treturn\n\t}\n\tenc2, err := rlp.EncodeToBytes(&nve)\n\tif err != nil {\n\t\tlog.Error(\"Failed to encode service value information\", \"id\", id, \"err\", err)\n\t\treturn\n\t}\n\tif err := vt.db.Put(append(vtNodeKey, id[:]...), append(enc1, enc2...)); err != nil {\n\t\tlog.Error(\"Failed to save service value information\", \"id\", id, \"err\", err)\n\t}\n}\n\n// RtStats returns the global response time distribution statistics\nfunc (vt *ValueTracker) RtStats() ResponseTimeStats {\n\tvt.lock.Lock()\n\tdefer vt.lock.Unlock()\n\n\tvt.periodicUpdate()\n\treturn vt.rtStats\n}\n\n// periodicUpdate transfers individual node data to the global statistics, normalizes\n// the reference basket and updates request values. 
The global state is also saved to\n// the database with each update.\nfunc (vt *ValueTracker) periodicUpdate() {\n\tnow := vt.clock.Now()\n\tvt.statsExpLock.Lock()\n\tvt.statsExpFactor = utils.ExpFactor(vt.statsExpirer.LogOffset(now))\n\tvt.statsExpLock.Unlock()\n\n\tfor _, nv := range vt.connected {\n\t\tbasket, rtStats := nv.transferStats(now, vt.transferRate)\n\t\tvt.refBasket.add(basket)\n\t\tvt.rtStats.AddStats(&rtStats)\n\t}\n\tvt.refBasket.normalize()\n\tvt.refBasket.updateReqValues()\n\tfor _, nv := range vt.connected {\n\t\tnv.updateCosts(nv.reqCosts, &vt.refBasket.reqValues, vt.refBasket.reqValueFactor(nv.reqCosts))\n\t}\n\tvt.saveToDb()\n}\n\ntype RequestStatsItem struct {\n\tName                string\n\tReqAmount, ReqValue float64\n}\n\n// RequestStats returns the current contents of the reference request basket, with\n// request values meaning average per request rather than total.\nfunc (vt *ValueTracker) RequestStats() []RequestStatsItem {\n\tvt.statsExpLock.RLock()\n\texpFactor := vt.statsExpFactor\n\tvt.statsExpLock.RUnlock()\n\tvt.lock.Lock()\n\tdefer vt.lock.Unlock()\n\n\tvt.periodicUpdate()\n\tres := make([]RequestStatsItem, len(vt.refBasket.basket.items))\n\tfor i, item := range vt.refBasket.basket.items {\n\t\tres[i].Name = vt.mappings[vt.currentMapping][i]\n\t\tres[i].ReqAmount = expFactor.Value(float64(item.amount)/basketFactor, vt.refBasket.basket.exp)\n\t\tres[i].ReqValue = vt.refBasket.reqValues[i]\n\t}\n\treturn res\n}\n"
  },
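Putting the pieces together, a minimal usage sketch of the tracker lifecycle: create the tracker, register a server, report served requests, and stop to persist state. The request name and cost value are illustrative, and the rates mirror those used in the package tests:

```go
package main

import (
	"math"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/les/vflux/client"
	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	reqInfo := []client.RequestInfo{
		{Name: "headers", InitAmount: 1, InitValue: 1}, // illustrative request type
	}
	// Rates as in the package tests: 1/hour transfer rate, statistics
	// halving every 100 hours both online and offline.
	expRate := math.Log(2) / float64(100*time.Hour)
	vt := client.NewValueTracker(memorydb.New(), &mclock.System{}, reqInfo,
		time.Minute, 1/float64(time.Hour), expRate, expRate)
	defer vt.Stop() // saves tracker and node state to the database

	nv := vt.Register(enode.ID{1})
	nv.UpdateCosts([]uint64{100000}) // announced cost of request type 0
	// Report a served request with its response time; value statistics are
	// updated using the current reference basket.
	nv.Served([]client.ServedRequest{{ReqType: 0, Amount: 1}}, 120*time.Millisecond)
}
```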
  {
    "path": "les/vflux/client/valuetracker_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/ethdb/memorydb\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n)\n\nconst (\n\ttestReqTypes  = 3\n\ttestNodeCount = 5\n\ttestReqCount  = 10000\n\ttestRounds    = 10\n)\n\nfunc TestValueTracker(t *testing.T) {\n\tdb := memorydb.New()\n\tclock := &mclock.Simulated{}\n\trequestList := make([]RequestInfo, testReqTypes)\n\trelPrices := make([]float64, testReqTypes)\n\ttotalAmount := make([]uint64, testReqTypes)\n\tfor i := range requestList {\n\t\trequestList[i] = RequestInfo{Name: \"testreq\" + strconv.Itoa(i), InitAmount: 1, InitValue: 1}\n\t\ttotalAmount[i] = 1\n\t\trelPrices[i] = rand.Float64() + 0.1\n\t}\n\tnodes := make([]*NodeValueTracker, testNodeCount)\n\tfor round := 0; round < testRounds; round++ {\n\t\tmakeRequests := round < testRounds-2\n\t\tuseExpiration := round == testRounds-1\n\t\tvar expRate float64\n\t\tif useExpiration {\n\t\t\texpRate = math.Log(2) / float64(time.Hour*100)\n\t\t}\n\n\t\tvt := NewValueTracker(db, clock, requestList, time.Minute, 1/float64(time.Hour), expRate, expRate)\n\t\tupdateCosts := func(i int) {\n\t\t\tcostList := make([]uint64, testReqTypes)\n\t\t\tbaseCost := rand.Float64()*10000000 + 100000\n\t\t\tfor j := range costList {\n\t\t\t\tcostList[j] = uint64(baseCost * relPrices[j])\n\t\t\t}\n\t\t\tnodes[i].UpdateCosts(costList)\n\t\t}\n\t\tfor i := range nodes {\n\t\t\tnodes[i] = vt.Register(enode.ID{byte(i)})\n\t\t\tupdateCosts(i)\n\t\t}\n\t\tif makeRequests {\n\t\t\tfor i := 0; i < testReqCount; i++ {\n\t\t\t\treqType := rand.Intn(testReqTypes)\n\t\t\t\treqAmount := rand.Intn(10) + 1\n\t\t\t\tnode := rand.Intn(testNodeCount)\n\t\t\t\trespTime := time.Duration((rand.Float64() + 1) * float64(time.Second) * float64(node+1) / testNodeCount)\n\t\t\t\ttotalAmount[reqType] += uint64(reqAmount)\n\t\t\t\tnodes[node].Served([]ServedRequest{{uint32(reqType), uint32(reqAmount)}}, respTime)\n\t\t\t\tclock.Run(time.Second)\n\t\t\t}\n\t\t} else {\n\t\t\tclock.Run(time.Hour * 100)\n\t\t\tif useExpiration {\n\t\t\t\tfor i, a := range totalAmount {\n\t\t\t\t\ttotalAmount[i] = a / 2\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tvt.Stop()\n\t\tvar sumrp, sumrv float64\n\t\tfor i, rp := range relPrices {\n\t\t\tsumrp += rp\n\t\t\tsumrv += vt.refBasket.reqValues[i]\n\t\t}\n\t\tfor i, rp := range relPrices {\n\t\t\tratio := vt.refBasket.reqValues[i] * sumrp / (rp * sumrv)\n\t\t\tif ratio < 0.99 || ratio > 1.01 {\n\t\t\t\tt.Errorf(\"reqValues (%v) does not match relPrices (%v)\", vt.refBasket.reqValues, 
relPrices)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\texp := utils.ExpFactor(vt.StatsExpirer().LogOffset(clock.Now()))\n\t\tbasketAmount := make([]uint64, testReqTypes)\n\t\tfor i, bi := range vt.refBasket.basket.items {\n\t\t\tbasketAmount[i] += uint64(exp.Value(float64(bi.amount), vt.refBasket.basket.exp))\n\t\t}\n\t\tif makeRequests {\n\t\t\t// if requests were made in this round then some amounts are still in the\n\t\t\t// node baskets; otherwise all amounts have already been transferred to\n\t\t\t// the reference basket\n\t\t\tfor _, node := range nodes {\n\t\t\t\tfor i, bi := range node.basket.basket.items {\n\t\t\t\t\tbasketAmount[i] += uint64(exp.Value(float64(bi.amount), node.basket.basket.exp))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor i, a := range basketAmount {\n\t\t\tamount := a / basketFactor\n\t\t\tif amount+10 < totalAmount[i] || amount > totalAmount[i]+10 {\n\t\t\t\tt.Errorf(\"totalAmount[%d] mismatch in round %d (expected %d, got %d)\", i, round, totalAmount[i], amount)\n\t\t\t}\n\t\t}\n\t\tvar sumValue float64\n\t\tfor _, node := range nodes {\n\t\t\ts := node.RtStats()\n\t\t\tsumValue += s.Value(maxResponseWeights, exp)\n\t\t}\n\t\ts := vt.RtStats()\n\t\tmainValue := s.Value(maxResponseWeights, exp)\n\t\tif sumValue < mainValue-10 || sumValue > mainValue+10 {\n\t\t\tt.Errorf(\"Main rtStats value does not match sum of node rtStats values in round %d (main %v, sum %v)\", round, mainValue, sumValue)\n\t\t}\n\t}\n}\n"
  },
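The halving of totalAmount in the expiration round follows from the exponential decay model the tracker uses: with expRate = ln 2 / (100 h), running the simulated clock for 100 hours halves every tracked amount:

```latex
v(t) \;=\; v_0\, e^{-r t}, \qquad r = \frac{\ln 2}{100\,\mathrm{h}}
\;\;\Longrightarrow\;\; v(100\,\mathrm{h}) \;=\; v_0\, 2^{-1} \;=\; \tfrac{v_0}{2}
```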
  {
    "path": "les/vflux/client/wrsiterator.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"sync\"\n\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\n// WrsIterator returns nodes from the specified selectable set with a weighted random\n// selection. Selection weights are provided by a callback function.\ntype WrsIterator struct {\n\tlock sync.Mutex\n\tcond *sync.Cond\n\n\tns       *nodestate.NodeStateMachine\n\twrs      *utils.WeightedRandomSelect\n\tnextNode *enode.Node\n\tclosed   bool\n}\n\n// NewWrsIterator creates a new WrsIterator. Nodes are selectable if they have all the required\n// and none of the disabled flags set. When a node is selected the selectedFlag is set which also\n// disables further selectability until it is removed or times out.\nfunc NewWrsIterator(ns *nodestate.NodeStateMachine, requireFlags, disableFlags nodestate.Flags, weightField nodestate.Field) *WrsIterator {\n\twfn := func(i interface{}) uint64 {\n\t\tn := ns.GetNode(i.(enode.ID))\n\t\tif n == nil {\n\t\t\treturn 0\n\t\t}\n\t\twt, _ := ns.GetField(n, weightField).(uint64)\n\t\treturn wt\n\t}\n\n\tw := &WrsIterator{\n\t\tns:  ns,\n\t\twrs: utils.NewWeightedRandomSelect(wfn),\n\t}\n\tw.cond = sync.NewCond(&w.lock)\n\n\tns.SubscribeField(weightField, func(n *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {\n\t\tif state.HasAll(requireFlags) && state.HasNone(disableFlags) {\n\t\t\tw.lock.Lock()\n\t\t\tw.wrs.Update(n.ID())\n\t\t\tw.lock.Unlock()\n\t\t\tw.cond.Signal()\n\t\t}\n\t})\n\n\tns.SubscribeState(requireFlags.Or(disableFlags), func(n *enode.Node, oldState, newState nodestate.Flags) {\n\t\toldMatch := oldState.HasAll(requireFlags) && oldState.HasNone(disableFlags)\n\t\tnewMatch := newState.HasAll(requireFlags) && newState.HasNone(disableFlags)\n\t\tif newMatch == oldMatch {\n\t\t\treturn\n\t\t}\n\n\t\tw.lock.Lock()\n\t\tif newMatch {\n\t\t\tw.wrs.Update(n.ID())\n\t\t} else {\n\t\t\tw.wrs.Remove(n.ID())\n\t\t}\n\t\tw.lock.Unlock()\n\t\tw.cond.Signal()\n\t})\n\treturn w\n}\n\n// Next selects the next node.\nfunc (w *WrsIterator) Next() bool {\n\tw.nextNode = w.chooseNode()\n\treturn w.nextNode != nil\n}\n\nfunc (w *WrsIterator) chooseNode() *enode.Node {\n\tw.lock.Lock()\n\tdefer w.lock.Unlock()\n\n\tfor {\n\t\tfor !w.closed && w.wrs.IsEmpty() {\n\t\t\tw.cond.Wait()\n\t\t}\n\t\tif w.closed {\n\t\t\treturn nil\n\t\t}\n\t\t// Choose the next node at random. 
Even though w.wrs is guaranteed\n\t\t// non-empty here, Choose might return nil if all items have weight\n\t\t// zero.\n\t\tif c := w.wrs.Choose(); c != nil {\n\t\t\tid := c.(enode.ID)\n\t\t\tw.wrs.Remove(id)\n\t\t\treturn w.ns.GetNode(id)\n\t\t}\n\t}\n\n}\n\n// Close ends the iterator.\nfunc (w *WrsIterator) Close() {\n\tw.lock.Lock()\n\tw.closed = true\n\tw.lock.Unlock()\n\tw.cond.Signal()\n}\n\n// Node returns the current node.\nfunc (w *WrsIterator) Node() *enode.Node {\n\tw.lock.Lock()\n\tdefer w.lock.Unlock()\n\treturn w.nextNode\n}\n"
  },
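WrsIterator implements the blocking enode.Iterator pattern: Next waits until a weighted selection is possible and returns false only after Close. A minimal consumer sketch, assuming the iterator comes from NewWrsIterator and dial is a caller-supplied connect function:

```go
package example

import "github.com/ethereum/go-ethereum/p2p/enode"

// dialLoop drains a blocking iterator such as WrsIterator. Next blocks until
// a node is selectable and returns false once the iterator is closed, so the
// loop doubles as a clean shutdown path.
func dialLoop(it enode.Iterator, dial func(*enode.Node)) {
	defer it.Close()
	for it.Next() {
		dial(it.Node())
	}
}
```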
  {
    "path": "les/vflux/client/wrsiterator_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage client\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\nvar (\n\ttestSetup     = &nodestate.Setup{}\n\tsfTest1       = testSetup.NewFlag(\"test1\")\n\tsfTest2       = testSetup.NewFlag(\"test2\")\n\tsfTest3       = testSetup.NewFlag(\"test3\")\n\tsfTest4       = testSetup.NewFlag(\"test4\")\n\tsfiTestWeight = testSetup.NewField(\"nodeWeight\", reflect.TypeOf(uint64(0)))\n)\n\nconst iterTestNodeCount = 6\n\nfunc TestWrsIterator(t *testing.T) {\n\tns := nodestate.NewNodeStateMachine(nil, nil, &mclock.Simulated{}, testSetup)\n\tw := NewWrsIterator(ns, sfTest2, sfTest3.Or(sfTest4), sfiTestWeight)\n\tns.Start()\n\tfor i := 1; i <= iterTestNodeCount; i++ {\n\t\tns.SetState(testNode(i), sfTest1, nodestate.Flags{}, 0)\n\t\tns.SetField(testNode(i), sfiTestWeight, uint64(1))\n\t}\n\tnext := func() int {\n\t\tch := make(chan struct{})\n\t\tgo func() {\n\t\t\tw.Next()\n\t\t\tclose(ch)\n\t\t}()\n\t\tselect {\n\t\tcase <-ch:\n\t\tcase <-time.After(time.Second * 5):\n\t\t\tt.Fatalf(\"Iterator.Next() timeout\")\n\t\t}\n\t\tnode := w.Node()\n\t\tns.SetState(node, sfTest4, nodestate.Flags{}, 0)\n\t\treturn testNodeIndex(node.ID())\n\t}\n\tset := make(map[int]bool)\n\texpset := func() {\n\t\tfor len(set) > 0 {\n\t\t\tn := next()\n\t\t\tif !set[n] {\n\t\t\t\tt.Errorf(\"Item returned by iterator not in the expected set (got %d)\", n)\n\t\t\t}\n\t\t\tdelete(set, n)\n\t\t}\n\t}\n\n\tns.SetState(testNode(1), sfTest2, nodestate.Flags{}, 0)\n\tns.SetState(testNode(2), sfTest2, nodestate.Flags{}, 0)\n\tns.SetState(testNode(3), sfTest2, nodestate.Flags{}, 0)\n\tset[1] = true\n\tset[2] = true\n\tset[3] = true\n\texpset()\n\tns.SetState(testNode(4), sfTest2, nodestate.Flags{}, 0)\n\tns.SetState(testNode(5), sfTest2.Or(sfTest3), nodestate.Flags{}, 0)\n\tns.SetState(testNode(6), sfTest2, nodestate.Flags{}, 0)\n\tset[4] = true\n\tset[6] = true\n\texpset()\n\tns.SetField(testNode(2), sfiTestWeight, uint64(0))\n\tns.SetState(testNode(1), nodestate.Flags{}, sfTest4, 0)\n\tns.SetState(testNode(2), nodestate.Flags{}, sfTest4, 0)\n\tns.SetState(testNode(3), nodestate.Flags{}, sfTest4, 0)\n\tset[1] = true\n\tset[3] = true\n\texpset()\n\tns.SetField(testNode(2), sfiTestWeight, uint64(1))\n\tns.SetState(testNode(2), nodestate.Flags{}, sfTest2, 0)\n\tns.SetState(testNode(1), nodestate.Flags{}, sfTest4, 0)\n\tns.SetState(testNode(2), sfTest2, sfTest4, 0)\n\tns.SetState(testNode(3), nodestate.Flags{}, sfTest4, 0)\n\tset[1] = true\n\tset[2] = true\n\tset[3] = true\n\texpset()\n\tns.Stop()\n}\n"
  },
  {
    "path": "les/vflux/requests.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage vflux\n\nimport (\n\t\"errors\"\n\t\"math\"\n\t\"math/big\"\n\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\nvar ErrNoReply = errors.New(\"no reply for given request\")\n\nconst (\n\tMaxRequestLength    = 16 // max number of individual requests in a batch\n\tCapacityQueryName   = \"cq\"\n\tCapacityQueryMaxLen = 16\n)\n\ntype (\n\t// Request describes a single vflux request inside a batch. Service and request\n\t// type are identified by strings, parameters are RLP encoded.\n\tRequest struct {\n\t\tService, Name string\n\t\tParams        []byte\n\t}\n\t// Requests are a batch of vflux requests\n\tRequests []Request\n\n\t// Replies are the replies to a batch of requests\n\tReplies [][]byte\n\n\t// CapacityQueryReq is the encoding format of the capacity query\n\tCapacityQueryReq struct {\n\t\tBias      uint64 // seconds\n\t\tAddTokens []IntOrInf\n\t}\n\t// CapacityQueryReq is the encoding format of the response to the capacity query\n\tCapacityQueryReply []uint64\n)\n\n// Add encodes and adds a new request to the batch\nfunc (r *Requests) Add(service, name string, val interface{}) (int, error) {\n\tenc, err := rlp.EncodeToBytes(val)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\t*r = append(*r, Request{\n\t\tService: service,\n\t\tName:    name,\n\t\tParams:  enc,\n\t})\n\treturn len(*r) - 1, nil\n}\n\n// Get decodes the reply to the i-th request in the batch\nfunc (r Replies) Get(i int, val interface{}) error {\n\tif i < 0 || i >= len(r) {\n\t\treturn ErrNoReply\n\t}\n\treturn rlp.DecodeBytes(r[i], val)\n}\n\nconst (\n\tIntNonNegative = iota\n\tIntNegative\n\tIntPlusInf\n\tIntMinusInf\n)\n\n// IntOrInf is the encoding format for arbitrary length signed integers that can also\n// hold the values of +Inf or -Inf\ntype IntOrInf struct {\n\tType  uint8\n\tValue big.Int\n}\n\n// BigInt returns the value as a big.Int or panics if the value is infinity\nfunc (i *IntOrInf) BigInt() *big.Int {\n\tswitch i.Type {\n\tcase IntNonNegative:\n\t\treturn new(big.Int).Set(&i.Value)\n\tcase IntNegative:\n\t\treturn new(big.Int).Neg(&i.Value)\n\tcase IntPlusInf:\n\t\tpanic(nil) // caller should check Inf() before trying to convert to big.Int\n\tcase IntMinusInf:\n\t\tpanic(nil)\n\t}\n\treturn &big.Int{} // invalid type decodes to 0 value\n}\n\n// Inf returns 1 if the value is +Inf, -1 if it is -Inf, 0 otherwise\nfunc (i *IntOrInf) Inf() int {\n\tswitch i.Type {\n\tcase IntPlusInf:\n\t\treturn 1\n\tcase IntMinusInf:\n\t\treturn -1\n\t}\n\treturn 0 // invalid type decodes to 0 value\n}\n\n// Int64 limits the value between MinInt64 and MaxInt64 (even if it is +-Inf) and returns an int64 type\nfunc (i *IntOrInf) Int64() int64 {\n\tswitch i.Type {\n\tcase IntNonNegative:\n\t\tif i.Value.IsInt64() 
{\n\t\t\treturn i.Value.Int64()\n\t\t} else {\n\t\t\treturn math.MaxInt64\n\t\t}\n\tcase IntNegative:\n\t\tif i.Value.IsInt64() {\n\t\t\treturn -i.Value.Int64()\n\t\t} else {\n\t\t\treturn math.MinInt64\n\t\t}\n\tcase IntPlusInf:\n\t\treturn math.MaxInt64\n\tcase IntMinusInf:\n\t\treturn math.MinInt64\n\t}\n\treturn 0 // invalid type decodes to 0 value\n}\n\n// SetBigInt sets the value to the given big.Int\nfunc (i *IntOrInf) SetBigInt(v *big.Int) {\n\tif v.Sign() >= 0 {\n\t\ti.Type = IntNonNegative\n\t\ti.Value.Set(v)\n\t} else {\n\t\ti.Type = IntNegative\n\t\ti.Value.Neg(v)\n\t}\n}\n\n// SetInt64 sets the value to the given int64. Note that MaxInt64 translates to +Inf\n// while MinInt64 translates to -Inf.\nfunc (i *IntOrInf) SetInt64(v int64) {\n\tif v >= 0 {\n\t\tif v == math.MaxInt64 {\n\t\t\ti.Type = IntPlusInf\n\t\t} else {\n\t\t\ti.Type = IntNonNegative\n\t\t\ti.Value.SetInt64(v)\n\t\t}\n\t} else {\n\t\tif v == math.MinInt64 {\n\t\t\ti.Type = IntMinusInf\n\t\t} else {\n\t\t\ti.Type = IntNegative\n\t\t\ti.Value.SetInt64(-v)\n\t\t}\n\t}\n}\n\n// SetInf sets the value to +Inf or -Inf\nfunc (i *IntOrInf) SetInf(sign int) {\n\tif sign == 1 {\n\t\ti.Type = IntPlusInf\n\t} else {\n\t\ti.Type = IntMinusInf\n\t}\n}\n"
  },
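The MaxInt64/MinInt64 to ±Inf mapping described in the SetInt64 comment can be seen in a small round-trip sketch (the import path assumes this package's location in the tree):

```go
package main

import (
	"fmt"
	"math"

	"github.com/ethereum/go-ethereum/les/vflux"
)

func main() {
	var v vflux.IntOrInf

	v.SetInt64(42)
	fmt.Println(v.Int64(), v.Inf()) // 42 0

	// MaxInt64 is mapped to +Inf on encode and back to MaxInt64 on decode.
	v.SetInt64(math.MaxInt64)
	fmt.Println(v.Inf(), v.Int64() == math.MaxInt64) // 1 true

	v.SetInf(-1)
	fmt.Println(v.Inf(), v.Int64() == math.MinInt64) // -1 true
}
```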
  {
    "path": "les/vflux/server/balance.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage server\n\nimport (\n\t\"errors\"\n\t\"math\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\nvar errBalanceOverflow = errors.New(\"balance overflow\")\n\nconst maxBalance = math.MaxInt64 // maximum allowed balance value\n\nconst (\n\tbalanceCallbackUpdate = iota // called when priority drops below the last minimum estimate\n\tbalanceCallbackZero          // called when priority drops to zero (positive balance exhausted)\n\tbalanceCallbackCount         // total number of balance callbacks\n)\n\n// PriceFactors determine the pricing policy (may apply either to positive or\n// negative balances which may have different factors).\n// - TimeFactor is cost unit per nanosecond of connection time\n// - CapacityFactor is cost unit per nanosecond of connection time per 1000000 capacity\n// - RequestFactor is cost unit per request \"realCost\" unit\ntype PriceFactors struct {\n\tTimeFactor, CapacityFactor, RequestFactor float64\n}\n\n// timePrice returns the price of connection per nanosecond at the given capacity\nfunc (p PriceFactors) timePrice(cap uint64) float64 {\n\treturn p.TimeFactor + float64(cap)*p.CapacityFactor/1000000\n}\n\n// NodeBalance keeps track of the positive and negative balances of a connected\n// client and calculates actual and projected future priority values.\n// Implements nodePriority interface.\ntype NodeBalance struct {\n\tbt                               *BalanceTracker\n\tlock                             sync.RWMutex\n\tnode                             *enode.Node\n\tconnAddress                      string\n\tactive                           bool\n\tpriority                         bool\n\tcapacity                         uint64\n\tbalance                          balance\n\tposFactor, negFactor             PriceFactors\n\tsumReqCost                       uint64\n\tlastUpdate, nextUpdate, initTime mclock.AbsTime\n\tupdateEvent                      mclock.Timer\n\t// since only a limited and fixed number of callbacks are needed, they are\n\t// stored in a fixed size array ordered by priority threshold.\n\tcallbacks [balanceCallbackCount]balanceCallback\n\t// callbackIndex maps balanceCallback constants to callbacks array indexes (-1 if not active)\n\tcallbackIndex [balanceCallbackCount]int\n\tcallbackCount int // number of active callbacks\n}\n\n// balance represents a pair of positive and negative balances\ntype balance struct {\n\tpos, neg utils.ExpiredValue\n}\n\n// balanceCallback represents a single callback that is activated when client priority\n// reaches the 
given threshold\ntype balanceCallback struct {\n\tid        int\n\tthreshold int64\n\tcallback  func()\n}\n\n// GetBalance returns the current positive and negative balance.\nfunc (n *NodeBalance) GetBalance() (uint64, uint64) {\n\tn.lock.Lock()\n\tdefer n.lock.Unlock()\n\n\tnow := n.bt.clock.Now()\n\tn.updateBalance(now)\n\treturn n.balance.pos.Value(n.bt.posExp.LogOffset(now)), n.balance.neg.Value(n.bt.negExp.LogOffset(now))\n}\n\n// GetRawBalance returns the current positive and negative balance\n// but in the raw (expired value) format.\nfunc (n *NodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) {\n\tn.lock.Lock()\n\tdefer n.lock.Unlock()\n\n\tnow := n.bt.clock.Now()\n\tn.updateBalance(now)\n\treturn n.balance.pos, n.balance.neg\n}\n\n// AddBalance adds the given amount to the positive balance and returns the balance\n// before and after the operation. Exceeding maxBalance results in an error (balance is\n// unchanged) while adding a negative amount higher than the current balance results in\n// zero balance.\nfunc (n *NodeBalance) AddBalance(amount int64) (uint64, uint64, error) {\n\tvar (\n\t\terr      error\n\t\told, new uint64\n\t)\n\tn.bt.ns.Operation(func() {\n\t\tvar (\n\t\t\tcallbacks   []func()\n\t\t\tsetPriority bool\n\t\t)\n\t\tn.bt.updateTotalBalance(n, func() bool {\n\t\t\tnow := n.bt.clock.Now()\n\t\t\tn.updateBalance(now)\n\n\t\t\t// Ensure the given amount is valid to apply.\n\t\t\toffset := n.bt.posExp.LogOffset(now)\n\t\t\told = n.balance.pos.Value(offset)\n\t\t\tif amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) {\n\t\t\t\terr = errBalanceOverflow\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\t// Update the total positive balance counter.\n\t\t\tn.balance.pos.Add(amount, offset)\n\t\t\tcallbacks = n.checkCallbacks(now)\n\t\t\tsetPriority = n.checkPriorityStatus()\n\t\t\tnew = n.balance.pos.Value(offset)\n\t\t\tn.storeBalance(true, false)\n\t\t\treturn true\n\t\t})\n\t\tfor _, cb := range callbacks {\n\t\t\tcb()\n\t\t}\n\t\tif setPriority {\n\t\t\tn.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0)\n\t\t}\n\t\tn.signalPriorityUpdate()\n\t})\n\tif err != nil {\n\t\treturn old, old, err\n\t}\n\n\treturn old, new, nil\n}\n\n// SetBalance sets the positive and negative balance to the given values\nfunc (n *NodeBalance) SetBalance(pos, neg uint64) error {\n\tif pos > maxBalance || neg > maxBalance {\n\t\treturn errBalanceOverflow\n\t}\n\tn.bt.ns.Operation(func() {\n\t\tvar (\n\t\t\tcallbacks   []func()\n\t\t\tsetPriority bool\n\t\t)\n\t\tn.bt.updateTotalBalance(n, func() bool {\n\t\t\tnow := n.bt.clock.Now()\n\t\t\tn.updateBalance(now)\n\n\t\t\tvar pb, nb utils.ExpiredValue\n\t\t\tpb.Add(int64(pos), n.bt.posExp.LogOffset(now))\n\t\t\tnb.Add(int64(neg), n.bt.negExp.LogOffset(now))\n\t\t\tn.balance.pos = pb\n\t\t\tn.balance.neg = nb\n\t\t\tcallbacks = n.checkCallbacks(now)\n\t\t\tsetPriority = n.checkPriorityStatus()\n\t\t\tn.storeBalance(true, true)\n\t\t\treturn true\n\t\t})\n\t\tfor _, cb := range callbacks {\n\t\t\tcb()\n\t\t}\n\t\tif setPriority {\n\t\t\tn.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0)\n\t\t}\n\t\tn.signalPriorityUpdate()\n\t})\n\treturn nil\n}\n\n// RequestServed should be called after serving a request for the given peer\nfunc (n *NodeBalance) RequestServed(cost uint64) uint64 {\n\tn.lock.Lock()\n\tvar callbacks []func()\n\tdefer func() {\n\t\tn.lock.Unlock()\n\t\tif callbacks != nil {\n\t\t\tn.bt.ns.Operation(func() {\n\t\t\t\tfor _, cb := range callbacks 
{\n\t\t\t\t\tcb()\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}()\n\n\tnow := n.bt.clock.Now()\n\tn.updateBalance(now)\n\tfcost := float64(cost)\n\n\tposExp := n.bt.posExp.LogOffset(now)\n\tvar check bool\n\tif !n.balance.pos.IsZero() {\n\t\tif n.posFactor.RequestFactor != 0 {\n\t\t\tc := -int64(fcost * n.posFactor.RequestFactor)\n\t\t\tcc := n.balance.pos.Add(c, posExp)\n\t\t\tif c == cc {\n\t\t\t\tfcost = 0\n\t\t\t} else {\n\t\t\t\tfcost *= 1 - float64(cc)/float64(c)\n\t\t\t}\n\t\t\tcheck = true\n\t\t} else {\n\t\t\tfcost = 0\n\t\t}\n\t}\n\tif fcost > 0 {\n\t\tif n.negFactor.RequestFactor != 0 {\n\t\t\tn.balance.neg.Add(int64(fcost*n.negFactor.RequestFactor), n.bt.negExp.LogOffset(now))\n\t\t\tcheck = true\n\t\t}\n\t}\n\tif check {\n\t\tcallbacks = n.checkCallbacks(now)\n\t}\n\tn.sumReqCost += cost\n\treturn n.balance.pos.Value(posExp)\n}\n\n// Priority returns the actual priority based on the current balance\nfunc (n *NodeBalance) Priority(capacity uint64) int64 {\n\tn.lock.Lock()\n\tdefer n.lock.Unlock()\n\n\tn.updateBalance(n.bt.clock.Now())\n\treturn n.balanceToPriority(n.balance, capacity)\n}\n\n// EstimatePriority gives a lower estimate for the priority at a given time in the future.\n// It assumes an average request cost per time that is twice the average cost per time\n// measured in the current session.\n// If update is true then a priority callback is added that turns UpdateFlag on and off\n// in case the priority goes below the estimated minimum.\nfunc (n *NodeBalance) EstimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 {\n\tn.lock.Lock()\n\tdefer n.lock.Unlock()\n\n\tnow := n.bt.clock.Now()\n\tn.updateBalance(now)\n\tb := n.balance\n\tif addBalance != 0 {\n\t\toffset := n.bt.posExp.LogOffset(now)\n\t\told := n.balance.pos.Value(offset)\n\t\tif addBalance > 0 && (addBalance > maxBalance || old > maxBalance-uint64(addBalance)) {\n\t\t\tb.pos = utils.ExpiredValue{}\n\t\t\tb.pos.Add(maxBalance, offset)\n\t\t} else {\n\t\t\tb.pos.Add(addBalance, offset)\n\t\t}\n\t}\n\tif future > 0 {\n\t\tvar avgReqCost float64\n\t\tdt := time.Duration(n.lastUpdate - n.initTime)\n\t\tif dt > time.Second {\n\t\t\tavgReqCost = float64(n.sumReqCost) * 2 / float64(dt)\n\t\t}\n\t\tb = n.reducedBalance(b, now, future, capacity, avgReqCost)\n\t}\n\tif bias > 0 {\n\t\tb = n.reducedBalance(b, now+mclock.AbsTime(future), bias, capacity, 0)\n\t}\n\tpri := n.balanceToPriority(b, capacity)\n\tif update {\n\t\tn.addCallback(balanceCallbackUpdate, pri, n.signalPriorityUpdate)\n\t}\n\treturn pri\n}\n\n// PosBalanceMissing calculates the missing amount of positive balance in order to\n// connect at targetCapacity, stay connected for the given amount of time and then\n// still have a priority of targetPriority\nfunc (n *NodeBalance) PosBalanceMissing(targetPriority int64, targetCapacity uint64, after time.Duration) uint64 {\n\tn.lock.Lock()\n\tdefer n.lock.Unlock()\n\n\tnow := n.bt.clock.Now()\n\tif targetPriority < 0 {\n\t\ttimePrice := n.negFactor.timePrice(targetCapacity)\n\t\ttimeCost := uint64(float64(after) * timePrice)\n\t\tnegBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now))\n\t\tif timeCost+negBalance < uint64(-targetPriority) {\n\t\t\treturn 0\n\t\t}\n\t\tif uint64(-targetPriority) > negBalance && timePrice > 1e-100 {\n\t\t\tif negTime := time.Duration(float64(uint64(-targetPriority)-negBalance) / timePrice); negTime < after {\n\t\t\t\tafter -= negTime\n\t\t\t} else {\n\t\t\t\tafter = 0\n\t\t\t}\n\t\t}\n\t\ttargetPriority = 0\n\t}\n\ttimePrice := 
n.posFactor.timePrice(targetCapacity)\n\tposRequired := uint64(float64(targetPriority)*float64(targetCapacity)+float64(after)*timePrice) + 1\n\tif posRequired >= maxBalance {\n\t\treturn math.MaxUint64 // target not reachable\n\t}\n\tposBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now))\n\tif posRequired > posBalance {\n\t\treturn posRequired - posBalance\n\t}\n\treturn 0\n}\n\n// SetPriceFactors sets the price factors. TimeFactor is the price of a nanosecond of\n// connection while RequestFactor is the price of a request cost unit.\nfunc (n *NodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) {\n\tn.lock.Lock()\n\tnow := n.bt.clock.Now()\n\tn.updateBalance(now)\n\tn.posFactor, n.negFactor = posFactor, negFactor\n\tcallbacks := n.checkCallbacks(now)\n\tn.lock.Unlock()\n\tif callbacks != nil {\n\t\tn.bt.ns.Operation(func() {\n\t\t\tfor _, cb := range callbacks {\n\t\t\t\tcb()\n\t\t\t}\n\t\t})\n\t}\n}\n\n// GetPriceFactors returns the price factors\nfunc (n *NodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) {\n\tn.lock.Lock()\n\tdefer n.lock.Unlock()\n\n\treturn n.posFactor, n.negFactor\n}\n\n// activate starts time/capacity cost deduction.\nfunc (n *NodeBalance) activate() {\n\tn.bt.updateTotalBalance(n, func() bool {\n\t\tif n.active {\n\t\t\treturn false\n\t\t}\n\t\tn.active = true\n\t\tn.lastUpdate = n.bt.clock.Now()\n\t\treturn true\n\t})\n}\n\n// deactivate stops time/capacity cost deduction and saves the balances in the database\nfunc (n *NodeBalance) deactivate() {\n\tn.bt.updateTotalBalance(n, func() bool {\n\t\tif !n.active {\n\t\t\treturn false\n\t\t}\n\t\tn.updateBalance(n.bt.clock.Now())\n\t\tif n.updateEvent != nil {\n\t\t\tn.updateEvent.Stop()\n\t\t\tn.updateEvent = nil\n\t\t}\n\t\tn.storeBalance(true, true)\n\t\tn.active = false\n\t\treturn true\n\t})\n}\n\n// updateBalance updates balance based on the time factor\nfunc (n *NodeBalance) updateBalance(now mclock.AbsTime) {\n\tif n.active && now > n.lastUpdate {\n\t\tn.balance = n.reducedBalance(n.balance, n.lastUpdate, time.Duration(now-n.lastUpdate), n.capacity, 0)\n\t\tn.lastUpdate = now\n\t}\n}\n\n// storeBalance stores the positive and/or negative balance of the node in the database\nfunc (n *NodeBalance) storeBalance(pos, neg bool) {\n\tif pos {\n\t\tn.bt.storeBalance(n.node.ID().Bytes(), false, n.balance.pos)\n\t}\n\tif neg {\n\t\tn.bt.storeBalance([]byte(n.connAddress), true, n.balance.neg)\n\t}\n}\n\n// addCallback sets up a one-time callback to be called when priority reaches\n// the threshold. 
If it has already reached the threshold the callback is called\n// immediately.\n// Note: should be called while n.lock is held\n// Note 2: the callback function runs inside a NodeStateMachine operation\nfunc (n *NodeBalance) addCallback(id int, threshold int64, callback func()) {\n\tn.removeCallback(id)\n\tidx := 0\n\tfor idx < n.callbackCount && threshold > n.callbacks[idx].threshold {\n\t\tidx++\n\t}\n\tfor i := n.callbackCount - 1; i >= idx; i-- {\n\t\tn.callbackIndex[n.callbacks[i].id]++\n\t\tn.callbacks[i+1] = n.callbacks[i]\n\t}\n\tn.callbackCount++\n\tn.callbackIndex[id] = idx\n\tn.callbacks[idx] = balanceCallback{id, threshold, callback}\n\tnow := n.bt.clock.Now()\n\tn.updateBalance(now)\n\tn.scheduleCheck(now)\n}\n\n// removeCallback removes the given callback and returns true if it was active\n// Note: should be called while n.lock is held\nfunc (n *NodeBalance) removeCallback(id int) bool {\n\tidx := n.callbackIndex[id]\n\tif idx == -1 {\n\t\treturn false\n\t}\n\tn.callbackIndex[id] = -1\n\tfor i := idx; i < n.callbackCount-1; i++ {\n\t\tn.callbackIndex[n.callbacks[i+1].id]--\n\t\tn.callbacks[i] = n.callbacks[i+1]\n\t}\n\tn.callbackCount--\n\treturn true\n}\n\n// checkCallbacks checks whether the threshold of any of the active callbacks\n// has been reached and returns triggered callbacks.\n// Note: checkCallbacks assumes that the balance has been recently updated.\nfunc (n *NodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) {\n\tif n.callbackCount == 0 || n.capacity == 0 {\n\t\treturn\n\t}\n\tpri := n.balanceToPriority(n.balance, n.capacity)\n\tfor n.callbackCount != 0 && n.callbacks[n.callbackCount-1].threshold >= pri {\n\t\tn.callbackCount--\n\t\tn.callbackIndex[n.callbacks[n.callbackCount].id] = -1\n\t\tcallbacks = append(callbacks, n.callbacks[n.callbackCount].callback)\n\t}\n\tn.scheduleCheck(now)\n\treturn\n}\n\n// scheduleCheck sets up or updates a scheduled event to ensure that it will be called\n// again just after the next threshold has been reached.\nfunc (n *NodeBalance) scheduleCheck(now mclock.AbsTime) {\n\tif n.callbackCount != 0 {\n\t\td, ok := n.timeUntil(n.callbacks[n.callbackCount-1].threshold)\n\t\tif !ok {\n\t\t\tn.nextUpdate = 0\n\t\t\tn.updateAfter(0)\n\t\t\treturn\n\t\t}\n\t\tif n.nextUpdate == 0 || n.nextUpdate > now+mclock.AbsTime(d) {\n\t\t\tif d > time.Second {\n\t\t\t\t// Note: if the scheduled update is not in the very near future then we\n\t\t\t\t// schedule the update a bit earlier. 
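For example, a delay of 9 seconds is rescheduled\n\t\t\t\t// to ((9s-1s)*7/8)+1s = 8s by the formula below. 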
This way we do need to update a few\n\t\t\t\t// extra times but don't need to reschedule every time a processed request\n\t\t\t\t// brings the expected firing time a little bit closer.\n\t\t\t\td = ((d - time.Second) * 7 / 8) + time.Second\n\t\t\t}\n\t\t\tn.nextUpdate = now + mclock.AbsTime(d)\n\t\t\tn.updateAfter(d)\n\t\t}\n\t} else {\n\t\tn.nextUpdate = 0\n\t\tn.updateAfter(0)\n\t}\n}\n\n// updateAfter schedules a balance update and callback check in the future\nfunc (n *NodeBalance) updateAfter(dt time.Duration) {\n\tif n.updateEvent == nil || n.updateEvent.Stop() {\n\t\tif dt == 0 {\n\t\t\tn.updateEvent = nil\n\t\t} else {\n\t\t\tn.updateEvent = n.bt.clock.AfterFunc(dt, func() {\n\t\t\t\tvar callbacks []func()\n\t\t\t\tn.lock.Lock()\n\t\t\t\tif n.callbackCount != 0 {\n\t\t\t\t\tnow := n.bt.clock.Now()\n\t\t\t\t\tn.updateBalance(now)\n\t\t\t\t\tcallbacks = n.checkCallbacks(now)\n\t\t\t\t}\n\t\t\t\tn.lock.Unlock()\n\t\t\t\tif callbacks != nil {\n\t\t\t\t\tn.bt.ns.Operation(func() {\n\t\t\t\t\t\tfor _, cb := range callbacks {\n\t\t\t\t\t\t\tcb()\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\n// balanceExhausted should be called when the positive balance is exhausted (priority goes to zero/negative)\n// Note: this function should run inside a NodeStateMachine operation\nfunc (n *NodeBalance) balanceExhausted() {\n\tn.lock.Lock()\n\tn.storeBalance(true, false)\n\tn.priority = false\n\tn.lock.Unlock()\n\tn.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.PriorityFlag, 0)\n}\n\n// checkPriorityStatus checks whether the node has gained priority status and sets the priority\n// callback and flag if necessary. It assumes that the balance has been recently updated.\n// Note that the priority flag has to be set by the caller after the mutex has been released.\nfunc (n *NodeBalance) checkPriorityStatus() bool {\n\tif !n.priority && !n.balance.pos.IsZero() {\n\t\tn.priority = true\n\t\tn.addCallback(balanceCallbackZero, 0, func() { n.balanceExhausted() })\n\t\treturn true\n\t}\n\treturn false\n}\n\n// signalPriorityUpdate signals that the priority fell below the previous minimum estimate\n// Note: this function should run inside a NodeStateMachine operation\nfunc (n *NodeBalance) signalPriorityUpdate() {\n\tn.bt.ns.SetStateSub(n.node, n.bt.UpdateFlag, nodestate.Flags{}, 0)\n\tn.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.UpdateFlag, 0)\n}\n\n// setCapacity updates the capacity value used for priority calculation\n// Note: capacity should never be zero\n// Note 2: this function should run inside a NodeStateMachine operation\nfunc (n *NodeBalance) setCapacity(capacity uint64) {\n\tn.lock.Lock()\n\tnow := n.bt.clock.Now()\n\tn.updateBalance(now)\n\tn.capacity = capacity\n\tcallbacks := n.checkCallbacks(now)\n\tn.lock.Unlock()\n\tfor _, cb := range callbacks {\n\t\tcb()\n\t}\n}\n\n// balanceToPriority converts a balance to a priority value. Lower priority means\n// first to disconnect. Positive balance translates to positive priority. 
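For example, a positive\n// balance of 2,000,000 at capacity 1,000 maps to a priority of 2,000. 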
If positive\n// balance is zero then negative balance translates to a negative priority.\nfunc (n *NodeBalance) balanceToPriority(b balance, capacity uint64) int64 {\n\tif !b.pos.IsZero() {\n\t\treturn int64(b.pos.Value(n.bt.posExp.LogOffset(n.bt.clock.Now())) / capacity)\n\t}\n\treturn -int64(b.neg.Value(n.bt.negExp.LogOffset(n.bt.clock.Now())))\n}\n\n// reducedBalance estimates the reduced balance at a given time in the future based\n// on the given balance, the time factor and an estimated average request cost per time ratio\nfunc (n *NodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Duration, capacity uint64, avgReqCost float64) balance {\n\t// since the costs are applied continuously during the dt time period we calculate\n\t// the expiration offset at the middle of the period\n\tat := start + mclock.AbsTime(dt/2)\n\tdtf := float64(dt)\n\tif !b.pos.IsZero() {\n\t\tfactor := n.posFactor.timePrice(capacity) + n.posFactor.RequestFactor*avgReqCost\n\t\tdiff := -int64(dtf * factor)\n\t\tdd := b.pos.Add(diff, n.bt.posExp.LogOffset(at))\n\t\tif dd == diff {\n\t\t\tdtf = 0\n\t\t} else {\n\t\t\tdtf += float64(dd) / factor\n\t\t}\n\t}\n\tif dt > 0 {\n\t\tfactor := n.negFactor.timePrice(capacity) + n.negFactor.RequestFactor*avgReqCost\n\t\tb.neg.Add(int64(dtf*factor), n.bt.negExp.LogOffset(at))\n\t}\n\treturn b\n}\n\n// timeUntil calculates the remaining time needed to reach a given priority level\n// assuming that no requests are processed until then. If the given level is never\n// reached then (0, false) is returned.\n// Note: the function assumes that the balance has been recently updated and\n// calculates the time starting from the last update.\nfunc (n *NodeBalance) timeUntil(priority int64) (time.Duration, bool) {\n\tnow := n.bt.clock.Now()\n\tvar dt float64\n\tif !n.balance.pos.IsZero() {\n\t\tposBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now))\n\t\ttimePrice := n.posFactor.timePrice(n.capacity)\n\t\tif timePrice < 1e-100 {\n\t\t\treturn 0, false\n\t\t}\n\t\tif priority > 0 {\n\t\t\tnewBalance := uint64(priority) * n.capacity\n\t\t\tif newBalance > posBalance {\n\t\t\t\treturn 0, false\n\t\t\t}\n\t\t\tdt = float64(posBalance-newBalance) / timePrice\n\t\t\treturn time.Duration(dt), true\n\t\t} else {\n\t\t\tdt = float64(posBalance) / timePrice\n\t\t}\n\t} else {\n\t\tif priority > 0 {\n\t\t\treturn 0, false\n\t\t}\n\t}\n\t// if we have a positive balance then dt equals the time needed to get it to zero\n\tnegBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now))\n\ttimePrice := n.negFactor.timePrice(n.capacity)\n\tif uint64(-priority) > negBalance {\n\t\tif timePrice < 1e-100 {\n\t\t\treturn 0, false\n\t\t}\n\t\tdt += float64(uint64(-priority)-negBalance) / timePrice\n\t}\n\treturn time.Duration(dt), true\n}\n"
  },
  {
    "path": "les/vflux/server/balance_test.go",
    "content": "// Copyright 2019 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage server\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/ethdb/memorydb\"\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\nvar (\n\ttestFlag     = testSetup.NewFlag(\"testFlag\")\n\tconnAddrFlag = testSetup.NewField(\"connAddr\", reflect.TypeOf(\"\"))\n\tbtTestSetup  = NewBalanceTrackerSetup(testSetup)\n)\n\nfunc init() {\n\tbtTestSetup.Connect(connAddrFlag, ppTestSetup.CapacityField)\n}\n\ntype zeroExpirer struct{}\n\nfunc (z zeroExpirer) SetRate(now mclock.AbsTime, rate float64)                 {}\nfunc (z zeroExpirer) SetLogOffset(now mclock.AbsTime, logOffset utils.Fixed64) {}\nfunc (z zeroExpirer) LogOffset(now mclock.AbsTime) utils.Fixed64               { return 0 }\n\ntype balanceTestSetup struct {\n\tclock *mclock.Simulated\n\tns    *nodestate.NodeStateMachine\n\tbt    *BalanceTracker\n}\n\nfunc newBalanceTestSetup() *balanceTestSetup {\n\tclock := &mclock.Simulated{}\n\tns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup)\n\tdb := memorydb.New()\n\tbt := NewBalanceTracker(ns, btTestSetup, db, clock, zeroExpirer{}, zeroExpirer{})\n\tns.Start()\n\treturn &balanceTestSetup{\n\t\tclock: clock,\n\t\tns:    ns,\n\t\tbt:    bt,\n\t}\n}\n\nfunc (b *balanceTestSetup) newNode(capacity uint64) *NodeBalance {\n\tnode := enode.SignNull(&enr.Record{}, enode.ID{})\n\tb.ns.SetState(node, testFlag, nodestate.Flags{}, 0)\n\tb.ns.SetField(node, btTestSetup.connAddressField, \"\")\n\tif capacity != 0 {\n\t\tb.ns.SetField(node, ppTestSetup.CapacityField, capacity)\n\t}\n\tn, _ := b.ns.GetField(node, btTestSetup.BalanceField).(*NodeBalance)\n\treturn n\n}\n\nfunc (b *balanceTestSetup) stop() {\n\tb.bt.Stop()\n\tb.ns.Stop()\n}\n\nfunc TestAddBalance(t *testing.T) {\n\tb := newBalanceTestSetup()\n\tdefer b.stop()\n\n\tnode := b.newNode(1000)\n\tvar inputs = []struct {\n\t\tdelta     int64\n\t\texpect    [2]uint64\n\t\ttotal     uint64\n\t\texpectErr bool\n\t}{\n\t\t{100, [2]uint64{0, 100}, 100, false},\n\t\t{-100, [2]uint64{100, 0}, 0, false},\n\t\t{-100, [2]uint64{0, 0}, 0, false},\n\t\t{1, [2]uint64{0, 1}, 1, false},\n\t\t{maxBalance, [2]uint64{0, 0}, 0, true},\n\t}\n\tfor _, i := range inputs {\n\t\told, new, err := node.AddBalance(i.delta)\n\t\tif i.expectErr {\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"Expect get error but nil\")\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\tt.Fatalf(\"Expect get no error but %v\", err)\n\t\t}\n\t\tif old != i.expect[0] || new != i.expect[1] 
{\n\t\t\tt.Fatalf(\"Positive balance mismatch, got %v -> %v\", old, new)\n\t\t}\n\t\tif b.bt.TotalTokenAmount() != i.total {\n\t\t\tt.Fatalf(\"Total positive balance mismatch, want %v, got %v\", i.total, b.bt.TotalTokenAmount())\n\t\t}\n\t}\n}\n\nfunc TestSetBalance(t *testing.T) {\n\tb := newBalanceTestSetup()\n\tdefer b.stop()\n\tnode := b.newNode(1000)\n\n\tvar inputs = []struct {\n\t\tpos, neg uint64\n\t}{\n\t\t{1000, 0},\n\t\t{0, 1000},\n\t\t{1000, 1000},\n\t}\n\n\tfor _, i := range inputs {\n\t\tnode.SetBalance(i.pos, i.neg)\n\t\tpos, neg := node.GetBalance()\n\t\tif pos != i.pos {\n\t\t\tt.Fatalf(\"Positive balance mismatch, want %v, got %v\", i.pos, pos)\n\t\t}\n\t\tif neg != i.neg {\n\t\t\tt.Fatalf(\"Negative balance mismatch, want %v, got %v\", i.neg, neg)\n\t\t}\n\t}\n}\n\nfunc TestBalanceTimeCost(t *testing.T) {\n\tb := newBalanceTestSetup()\n\tdefer b.stop()\n\tnode := b.newNode(1000)\n\n\tb.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))\n\tnode.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})\n\tnode.SetBalance(uint64(time.Minute), 0) // 1 minute time allowance\n\n\tvar inputs = []struct {\n\t\trunTime time.Duration\n\t\texpPos  uint64\n\t\texpNeg  uint64\n\t}{\n\t\t{time.Second, uint64(time.Second * 59), 0},\n\t\t{0, uint64(time.Second * 59), 0},\n\t\t{time.Second * 59, 0, 0},\n\t\t{time.Second, 0, uint64(time.Second)},\n\t}\n\tfor _, i := range inputs {\n\t\tb.clock.Run(i.runTime)\n\t\tif pos, _ := node.GetBalance(); pos != i.expPos {\n\t\t\tt.Fatalf(\"Positive balance mismatch, want %v, got %v\", i.expPos, pos)\n\t\t}\n\t\tif _, neg := node.GetBalance(); neg != i.expNeg {\n\t\t\tt.Fatalf(\"Negative balance mismatch, want %v, got %v\", i.expNeg, neg)\n\t\t}\n\t}\n\n\tnode.SetBalance(uint64(time.Minute), 0) // Refill 1 minute time allowance\n\tfor _, i := range inputs {\n\t\tb.clock.Run(i.runTime)\n\t\tif pos, _ := node.GetBalance(); pos != i.expPos {\n\t\t\tt.Fatalf(\"Positive balance mismatch, want %v, got %v\", i.expPos, pos)\n\t\t}\n\t\tif _, neg := node.GetBalance(); neg != i.expNeg {\n\t\t\tt.Fatalf(\"Negative balance mismatch, want %v, got %v\", i.expNeg, neg)\n\t\t}\n\t}\n}\n\nfunc TestBalanceReqCost(t *testing.T) {\n\tb := newBalanceTestSetup()\n\tdefer b.stop()\n\tnode := b.newNode(1000)\n\tnode.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})\n\n\tb.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))\n\tnode.SetBalance(uint64(time.Minute), 0) // 1 minute time serving time allowance\n\tvar inputs = []struct {\n\t\treqCost uint64\n\t\texpPos  uint64\n\t\texpNeg  uint64\n\t}{\n\t\t{uint64(time.Second), uint64(time.Second * 59), 0},\n\t\t{0, uint64(time.Second * 59), 0},\n\t\t{uint64(time.Second * 59), 0, 0},\n\t\t{uint64(time.Second), 0, uint64(time.Second)},\n\t}\n\tfor _, i := range inputs {\n\t\tnode.RequestServed(i.reqCost)\n\t\tif pos, _ := node.GetBalance(); pos != i.expPos {\n\t\t\tt.Fatalf(\"Positive balance mismatch, want %v, got %v\", i.expPos, pos)\n\t\t}\n\t\tif _, neg := node.GetBalance(); neg != i.expNeg {\n\t\t\tt.Fatalf(\"Negative balance mismatch, want %v, got %v\", i.expNeg, neg)\n\t\t}\n\t}\n}\n\nfunc TestBalanceToPriority(t *testing.T) {\n\tb := newBalanceTestSetup()\n\tdefer b.stop()\n\tnode := b.newNode(1000)\n\tnode.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})\n\n\tvar inputs = []struct {\n\t\tpos      uint64\n\t\tneg      uint64\n\t\tpriority int64\n\t}{\n\t\t{1000, 0, 1},\n\t\t{2000, 0, 2}, // Higher balance, higher priority value\n\t\t{0, 0, 0},\n\t\t{0, 1000, 
-1000},\n\t}\n\tfor _, i := range inputs {\n\t\tnode.SetBalance(i.pos, i.neg)\n\t\tpriority := node.Priority(1000)\n\t\tif priority != i.priority {\n\t\t\tt.Fatalf(\"Priority mismatch, want %v, got %v\", i.priority, priority)\n\t\t}\n\t}\n}\n\nfunc TestEstimatedPriority(t *testing.T) {\n\tb := newBalanceTestSetup()\n\tdefer b.stop()\n\tnode := b.newNode(1000000000)\n\tnode.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})\n\n\tb.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))\n\tnode.SetBalance(uint64(time.Minute), 0)\n\tvar inputs = []struct {\n\t\trunTime    time.Duration // time cost\n\t\tfutureTime time.Duration // diff of future time\n\t\treqCost    uint64        // single request cost\n\t\tpriority   int64         // expected estimated priority\n\t}{\n\t\t{time.Second, time.Second, 0, 58},\n\t\t{0, time.Second, 0, 58},\n\n\t\t// 2 seconds time cost, 1 second estimated time cost, 10^9 request cost,\n\t\t// 10^9 estimated request cost per second.\n\t\t{time.Second, time.Second, 1000000000, 55},\n\n\t\t// 3 seconds time cost, 3 second estimated time cost, 10^9*2 request cost,\n\t\t// 4*10^9 estimated request cost.\n\t\t{time.Second, 3 * time.Second, 1000000000, 48},\n\n\t\t// All positive balance is used up\n\t\t{time.Second * 55, 0, 0, 0},\n\n\t\t// 1 minute estimated time cost, 4/58 * 10^9 estimated request cost per sec.\n\t\t{0, time.Minute, 0, -int64(time.Minute) - int64(time.Second)*120/29},\n\t}\n\tfor _, i := range inputs {\n\t\tb.clock.Run(i.runTime)\n\t\tnode.RequestServed(i.reqCost)\n\t\tpriority := node.EstimatePriority(1000000000, 0, i.futureTime, 0, false)\n\t\tif priority != i.priority {\n\t\t\tt.Fatalf(\"Estimated priority mismatch, want %v, got %v\", i.priority, priority)\n\t\t}\n\t}\n}\n\nfunc TestPosBalanceMissing(t *testing.T) {\n\tb := newBalanceTestSetup()\n\tdefer b.stop()\n\tnode := b.newNode(1000)\n\tnode.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})\n\n\tb.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))\n\tvar inputs = []struct {\n\t\tpos, neg uint64\n\t\tpriority int64\n\t\tcap      uint64\n\t\tafter    time.Duration\n\t\texpect   uint64\n\t}{\n\t\t{uint64(time.Second * 2), 0, 0, 1, time.Second, 0},\n\t\t{uint64(time.Second * 2), 0, 0, 1, 2 * time.Second, 1},\n\t\t{uint64(time.Second * 2), 0, int64(time.Second), 1, 2 * time.Second, uint64(time.Second) + 1},\n\t\t{0, 0, int64(time.Second), 1, time.Second, uint64(2*time.Second) + 1},\n\t\t{0, 0, -int64(time.Second), 1, time.Second, 1},\n\t}\n\tfor _, i := range inputs {\n\t\tnode.SetBalance(i.pos, i.neg)\n\t\tgot := node.PosBalanceMissing(i.priority, i.cap, i.after)\n\t\tif got != i.expect {\n\t\t\tt.Fatalf(\"Missing budget mismatch, want %v, got %v\", i.expect, got)\n\t\t}\n\t}\n}\n\nfunc TestPositiveBalanceCounting(t *testing.T) {\n\tb := newBalanceTestSetup()\n\tdefer b.stop()\n\n\tvar nodes []*NodeBalance\n\tfor i := 0; i < 100; i++ {\n\t\tnode := b.newNode(1000000)\n\t\tnode.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})\n\t\tnodes = append(nodes, node)\n\t}\n\n\t// Allocate service token\n\tvar sum uint64\n\tfor i := 0; i < 100; i++ {\n\t\tamount := int64(rand.Intn(100) + 100)\n\t\tnodes[i].AddBalance(amount)\n\t\tsum += uint64(amount)\n\t}\n\tif b.bt.TotalTokenAmount() != sum {\n\t\tt.Fatalf(\"Invalid token amount\")\n\t}\n\n\t// Change client status\n\tfor i := 0; i < 100; i++ {\n\t\tif rand.Intn(2) == 0 {\n\t\t\tb.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1))\n\t\t}\n\t}\n\tif b.bt.TotalTokenAmount() != sum 
{\n\t\tt.Fatalf(\"Invalid token amount\")\n\t}\n\tfor i := 0; i < 100; i += 1 {\n\t\tif rand.Intn(2) == 0 {\n\t\t\tb.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1))\n\t\t}\n\t}\n\tif b.bt.TotalTokenAmount() != sum {\n\t\tt.Fatalf(\"Invalid token amount\")\n\t}\n}\n\nfunc TestCallbackChecking(t *testing.T) {\n\tb := newBalanceTestSetup()\n\tdefer b.stop()\n\tnode := b.newNode(1000000)\n\tnode.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})\n\n\tvar inputs = []struct {\n\t\tpriority int64\n\t\texpDiff  time.Duration\n\t}{\n\t\t{500, time.Millisecond * 500},\n\t\t{0, time.Second},\n\t\t{-int64(time.Second), 2 * time.Second},\n\t}\n\tnode.SetBalance(uint64(time.Second), 0)\n\tfor _, i := range inputs {\n\t\tdiff, _ := node.timeUntil(i.priority)\n\t\tif diff != i.expDiff {\n\t\t\tt.Fatalf(\"Time difference mismatch, want %v, got %v\", i.expDiff, diff)\n\t\t}\n\t}\n}\n\nfunc TestCallback(t *testing.T) {\n\tb := newBalanceTestSetup()\n\tdefer b.stop()\n\tnode := b.newNode(1000)\n\tnode.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1})\n\tb.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1))\n\n\tcallCh := make(chan struct{}, 1)\n\tnode.SetBalance(uint64(time.Minute), 0)\n\tnode.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} })\n\n\tb.clock.Run(time.Minute)\n\tselect {\n\tcase <-callCh:\n\tcase <-time.NewTimer(time.Second).C:\n\t\tt.Fatalf(\"Callback hasn't been called yet\")\n\t}\n\n\tnode.SetBalance(uint64(time.Minute), 0)\n\tnode.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} })\n\tnode.removeCallback(balanceCallbackZero)\n\n\tb.clock.Run(time.Minute)\n\tselect {\n\tcase <-callCh:\n\t\tt.Fatalf(\"Callback shouldn't be called\")\n\tcase <-time.NewTimer(time.Millisecond * 100).C:\n\t}\n}\n\nfunc TestBalancePersistence(t *testing.T) {\n\tclock := &mclock.Simulated{}\n\tns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup)\n\tdb := memorydb.New()\n\tposExp := &utils.Expirer{}\n\tnegExp := &utils.Expirer{}\n\tposExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours\n\tnegExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour))   // halves every hour\n\tbt := NewBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp)\n\tns.Start()\n\tbts := &balanceTestSetup{\n\t\tclock: clock,\n\t\tns:    ns,\n\t\tbt:    bt,\n\t}\n\tvar nb *NodeBalance\n\texp := func(expPos, expNeg uint64) {\n\t\tpos, neg := nb.GetBalance()\n\t\tif pos != expPos {\n\t\t\tt.Fatalf(\"Positive balance incorrect, want %v, got %v\", expPos, pos)\n\t\t}\n\t\tif neg != expNeg {\n\t\t\tt.Fatalf(\"Positive balance incorrect, want %v, got %v\", expPos, pos)\n\t\t}\n\t}\n\texpTotal := func(expTotal uint64) {\n\t\ttotal := bt.TotalTokenAmount()\n\t\tif total != expTotal {\n\t\t\tt.Fatalf(\"Total token amount incorrect, want %v, got %v\", expTotal, total)\n\t\t}\n\t}\n\n\texpTotal(0)\n\tnb = bts.newNode(0)\n\texpTotal(0)\n\tnb.SetBalance(16000000000, 16000000000)\n\texp(16000000000, 16000000000)\n\texpTotal(16000000000)\n\tclock.Run(time.Hour * 2)\n\texp(8000000000, 4000000000)\n\texpTotal(8000000000)\n\tbt.Stop()\n\tns.Stop()\n\n\tclock = &mclock.Simulated{}\n\tns = nodestate.NewNodeStateMachine(nil, nil, clock, testSetup)\n\tposExp = &utils.Expirer{}\n\tnegExp = &utils.Expirer{}\n\tposExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours\n\tnegExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour))   // halves every hour\n\tbt = NewBalanceTracker(ns, btTestSetup, db, clock, 
posExp, negExp)\n\tns.Start()\n\tbts = &balanceTestSetup{\n\t\tclock: clock,\n\t\tns:    ns,\n\t\tbt:    bt,\n\t}\n\texpTotal(8000000000)\n\tnb = bts.newNode(0)\n\texp(8000000000, 4000000000)\n\texpTotal(8000000000)\n\tclock.Run(time.Hour * 2)\n\texp(4000000000, 1000000000)\n\texpTotal(4000000000)\n\tbt.Stop()\n\tns.Stop()\n}\n"
  },
  {
    "path": "les/vflux/server/balance_tracker.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage server\n\nimport (\n\t\"reflect\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\nconst (\n\tposThreshold             = 1000000         // minimum positive balance that is persisted in the database\n\tnegThreshold             = 1000000         // minimum negative balance that is persisted in the database\n\tpersistExpirationRefresh = time.Minute * 5 // refresh period of the token expiration persistence\n)\n\n// BalanceTrackerSetup contains node state flags and fields used by BalanceTracker\ntype BalanceTrackerSetup struct {\n\t// controlled by PriorityPool\n\tPriorityFlag, UpdateFlag nodestate.Flags\n\tBalanceField             nodestate.Field\n\t// external connections\n\tconnAddressField, capacityField nodestate.Field\n}\n\n// NewBalanceTrackerSetup creates a new BalanceTrackerSetup and initializes the fields\n// and flags controlled by BalanceTracker\nfunc NewBalanceTrackerSetup(setup *nodestate.Setup) BalanceTrackerSetup {\n\treturn BalanceTrackerSetup{\n\t\t// PriorityFlag is set if the node has a positive balance\n\t\tPriorityFlag: setup.NewFlag(\"priorityNode\"),\n\t\t// UpdateFlag set and then immediately reset if the balance has been updated and\n\t\t// therefore priority is suddenly changed\n\t\tUpdateFlag: setup.NewFlag(\"balanceUpdate\"),\n\t\t// BalanceField contains the NodeBalance struct which implements nodePriority,\n\t\t// allowing on-demand priority calculation and future priority estimation\n\t\tBalanceField: setup.NewField(\"balance\", reflect.TypeOf(&NodeBalance{})),\n\t}\n}\n\n// Connect sets the fields used by BalanceTracker as an input\nfunc (bts *BalanceTrackerSetup) Connect(connAddressField, capacityField nodestate.Field) {\n\tbts.connAddressField = connAddressField\n\tbts.capacityField = capacityField\n}\n\n// BalanceTracker tracks positive and negative balances for connected nodes.\n// After connAddressField is set externally, a NodeBalance is created and previous\n// balance values are loaded from the database. Both balances are exponentially expired\n// values. Costs are deducted from the positive balance if present, otherwise added to\n// the negative balance. 
If the capacity is non-zero then a time cost is applied\n// continuously while individual request costs are applied immediately.\n// The two balances are translated into a single priority value that also depends\n// on the actual capacity.\ntype BalanceTracker struct {\n\tBalanceTrackerSetup\n\tclock              mclock.Clock\n\tlock               sync.Mutex\n\tns                 *nodestate.NodeStateMachine\n\tndb                *nodeDB\n\tposExp, negExp     utils.ValueExpirer\n\tposExpTC, negExpTC uint64\n\n\tactive, inactive utils.ExpiredValue\n\tbalanceTimer     *utils.UpdateTimer\n\tquit             chan struct{}\n}\n\n// NewBalanceTracker creates a new BalanceTracker\nfunc NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *BalanceTracker {\n\tndb := newNodeDB(db, clock)\n\tbt := &BalanceTracker{\n\t\tns:                  ns,\n\t\tBalanceTrackerSetup: setup,\n\t\tndb:                 ndb,\n\t\tclock:               clock,\n\t\tposExp:              posExp,\n\t\tnegExp:              negExp,\n\t\tbalanceTimer:        utils.NewUpdateTimer(clock, time.Second*10),\n\t\tquit:                make(chan struct{}),\n\t}\n\tposOffset, negOffset := bt.ndb.getExpiration()\n\tposExp.SetLogOffset(clock.Now(), posOffset)\n\tnegExp.SetLogOffset(clock.Now(), negOffset)\n\n\tbt.ndb.forEachBalance(false, func(id enode.ID, balance utils.ExpiredValue) bool {\n\t\tbt.inactive.AddExp(balance)\n\t\treturn true\n\t})\n\n\tns.SubscribeField(bt.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {\n\t\tn, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance)\n\t\tif n == nil {\n\t\t\treturn\n\t\t}\n\n\t\tov, _ := oldValue.(uint64)\n\t\tnv, _ := newValue.(uint64)\n\t\tif ov == 0 && nv != 0 {\n\t\t\tn.activate()\n\t\t}\n\t\tif nv != 0 {\n\t\t\tn.setCapacity(nv)\n\t\t}\n\t\tif ov != 0 && nv == 0 {\n\t\t\tn.deactivate()\n\t\t}\n\t})\n\tns.SubscribeField(bt.connAddressField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {\n\t\tif newValue != nil {\n\t\t\tns.SetFieldSub(node, bt.BalanceField, bt.newNodeBalance(node, newValue.(string)))\n\t\t} else {\n\t\t\tns.SetStateSub(node, nodestate.Flags{}, bt.PriorityFlag, 0)\n\t\t\tif b, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance); b != nil {\n\t\t\t\tb.deactivate()\n\t\t\t}\n\t\t\tns.SetFieldSub(node, bt.BalanceField, nil)\n\t\t}\n\t})\n\n\t// The positive and negative balances of clients are stored in database\n\t// and both of these decay exponentially over time. 
Delete them if the\n\t// value is small enough.\n\tbt.ndb.evictCallBack = bt.canDropBalance\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-clock.After(persistExpirationRefresh):\n\t\t\t\tnow := clock.Now()\n\t\t\t\tbt.ndb.setExpiration(posExp.LogOffset(now), negExp.LogOffset(now))\n\t\t\tcase <-bt.quit:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn bt\n}\n\n// Stop saves expiration offset and unsaved node balances and shuts BalanceTracker down\nfunc (bt *BalanceTracker) Stop() {\n\tnow := bt.clock.Now()\n\tbt.ndb.setExpiration(bt.posExp.LogOffset(now), bt.negExp.LogOffset(now))\n\tclose(bt.quit)\n\tbt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {\n\t\tif n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok {\n\t\t\tn.lock.Lock()\n\t\t\tn.storeBalance(true, true)\n\t\t\tn.lock.Unlock()\n\t\t\tbt.ns.SetField(node, bt.BalanceField, nil)\n\t\t}\n\t})\n\tbt.ndb.close()\n}\n\n// TotalTokenAmount returns the current total amount of service tokens in existence\nfunc (bt *BalanceTracker) TotalTokenAmount() uint64 {\n\tbt.lock.Lock()\n\tdefer bt.lock.Unlock()\n\n\tbt.balanceTimer.Update(func(_ time.Duration) bool {\n\t\tbt.active = utils.ExpiredValue{}\n\t\tbt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) {\n\t\t\tif n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok && n.active {\n\t\t\t\tpos, _ := n.GetRawBalance()\n\t\t\t\tbt.active.AddExp(pos)\n\t\t\t}\n\t\t})\n\t\treturn true\n\t})\n\ttotal := bt.active\n\ttotal.AddExp(bt.inactive)\n\treturn total.Value(bt.posExp.LogOffset(bt.clock.Now()))\n}\n\n// GetPosBalanceIDs lists node IDs with an associated positive balance\nfunc (bt *BalanceTracker) GetPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) {\n\treturn bt.ndb.getPosBalanceIDs(start, stop, maxCount)\n}\n\n// SetExpirationTCs sets positive and negative token expiration time constants.\n// Specified in seconds, 0 means infinite (no expiration).\nfunc (bt *BalanceTracker) SetExpirationTCs(pos, neg uint64) {\n\tbt.lock.Lock()\n\tdefer bt.lock.Unlock()\n\n\tbt.posExpTC, bt.negExpTC = pos, neg\n\tnow := bt.clock.Now()\n\tif pos > 0 {\n\t\tbt.posExp.SetRate(now, 1/float64(pos*uint64(time.Second)))\n\t} else {\n\t\tbt.posExp.SetRate(now, 0)\n\t}\n\tif neg > 0 {\n\t\tbt.negExp.SetRate(now, 1/float64(neg*uint64(time.Second)))\n\t} else {\n\t\tbt.negExp.SetRate(now, 0)\n\t}\n}\n\n// GetExpirationTCs returns the current positive and negative token expiration\n// time constants\nfunc (bt *BalanceTracker) GetExpirationTCs() (pos, neg uint64) {\n\tbt.lock.Lock()\n\tdefer bt.lock.Unlock()\n\n\treturn bt.posExpTC, bt.negExpTC\n}\n\n// newNodeBalance loads balances from the database and creates a NodeBalance instance\n// for the given node. 
It also sets the PriorityFlag and adds balanceCallbackZero if\n// the node has a positive balance.\n// Note: this function should run inside a NodeStateMachine operation\nfunc (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string) *NodeBalance {\n\tpb := bt.ndb.getOrNewBalance(node.ID().Bytes(), false)\n\tnb := bt.ndb.getOrNewBalance([]byte(negBalanceKey), true)\n\tn := &NodeBalance{\n\t\tbt:          bt,\n\t\tnode:        node,\n\t\tconnAddress: negBalanceKey,\n\t\tbalance:     balance{pos: pb, neg: nb},\n\t\tinitTime:    bt.clock.Now(),\n\t\tlastUpdate:  bt.clock.Now(),\n\t}\n\tfor i := range n.callbackIndex {\n\t\tn.callbackIndex[i] = -1\n\t}\n\tif n.checkPriorityStatus() {\n\t\tn.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0)\n\t}\n\treturn n\n}\n\n// storeBalance stores either a positive or a negative balance in the database\nfunc (bt *BalanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredValue) {\n\tif bt.canDropBalance(bt.clock.Now(), neg, value) {\n\t\tbt.ndb.delBalance(id, neg) // balance is small enough, drop it directly.\n\t} else {\n\t\tbt.ndb.setBalance(id, neg, value)\n\t}\n}\n\n// canDropBalance tells whether a positive or negative balance is below the threshold\n// and therefore can be dropped from the database\nfunc (bt *BalanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool {\n\tif neg {\n\t\treturn b.Value(bt.negExp.LogOffset(now)) <= negThreshold\n\t}\n\treturn b.Value(bt.posExp.LogOffset(now)) <= posThreshold\n}\n\n// updateTotalBalance adjusts the total balance after executing given callback.\nfunc (bt *BalanceTracker) updateTotalBalance(n *NodeBalance, callback func() bool) {\n\tbt.lock.Lock()\n\tdefer bt.lock.Unlock()\n\n\tn.lock.Lock()\n\tdefer n.lock.Unlock()\n\n\toriginal, active := n.balance.pos, n.active\n\tif !callback() {\n\t\treturn\n\t}\n\tif active {\n\t\tbt.active.SubExp(original)\n\t} else {\n\t\tbt.inactive.SubExp(original)\n\t}\n\tif n.active {\n\t\tbt.active.AddExp(n.balance.pos)\n\t} else {\n\t\tbt.inactive.AddExp(n.balance.pos)\n\t}\n}\n"
  },
  {
    "path": "les/vflux/server/clientdb.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage server\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common\"\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/ethdb\"\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n\tlru \"github.com/hashicorp/golang-lru\"\n)\n\nconst (\n\tbalanceCacheLimit = 8192 // the maximum number of cached items in service token balance queue\n\n\t// nodeDBVersion is the version identifier of the node data in db\n\t//\n\t// Changelog:\n\t// Version 0 => 1\n\t// * Replace `lastTotal` with `meta` in positive balance: version 0=>1\n\t//\n\t// Version 1 => 2\n\t// * Positive Balance and negative balance is changed:\n\t// * Cumulative time is replaced with expiration\n\tnodeDBVersion = 2\n\n\t// dbCleanupCycle is the cycle of db for useless data cleanup\n\tdbCleanupCycle = time.Hour\n)\n\nvar (\n\tpositiveBalancePrefix = []byte(\"pb:\")         // dbVersion(uint16 big endian) + positiveBalancePrefix + id -> balance\n\tnegativeBalancePrefix = []byte(\"nb:\")         // dbVersion(uint16 big endian) + negativeBalancePrefix + ip -> balance\n\texpirationKey         = []byte(\"expiration:\") // dbVersion(uint16 big endian) + expirationKey -> posExp, negExp\n)\n\ntype nodeDB struct {\n\tdb            ethdb.KeyValueStore\n\tcache         *lru.Cache\n\tauxbuf        []byte                                              // 37-byte auxiliary buffer for key encoding\n\tverbuf        [2]byte                                             // 2-byte auxiliary buffer for db version\n\tevictCallBack func(mclock.AbsTime, bool, utils.ExpiredValue) bool // Callback to determine whether the balance can be evicted.\n\tclock         mclock.Clock\n\tcloseCh       chan struct{}\n\tcleanupHook   func() // Test hook used for testing\n}\n\nfunc newNodeDB(db ethdb.KeyValueStore, clock mclock.Clock) *nodeDB {\n\tcache, _ := lru.New(balanceCacheLimit)\n\tndb := &nodeDB{\n\t\tdb:      db,\n\t\tcache:   cache,\n\t\tauxbuf:  make([]byte, 37),\n\t\tclock:   clock,\n\t\tcloseCh: make(chan struct{}),\n\t}\n\tbinary.BigEndian.PutUint16(ndb.verbuf[:], uint16(nodeDBVersion))\n\tgo ndb.expirer()\n\treturn ndb\n}\n\nfunc (db *nodeDB) close() {\n\tclose(db.closeCh)\n}\n\nfunc (db *nodeDB) getPrefix(neg bool) []byte {\n\tprefix := positiveBalancePrefix\n\tif neg {\n\t\tprefix = negativeBalancePrefix\n\t}\n\treturn append(db.verbuf[:], prefix...)\n}\n\nfunc (db *nodeDB) key(id []byte, neg bool) []byte {\n\tprefix := positiveBalancePrefix\n\tif neg {\n\t\tprefix = negativeBalancePrefix\n\t}\n\tif 
len(prefix)+len(db.verbuf)+len(id) > len(db.auxbuf) {\n\t\tdb.auxbuf = append(db.auxbuf, make([]byte, len(prefix)+len(db.verbuf)+len(id)-len(db.auxbuf))...)\n\t}\n\tcopy(db.auxbuf[:len(db.verbuf)], db.verbuf[:])\n\tcopy(db.auxbuf[len(db.verbuf):len(db.verbuf)+len(prefix)], prefix)\n\tcopy(db.auxbuf[len(prefix)+len(db.verbuf):len(prefix)+len(db.verbuf)+len(id)], id)\n\treturn db.auxbuf[:len(prefix)+len(db.verbuf)+len(id)]\n}\n\nfunc (db *nodeDB) getExpiration() (utils.Fixed64, utils.Fixed64) {\n\tblob, err := db.db.Get(append(db.verbuf[:], expirationKey...))\n\tif err != nil || len(blob) != 16 {\n\t\treturn 0, 0\n\t}\n\treturn utils.Fixed64(binary.BigEndian.Uint64(blob[:8])), utils.Fixed64(binary.BigEndian.Uint64(blob[8:16]))\n}\n\nfunc (db *nodeDB) setExpiration(pos, neg utils.Fixed64) {\n\tvar buff [16]byte\n\tbinary.BigEndian.PutUint64(buff[:8], uint64(pos))\n\tbinary.BigEndian.PutUint64(buff[8:16], uint64(neg))\n\tdb.db.Put(append(db.verbuf[:], expirationKey...), buff[:16])\n}\n\nfunc (db *nodeDB) getOrNewBalance(id []byte, neg bool) utils.ExpiredValue {\n\tkey := db.key(id, neg)\n\titem, exist := db.cache.Get(string(key))\n\tif exist {\n\t\treturn item.(utils.ExpiredValue)\n\t}\n\tvar b utils.ExpiredValue\n\tenc, err := db.db.Get(key)\n\tif err != nil || len(enc) == 0 {\n\t\treturn b\n\t}\n\tif err := rlp.DecodeBytes(enc, &b); err != nil {\n\t\tlog.Crit(\"Failed to decode balance\", \"err\", err)\n\t}\n\tdb.cache.Add(string(key), b)\n\treturn b\n}\n\nfunc (db *nodeDB) setBalance(id []byte, neg bool, b utils.ExpiredValue) {\n\tkey := db.key(id, neg)\n\tenc, err := rlp.EncodeToBytes(&b)\n\tif err != nil {\n\t\tlog.Crit(\"Failed to encode balance\", \"err\", err)\n\t}\n\tdb.db.Put(key, enc)\n\tdb.cache.Add(string(key), b)\n}\n\nfunc (db *nodeDB) delBalance(id []byte, neg bool) {\n\tkey := db.key(id, neg)\n\tdb.db.Delete(key)\n\tdb.cache.Remove(string(key))\n}\n\n// getPosBalanceIDs returns a lexicographically ordered list of IDs of accounts\n// with a positive balance\nfunc (db *nodeDB) getPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) {\n\tif maxCount <= 0 {\n\t\treturn\n\t}\n\tprefix := db.getPrefix(false)\n\tkeylen := len(prefix) + len(enode.ID{})\n\n\tit := db.db.NewIterator(prefix, start.Bytes())\n\tdefer it.Release()\n\n\tfor it.Next() {\n\t\tvar id enode.ID\n\t\tif len(it.Key()) != keylen {\n\t\t\treturn\n\t\t}\n\t\tcopy(id[:], it.Key()[keylen-len(id):])\n\t\tif bytes.Compare(id.Bytes(), stop.Bytes()) >= 0 {\n\t\t\treturn\n\t\t}\n\t\tresult = append(result, id)\n\t\tif len(result) == maxCount {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n// forEachBalance iterates all balances and passes values to callback.\nfunc (db *nodeDB) forEachBalance(neg bool, callback func(id enode.ID, balance utils.ExpiredValue) bool) {\n\tprefix := db.getPrefix(neg)\n\tkeylen := len(prefix) + len(enode.ID{})\n\n\tit := db.db.NewIterator(prefix, nil)\n\tdefer it.Release()\n\n\tfor it.Next() {\n\t\tvar id enode.ID\n\t\tif len(it.Key()) != keylen {\n\t\t\treturn\n\t\t}\n\t\tcopy(id[:], it.Key()[keylen-len(id):])\n\n\t\tvar b utils.ExpiredValue\n\t\tif err := rlp.DecodeBytes(it.Value(), &b); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif !callback(id, b) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (db *nodeDB) expirer() {\n\tfor {\n\t\tselect {\n\t\tcase <-db.clock.After(dbCleanupCycle):\n\t\t\tdb.expireNodes()\n\t\tcase <-db.closeCh:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// expireNodes iterates the whole node db and checks whether the\n// token balances can be deleted.\nfunc (db 
*nodeDB) expireNodes() {\n\tvar (\n\t\tvisited int\n\t\tdeleted int\n\t\tstart   = time.Now()\n\t)\n\tfor _, neg := range []bool{false, true} {\n\t\titer := db.db.NewIterator(db.getPrefix(neg), nil)\n\t\tfor iter.Next() {\n\t\t\tvisited++\n\t\t\tvar balance utils.ExpiredValue\n\t\t\tif err := rlp.DecodeBytes(iter.Value(), &balance); err != nil {\n\t\t\t\tlog.Crit(\"Failed to decode balance\", \"err\", err)\n\t\t\t}\n\t\t\tif db.evictCallBack != nil && db.evictCallBack(db.clock.Now(), neg, balance) {\n\t\t\t\tdeleted++\n\t\t\t\tdb.db.Delete(iter.Key())\n\t\t\t}\n\t\t}\n\t\titer.Release()\n\t}\n\t// Invoke testing hook if it's not nil.\n\tif db.cleanupHook != nil {\n\t\tdb.cleanupHook()\n\t}\n\tlog.Debug(\"Expire nodes\", \"visited\", visited, \"deleted\", deleted, \"elapsed\", common.PrettyDuration(time.Since(start)))\n}\n"
  },
  {
    "path": "les/vflux/server/prioritypool.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage server\n\nimport (\n\t\"math\"\n\t\"reflect\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/common/prque\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\nconst (\n\tlazyQueueRefresh = time.Second * 10 // refresh period of the active queue\n)\n\n// PriorityPoolSetup contains node state flags and fields used by PriorityPool\n// Note: ActiveFlag and InactiveFlag can be controlled both externally and by the pool,\n// see PriorityPool description for details.\ntype PriorityPoolSetup struct {\n\t// controlled by PriorityPool\n\tActiveFlag, InactiveFlag       nodestate.Flags\n\tCapacityField, ppNodeInfoField nodestate.Field\n\t// external connections\n\tupdateFlag    nodestate.Flags\n\tpriorityField nodestate.Field\n}\n\n// NewPriorityPoolSetup creates a new PriorityPoolSetup and initializes the fields\n// and flags controlled by PriorityPool\nfunc NewPriorityPoolSetup(setup *nodestate.Setup) PriorityPoolSetup {\n\treturn PriorityPoolSetup{\n\t\tActiveFlag:      setup.NewFlag(\"active\"),\n\t\tInactiveFlag:    setup.NewFlag(\"inactive\"),\n\t\tCapacityField:   setup.NewField(\"capacity\", reflect.TypeOf(uint64(0))),\n\t\tppNodeInfoField: setup.NewField(\"ppNodeInfo\", reflect.TypeOf(&ppNodeInfo{})),\n\t}\n}\n\n// Connect sets the fields and flags used by PriorityPool as an input\nfunc (pps *PriorityPoolSetup) Connect(priorityField nodestate.Field, updateFlag nodestate.Flags) {\n\tpps.priorityField = priorityField // should implement nodePriority\n\tpps.updateFlag = updateFlag       // triggers an immediate priority update\n}\n\n// PriorityPool handles a set of nodes where each node has a capacity (a scalar value)\n// and a priority (which can change over time and can also depend on the capacity).\n// A node is active if it has at least the necessary minimal amount of capacity while\n// inactive nodes have 0 capacity (values between 0 and the minimum are not allowed).\n// The pool ensures that the number and total capacity of all active nodes are limited\n// and the highest priority nodes are active at all times (limits can be changed\n// during operation with immediate effect).\n//\n// When activating clients a priority bias is applied in favor of the already active\n// nodes in order to avoid nodes quickly alternating between active and inactive states\n// when their priorities are close to each other. 
The bias is specified in terms of\n// duration (time) because priorities are expected to usually get lower over time and\n// therefore a future minimum prediction (see EstimatePriority) should monotonically\n// decrease with the specified time parameter.\n// This time bias can be interpreted as minimum expected active time at the given\n// capacity (if the threshold priority stays the same).\n//\n// Nodes in the pool always have either InactiveFlag or ActiveFlag set. A new node is\n// added to the pool by externally setting InactiveFlag. PriorityPool can switch a node\n// between InactiveFlag and ActiveFlag at any time. Nodes can be removed from the pool\n// by externally resetting both flags. ActiveFlag should not be set externally.\n//\n// The highest priority nodes in \"inactive\" state are moved to \"active\" state as soon as\n// the minimum capacity can be granted for them. The capacity of lower priority active\n// nodes is reduced or they are demoted to \"inactive\" state if their priority is\n// insufficient even at minimal capacity.\ntype PriorityPool struct {\n\tPriorityPoolSetup\n\tns                     *nodestate.NodeStateMachine\n\tclock                  mclock.Clock\n\tlock                   sync.Mutex\n\tactiveQueue            *prque.LazyQueue\n\tinactiveQueue          *prque.Prque\n\tchanged                []*ppNodeInfo\n\tactiveCount, activeCap uint64\n\tmaxCount, maxCap       uint64\n\tminCap                 uint64\n\tactiveBias             time.Duration\n\tcapacityStepDiv        uint64\n\n\tcachedCurve    *CapacityCurve\n\tccUpdatedAt    mclock.AbsTime\n\tccUpdateForced bool\n}\n\n// nodePriority interface provides current and estimated future priorities on demand\ntype nodePriority interface {\n\t// Priority should return the current priority of the node (higher is better)\n\tPriority(cap uint64) int64\n\t// EstimatePriority should return a lower estimate for the minimum of the node priority\n\t// value starting from the current moment until the given time. 
If the priority goes\n\t// under the returned estimate before the specified moment then it is the caller's\n\t// responsibility to signal with updateFlag.\n\tEstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64\n}\n\n// ppNodeInfo is the internal node descriptor of PriorityPool\ntype ppNodeInfo struct {\n\tnodePriority               nodePriority\n\tnode                       *enode.Node\n\tconnected                  bool\n\tcapacity, origCap          uint64\n\tbias                       time.Duration\n\tforced, changed            bool\n\tactiveIndex, inactiveIndex int\n}\n\n// NewPriorityPool creates a new PriorityPool\nfunc NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *PriorityPool {\n\tpp := &PriorityPool{\n\t\tns:                ns,\n\t\tPriorityPoolSetup: setup,\n\t\tclock:             clock,\n\t\tinactiveQueue:     prque.New(inactiveSetIndex),\n\t\tminCap:            minCap,\n\t\tactiveBias:        activeBias,\n\t\tcapacityStepDiv:   capacityStepDiv,\n\t}\n\tpp.activeQueue = prque.NewLazyQueue(activeSetIndex, activePriority, pp.activeMaxPriority, clock, lazyQueueRefresh)\n\n\tns.SubscribeField(pp.priorityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {\n\t\tif newValue != nil {\n\t\t\tc := &ppNodeInfo{\n\t\t\t\tnode:          node,\n\t\t\t\tnodePriority:  newValue.(nodePriority),\n\t\t\t\tactiveIndex:   -1,\n\t\t\t\tinactiveIndex: -1,\n\t\t\t}\n\t\t\tns.SetFieldSub(node, pp.ppNodeInfoField, c)\n\t\t} else {\n\t\t\tns.SetStateSub(node, nodestate.Flags{}, pp.ActiveFlag.Or(pp.InactiveFlag), 0)\n\t\t\tif n, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); n != nil {\n\t\t\t\tpp.disconnectedNode(n)\n\t\t\t}\n\t\t\tns.SetFieldSub(node, pp.CapacityField, nil)\n\t\t\tns.SetFieldSub(node, pp.ppNodeInfoField, nil)\n\t\t}\n\t})\n\tns.SubscribeState(pp.ActiveFlag.Or(pp.InactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {\n\t\tif c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); c != nil {\n\t\t\tif oldState.IsEmpty() {\n\t\t\t\tpp.connectedNode(c)\n\t\t\t}\n\t\t\tif newState.IsEmpty() {\n\t\t\t\tpp.disconnectedNode(c)\n\t\t\t}\n\t\t}\n\t})\n\tns.SubscribeState(pp.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) {\n\t\tif !newState.IsEmpty() {\n\t\t\tpp.updatePriority(node)\n\t\t}\n\t})\n\treturn pp\n}\n\n// RequestCapacity checks whether changing the capacity of a node to the given target\n// is possible (bias is applied in favor of other active nodes if the target is higher\n// than the current capacity).\n// If setCap is true then it also performs the change if possible. The function returns\n// the minimum priority needed to do the change and whether it is currently allowed.\n// If setCap and allowed are both true then the caller can assume that the change was\n// successful.\n// Note: priorityField should always be set before calling RequestCapacity. 
If setCap\n// is false then both InactiveFlag and ActiveFlag can be unset and they are not changed\n// by this function call either.\n// Note 2: this function should run inside a NodeStateMachine operation\nfunc (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias time.Duration, setCap bool) (minPriority int64, allowed bool) {\n\tpp.lock.Lock()\n\tpp.activeQueue.Refresh()\n\tvar updates []capUpdate\n\tdefer func() {\n\t\tpp.lock.Unlock()\n\t\tpp.updateFlags(updates)\n\t}()\n\n\tif targetCap < pp.minCap {\n\t\ttargetCap = pp.minCap\n\t}\n\tif bias < pp.activeBias {\n\t\tbias = pp.activeBias\n\t}\n\tc, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo)\n\tif c == nil {\n\t\tlog.Error(\"RequestCapacity called for unknown node\", \"id\", node.ID())\n\t\treturn math.MaxInt64, false\n\t}\n\tvar priority int64\n\tif targetCap > c.capacity {\n\t\tpriority = c.nodePriority.EstimatePriority(targetCap, 0, 0, bias, false)\n\t} else {\n\t\tpriority = c.nodePriority.Priority(targetCap)\n\t}\n\tpp.markForChange(c)\n\tpp.setCapacity(c, targetCap)\n\tc.forced = true\n\tpp.activeQueue.Remove(c.activeIndex)\n\tpp.inactiveQueue.Remove(c.inactiveIndex)\n\tpp.activeQueue.Push(c)\n\t_, minPriority = pp.enforceLimits()\n\t// if capacity update is possible now then minPriority == math.MinInt64\n\t// if it is not possible at all then minPriority == math.MaxInt64\n\tallowed = priority > minPriority\n\tupdates = pp.finalizeChanges(setCap && allowed)\n\treturn\n}\n\n// SetLimits sets the maximum number and total capacity of simultaneously active nodes\nfunc (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) {\n\tpp.lock.Lock()\n\tpp.activeQueue.Refresh()\n\tvar updates []capUpdate\n\tdefer func() {\n\t\tpp.lock.Unlock()\n\t\tpp.ns.Operation(func() { pp.updateFlags(updates) })\n\t}()\n\n\tinc := (maxCount > pp.maxCount) || (maxCap > pp.maxCap)\n\tdec := (maxCount < pp.maxCount) || (maxCap < pp.maxCap)\n\tpp.maxCount, pp.maxCap = maxCount, maxCap\n\tif dec {\n\t\tpp.enforceLimits()\n\t\tupdates = pp.finalizeChanges(true)\n\t}\n\tif inc {\n\t\t// accumulate instead of overwrite so that updates collected while\n\t\t// decreasing the limits are not lost\n\t\tupdates = append(updates, pp.tryActivate()...)\n\t}\n}\n\n// SetActiveBias sets the bias applied when trying to activate inactive nodes\nfunc (pp *PriorityPool) SetActiveBias(bias time.Duration) {\n\tpp.lock.Lock()\n\tvar updates []capUpdate\n\tdefer func() {\n\t\tpp.lock.Unlock()\n\t\tpp.ns.Operation(func() { pp.updateFlags(updates) })\n\t}()\n\n\tpp.activeBias = bias\n\tupdates = pp.tryActivate()\n}\n\n// Active returns the number and total capacity of currently active nodes\nfunc (pp *PriorityPool) Active() (uint64, uint64) {\n\tpp.lock.Lock()\n\tdefer pp.lock.Unlock()\n\n\treturn pp.activeCount, pp.activeCap\n}\n\n// inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue\nfunc inactiveSetIndex(a interface{}, index int) {\n\ta.(*ppNodeInfo).inactiveIndex = index\n}\n\n// activeSetIndex callback updates ppNodeInfo item index in activeQueue\nfunc activeSetIndex(a interface{}, index int) {\n\ta.(*ppNodeInfo).activeIndex = index\n}\n\n// invertPriority inverts a priority value. 
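Since -math.MinInt64 would overflow,\n// math.MinInt64 is mapped to math.MaxInt64 explicitly. 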
The active queue uses inverted priorities\n// because the node on the top is the first to be deactivated.\nfunc invertPriority(p int64) int64 {\n\tif p == math.MinInt64 {\n\t\treturn math.MaxInt64\n\t}\n\treturn -p\n}\n\n// activePriority callback returns actual priority of ppNodeInfo item in activeQueue\nfunc activePriority(a interface{}) int64 {\n\tc := a.(*ppNodeInfo)\n\tif c.forced {\n\t\treturn math.MinInt64\n\t}\n\tif c.bias == 0 {\n\t\treturn invertPriority(c.nodePriority.Priority(c.capacity))\n\t} else {\n\t\treturn invertPriority(c.nodePriority.EstimatePriority(c.capacity, 0, 0, c.bias, true))\n\t}\n}\n\n// activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue\nfunc (pp *PriorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) int64 {\n\tc := a.(*ppNodeInfo)\n\tif c.forced {\n\t\treturn math.MinInt64\n\t}\n\tfuture := time.Duration(until - pp.clock.Now())\n\tif future < 0 {\n\t\tfuture = 0\n\t}\n\treturn invertPriority(c.nodePriority.EstimatePriority(c.capacity, 0, future, c.bias, false))\n}\n\n// inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue\nfunc (pp *PriorityPool) inactivePriority(p *ppNodeInfo) int64 {\n\treturn p.nodePriority.Priority(pp.minCap)\n}\n\n// connectedNode is called when a new node has been added to the pool (InactiveFlag set)\n// Note: this function should run inside a NodeStateMachine operation\nfunc (pp *PriorityPool) connectedNode(c *ppNodeInfo) {\n\tpp.lock.Lock()\n\tpp.activeQueue.Refresh()\n\tvar updates []capUpdate\n\tdefer func() {\n\t\tpp.lock.Unlock()\n\t\tpp.updateFlags(updates)\n\t}()\n\n\tif c.connected {\n\t\treturn\n\t}\n\tc.connected = true\n\tpp.inactiveQueue.Push(c, pp.inactivePriority(c))\n\tupdates = pp.tryActivate()\n}\n\n// disconnectedNode is called when a node has been removed from the pool (both InactiveFlag\n// and ActiveFlag reset)\n// Note: this function should run inside a NodeStateMachine operation\nfunc (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) {\n\tpp.lock.Lock()\n\tpp.activeQueue.Refresh()\n\tvar updates []capUpdate\n\tdefer func() {\n\t\tpp.lock.Unlock()\n\t\tpp.updateFlags(updates)\n\t}()\n\n\tif !c.connected {\n\t\treturn\n\t}\n\tc.connected = false\n\tpp.activeQueue.Remove(c.activeIndex)\n\tpp.inactiveQueue.Remove(c.inactiveIndex)\n\tif c.capacity != 0 {\n\t\tpp.setCapacity(c, 0)\n\t\tupdates = pp.tryActivate()\n\t}\n}\n\n// markForChange internally puts a node in a temporary state that can either be reverted\n// or confirmed later. This temporary state allows changing the capacity of a node and\n// moving it between the active and inactive queue. ActiveFlag/InactiveFlag and\n// CapacityField are not changed while the changes are still temporary.\nfunc (pp *PriorityPool) markForChange(c *ppNodeInfo) {\n\tif c.changed {\n\t\treturn\n\t}\n\tc.changed = true\n\tc.origCap = c.capacity\n\tpp.changed = append(pp.changed, c)\n}\n\n// setCapacity changes the capacity of a node and adjusts activeCap and activeCount\n// accordingly. Note that this change is performed in the temporary state so it should\n// be called after markForChange and before finalizeChanges.\nfunc (pp *PriorityPool) setCapacity(n *ppNodeInfo, cap uint64) {\n\tpp.activeCap += cap - n.capacity\n\tif n.capacity == 0 {\n\t\tpp.activeCount++\n\t}\n\tif cap == 0 {\n\t\tpp.activeCount--\n\t}\n\tn.capacity = cap\n}\n\n// enforceLimits enforces active node count and total capacity limits. It returns the\n// lowest active node priority. 
Note that this function is performed on the temporary\n// internal state.\nfunc (pp *PriorityPool) enforceLimits() (*ppNodeInfo, int64) {\n\tif pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount {\n\t\treturn nil, math.MinInt64\n\t}\n\tvar (\n\t\tc                 *ppNodeInfo\n\t\tmaxActivePriority int64\n\t)\n\tpp.activeQueue.MultiPop(func(data interface{}, priority int64) bool {\n\t\tc = data.(*ppNodeInfo)\n\t\tpp.markForChange(c)\n\t\tmaxActivePriority = priority\n\t\tif c.capacity == pp.minCap || pp.activeCount > pp.maxCount {\n\t\t\tpp.setCapacity(c, 0)\n\t\t} else {\n\t\t\tsub := c.capacity / pp.capacityStepDiv\n\t\t\tif c.capacity-sub < pp.minCap {\n\t\t\t\tsub = c.capacity - pp.minCap\n\t\t\t}\n\t\t\tpp.setCapacity(c, c.capacity-sub)\n\t\t\tpp.activeQueue.Push(c)\n\t\t}\n\t\treturn pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount\n\t})\n\treturn c, invertPriority(maxActivePriority)\n}\n\n// finalizeChanges either commits or reverts temporary changes. The necessary capacity\n// field and according flag updates are not performed here but returned in a list because\n// they should be performed while the mutex is not held.\nfunc (pp *PriorityPool) finalizeChanges(commit bool) (updates []capUpdate) {\n\tfor _, c := range pp.changed {\n\t\t// always remove and push back in order to update biased/forced priority\n\t\tpp.activeQueue.Remove(c.activeIndex)\n\t\tpp.inactiveQueue.Remove(c.inactiveIndex)\n\t\tc.bias = 0\n\t\tc.forced = false\n\t\tc.changed = false\n\t\tif !commit {\n\t\t\tpp.setCapacity(c, c.origCap)\n\t\t}\n\t\tif c.connected {\n\t\t\tif c.capacity != 0 {\n\t\t\t\tpp.activeQueue.Push(c)\n\t\t\t} else {\n\t\t\t\tpp.inactiveQueue.Push(c, pp.inactivePriority(c))\n\t\t\t}\n\t\t\tif c.capacity != c.origCap && commit {\n\t\t\t\tupdates = append(updates, capUpdate{c.node, c.origCap, c.capacity})\n\t\t\t}\n\t\t}\n\t\tc.origCap = 0\n\t}\n\tpp.changed = nil\n\tif commit {\n\t\tpp.ccUpdateForced = true\n\t}\n\treturn\n}\n\n// capUpdate describes a CapacityField and ActiveFlag/InactiveFlag update\ntype capUpdate struct {\n\tnode           *enode.Node\n\toldCap, newCap uint64\n}\n\n// updateFlags performs CapacityField and ActiveFlag/InactiveFlag updates while the\n// pool mutex is not held\n// Note: this function should run inside a NodeStateMachine operation\nfunc (pp *PriorityPool) updateFlags(updates []capUpdate) {\n\tfor _, f := range updates {\n\t\tif f.oldCap == 0 {\n\t\t\tpp.ns.SetStateSub(f.node, pp.ActiveFlag, pp.InactiveFlag, 0)\n\t\t}\n\t\tif f.newCap == 0 {\n\t\t\tpp.ns.SetStateSub(f.node, pp.InactiveFlag, pp.ActiveFlag, 0)\n\t\t\tpp.ns.SetFieldSub(f.node, pp.CapacityField, nil)\n\t\t} else {\n\t\t\tpp.ns.SetFieldSub(f.node, pp.CapacityField, f.newCap)\n\t\t}\n\t}\n}\n\n// tryActivate tries to activate inactive nodes if possible\nfunc (pp *PriorityPool) tryActivate() []capUpdate {\n\tvar commit bool\n\tfor pp.inactiveQueue.Size() > 0 {\n\t\tc := pp.inactiveQueue.PopItem().(*ppNodeInfo)\n\t\tpp.markForChange(c)\n\t\tpp.setCapacity(c, pp.minCap)\n\t\tc.bias = pp.activeBias\n\t\tpp.activeQueue.Push(c)\n\t\tpp.enforceLimits()\n\t\tif c.capacity > 0 {\n\t\t\tcommit = true\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tpp.ccUpdateForced = true\n\treturn pp.finalizeChanges(commit)\n}\n\n// updatePriority gets the current priority value of the given node from the nodePriority\n// interface and performs the necessary changes. 
It is triggered by updateFlag.\n// Note: this function should run inside a NodeStateMachine operation\nfunc (pp *PriorityPool) updatePriority(node *enode.Node) {\n\tpp.lock.Lock()\n\tpp.activeQueue.Refresh()\n\tvar updates []capUpdate\n\tdefer func() {\n\t\tpp.lock.Unlock()\n\t\tpp.updateFlags(updates)\n\t}()\n\n\tc, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo)\n\tif c == nil || !c.connected {\n\t\treturn\n\t}\n\tpp.activeQueue.Remove(c.activeIndex)\n\tpp.inactiveQueue.Remove(c.inactiveIndex)\n\tif c.capacity != 0 {\n\t\tpp.activeQueue.Push(c)\n\t} else {\n\t\tpp.inactiveQueue.Push(c, pp.inactivePriority(c))\n\t}\n\tupdates = pp.tryActivate()\n}\n\n// CapacityCurve is a snapshot of the priority pool contents in a format that can efficiently\n// estimate how much capacity could be granted to a given node at a given priority level.\ntype CapacityCurve struct {\n\tpoints       []curvePoint       // curve points sorted in descending order of priority\n\tindex        map[enode.ID][]int // curve point indexes belonging to each node\n\texclude      []int              // curve point indexes of excluded node\n\texcludeFirst bool               // true if activeCount == maxCount\n}\n\ntype curvePoint struct {\n\tfreeCap uint64 // available capacity and node count at the current priority level\n\tnextPri int64  // next priority level where more capacity will be available\n}\n\n// GetCapacityCurve returns a new or recently cached CapacityCurve based on the contents of the pool\nfunc (pp *PriorityPool) GetCapacityCurve() *CapacityCurve {\n\tpp.lock.Lock()\n\tdefer pp.lock.Unlock()\n\n\tnow := pp.clock.Now()\n\tdt := time.Duration(now - pp.ccUpdatedAt)\n\tif !pp.ccUpdateForced && pp.cachedCurve != nil && dt < time.Second*10 {\n\t\treturn pp.cachedCurve\n\t}\n\n\tpp.ccUpdateForced = false\n\tpp.ccUpdatedAt = now\n\tcurve := &CapacityCurve{\n\t\tindex: make(map[enode.ID][]int),\n\t}\n\tpp.cachedCurve = curve\n\n\tvar excludeID enode.ID\n\texcludeFirst := pp.maxCount == pp.activeCount\n\t// reduce node capacities or remove nodes until nothing is left in the queue;\n\t// record the available capacity and the necessary priority after each step\n\tfor pp.activeCap > 0 {\n\t\tcp := curvePoint{}\n\t\tif pp.activeCap > pp.maxCap {\n\t\t\tlog.Error(\"Active capacity is greater than allowed maximum\", \"active\", pp.activeCap, \"maximum\", pp.maxCap)\n\t\t} else {\n\t\t\tcp.freeCap = pp.maxCap - pp.activeCap\n\t\t}\n\t\t// temporarily increase activeCap to enforce reducing or removing a node capacity\n\t\ttempCap := cp.freeCap + 1\n\t\tpp.activeCap += tempCap\n\t\tvar next *ppNodeInfo\n\t\t// enforceLimits removes the lowest priority node if it has minimal capacity,\n\t\t// otherwise reduces its capacity\n\t\tnext, cp.nextPri = pp.enforceLimits()\n\t\tpp.activeCap -= tempCap\n\t\tif next == nil {\n\t\t\tlog.Error(\"GetCapacityCurve: cannot remove next element from the priority queue\")\n\t\t\tbreak\n\t\t}\n\t\tid := next.node.ID()\n\t\tif excludeFirst {\n\t\t\t// if the node count limit is already reached then mark the node with the\n\t\t\t// lowest priority for exclusion\n\t\t\tcurve.excludeFirst = true\n\t\t\texcludeID = id\n\t\t\texcludeFirst = false\n\t\t}\n\t\t// multiple curve points and therefore multiple indexes may belong to a node\n\t\t// if it was removed in multiple steps (if its capacity was more than the minimum)\n\t\tcurve.index[id] = append(curve.index[id], len(curve.points))\n\t\tcurve.points = append(curve.points, cp)\n\t}\n\t// restore original state of the 
queue\n\tpp.finalizeChanges(false)\n\tcurve.points = append(curve.points, curvePoint{\n\t\tfreeCap: pp.maxCap,\n\t\tnextPri: math.MaxInt64,\n\t})\n\tif curve.excludeFirst {\n\t\tcurve.exclude = curve.index[excludeID]\n\t}\n\treturn curve\n}\n\n// Exclude returns a CapacityCurve with the given node excluded from the original curve\nfunc (cc *CapacityCurve) Exclude(id enode.ID) *CapacityCurve {\n\tif exclude, ok := cc.index[id]; ok {\n\t\t// return a new version of the curve (only one excluded node can be selected)\n\t\t// Note: if the first node was excluded by default (excludeFirst == true) then\n\t\t// we can forget about that and exclude the node with the given id instead.\n\t\treturn &CapacityCurve{\n\t\t\tpoints:  cc.points,\n\t\t\tindex:   cc.index,\n\t\t\texclude: exclude,\n\t\t}\n\t}\n\treturn cc\n}\n\nfunc (cc *CapacityCurve) getPoint(i int) curvePoint {\n\tcp := cc.points[i]\n\tif i == 0 && cc.excludeFirst {\n\t\tcp.freeCap = 0\n\t\treturn cp\n\t}\n\tfor ii := len(cc.exclude) - 1; ii >= 0; ii-- {\n\t\tei := cc.exclude[ii]\n\t\tif ei < i {\n\t\t\tbreak\n\t\t}\n\t\te1, e2 := cc.points[ei], cc.points[ei+1]\n\t\tcp.freeCap += e2.freeCap - e1.freeCap\n\t}\n\treturn cp\n}\n\n// MaxCapacity calculates the maximum capacity available for a node with a given\n// (monotonically decreasing) priority vs. capacity function. Note that if the requesting\n// node is already in the pool then it should be excluded from the curve in order to get\n// the correct result.\nfunc (cc *CapacityCurve) MaxCapacity(priority func(cap uint64) int64) uint64 {\n\tmin, max := 0, len(cc.points)-1 // the curve always has at least one point\n\tfor min < max {\n\t\tmid := (min + max) / 2\n\t\tcp := cc.getPoint(mid)\n\t\tif cp.freeCap == 0 || priority(cp.freeCap) > cp.nextPri {\n\t\t\tmin = mid + 1\n\t\t} else {\n\t\t\tmax = mid\n\t\t}\n\t}\n\tcp2 := cc.getPoint(min)\n\tif cp2.freeCap == 0 || min == 0 {\n\t\treturn cp2.freeCap\n\t}\n\tcp1 := cc.getPoint(min - 1)\n\tif priority(cp2.freeCap) > cp1.nextPri {\n\t\treturn cp2.freeCap\n\t}\n\tminc, maxc := cp1.freeCap, cp2.freeCap-1\n\tfor minc < maxc {\n\t\tmidc := (minc + maxc + 1) / 2\n\t\tif midc == 0 || priority(midc) > cp1.nextPri {\n\t\t\tminc = midc\n\t\t} else {\n\t\t\tmaxc = midc - 1\n\t\t}\n\t}\n\treturn maxc\n}\n"
  },
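  {
    "path": "les/vflux/server/capacitycurve_sketch_test.go",
    "content": "// Hypothetical sketch, not part of the original go-ethereum test suite:\n// it builds a CapacityCurve by hand to make the semantics of MaxCapacity\n// concrete. The three points below are an assumption chosen purely for\n// illustration: each point pairs the capacity already available (freeCap)\n// with the priority that has to be exceeded (nextPri) to unlock the next\n// level, and the final point offers the full capacity at math.MaxInt64,\n// mirroring the point that GetCapacityCurve always appends.\npackage server\n\nimport (\n\t\"math\"\n\t\"testing\"\n)\n\nfunc TestCapacityCurveSketch(t *testing.T) {\n\tcc := &CapacityCurve{points: []curvePoint{\n\t\t{freeCap: 0, nextPri: 50},               // beat priority 50 to free any capacity at all\n\t\t{freeCap: 500, nextPri: 100},            // beat priority 100 to go beyond 500 units\n\t\t{freeCap: 1000, nextPri: math.MaxInt64}, // the full capacity of the pool\n\t}}\n\t// A fixed-balance node has priority balance/cap, a monotonically\n\t// decreasing function of capacity, as MaxCapacity requires.\n\tcapFor := func(balance uint64) uint64 {\n\t\treturn cc.MaxCapacity(func(cap uint64) int64 { return int64(balance / cap) })\n\t}\n\tif got := capFor(120000); got != 1000 {\n\t\tt.Errorf(\"balance 120000: got %d, want 1000 (120000/1000 = 120 > 100)\", got)\n\t}\n\t// With balance 60000 the full 1000 is out of reach (60000/1000 = 60 <= 100),\n\t// so MaxCapacity bisects between 500 and 1000 for the largest cap c with\n\t// 60000/c > 100, which is 594 under integer division.\n\tif got := capFor(60000); got != 594 {\n\t\tt.Errorf(\"balance 60000: got %d, want 594\", got)\n\t}\n}\n"
  },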
  {
    "path": "les/vflux/server/prioritypool_test.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage server\n\nimport (\n\t\"math/rand\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/common/mclock\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/p2p/enr\"\n\t\"github.com/ethereum/go-ethereum/p2p/nodestate\"\n)\n\nvar (\n\ttestSetup         = &nodestate.Setup{}\n\tppTestClientFlag  = testSetup.NewFlag(\"ppTestClientFlag\")\n\tppTestClientField = testSetup.NewField(\"ppTestClient\", reflect.TypeOf(&ppTestClient{}))\n\tppUpdateFlag      = testSetup.NewFlag(\"ppUpdateFlag\")\n\tppTestSetup       = NewPriorityPoolSetup(testSetup)\n)\n\nfunc init() {\n\tppTestSetup.Connect(ppTestClientField, ppUpdateFlag)\n}\n\nconst (\n\ttestCapacityStepDiv      = 100\n\ttestCapacityToleranceDiv = 10\n\ttestMinCap               = 100\n)\n\ntype ppTestClient struct {\n\tnode         *enode.Node\n\tbalance, cap uint64\n}\n\nfunc (c *ppTestClient) Priority(cap uint64) int64 {\n\treturn int64(c.balance / cap)\n}\n\nfunc (c *ppTestClient) EstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 {\n\treturn int64(c.balance / cap)\n}\n\nfunc TestPriorityPool(t *testing.T) {\n\tclock := &mclock.Simulated{}\n\tns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup)\n\n\tns.SubscribeField(ppTestSetup.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {\n\t\tif n := ns.GetField(node, ppTestSetup.priorityField); n != nil {\n\t\t\tc := n.(*ppTestClient)\n\t\t\tc.cap = newValue.(uint64)\n\t\t}\n\t})\n\tpp := NewPriorityPool(ns, ppTestSetup, clock, testMinCap, 0, testCapacityStepDiv)\n\tns.Start()\n\tpp.SetLimits(100, 1000000)\n\tclients := make([]*ppTestClient, 100)\n\traise := func(c *ppTestClient) {\n\t\tfor {\n\t\t\tvar ok bool\n\t\t\tns.Operation(func() {\n\t\t\t\t_, ok = pp.RequestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0, true)\n\t\t\t})\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\tvar sumBalance uint64\n\tcheck := func(c *ppTestClient) {\n\t\texpCap := 1000000 * c.balance / sumBalance\n\t\tcapTol := expCap / testCapacityToleranceDiv\n\t\tif c.cap < expCap-capTol || c.cap > expCap+capTol {\n\t\t\tt.Errorf(\"Wrong node capacity (expected %d, got %d)\", expCap, c.cap)\n\t\t}\n\t}\n\n\tfor i := range clients {\n\t\tc := &ppTestClient{\n\t\t\tnode:    enode.SignNull(&enr.Record{}, enode.ID{byte(i)}),\n\t\t\tbalance: 100000000000,\n\t\t\tcap:     1000,\n\t\t}\n\t\tsumBalance += c.balance\n\t\tclients[i] = c\n\t\tns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0)\n\t\tns.SetField(c.node, ppTestSetup.priorityField, c)\n\t\tns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 
0)\n\t\traise(c)\n\t\tcheck(c)\n\t}\n\n\tfor count := 0; count < 100; count++ {\n\t\tc := clients[rand.Intn(len(clients))]\n\t\toldBalance := c.balance\n\t\tc.balance = uint64(rand.Int63n(100000000000) + 100000000000)\n\t\tsumBalance += c.balance - oldBalance\n\t\tpp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0)\n\t\tpp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0)\n\t\tif c.balance > oldBalance {\n\t\t\traise(c)\n\t\t} else {\n\t\t\tfor _, c := range clients {\n\t\t\t\traise(c)\n\t\t\t}\n\t\t}\n\t\t// check whether capacities are proportional to balances\n\t\tfor _, c := range clients {\n\t\t\tcheck(c)\n\t\t}\n\t\tif count%10 == 0 {\n\t\t\t// test available capacity calculation with capacity curve\n\t\t\tc = clients[rand.Intn(len(clients))]\n\t\t\tcurve := pp.GetCapacityCurve().Exclude(c.node.ID())\n\n\t\t\tadd := uint64(rand.Int63n(10000000000000))\n\t\t\tc.balance += add\n\t\t\tsumBalance += add\n\t\t\texpCap := curve.MaxCapacity(func(cap uint64) int64 {\n\t\t\t\treturn int64(c.balance / cap)\n\t\t\t})\n\t\t\t//fmt.Println(expCap, c.balance, sumBalance)\n\t\t\t/*for i, cp := range curve.points {\n\t\t\t\tfmt.Println(\"cp\", i, cp, \"ex\", curve.getPoint(i))\n\t\t\t}*/\n\t\t\tvar ok bool\n\t\t\texpFail := expCap + 1\n\t\t\tif expFail < testMinCap {\n\t\t\t\texpFail = testMinCap\n\t\t\t}\n\t\t\tns.Operation(func() {\n\t\t\t\t_, ok = pp.RequestCapacity(c.node, expFail, 0, true)\n\t\t\t})\n\t\t\tif ok {\n\t\t\t\tt.Errorf(\"Request for more than expected available capacity succeeded\")\n\t\t\t}\n\t\t\tif expCap >= testMinCap {\n\t\t\t\tns.Operation(func() {\n\t\t\t\t\t_, ok = pp.RequestCapacity(c.node, expCap, 0, true)\n\t\t\t\t})\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Errorf(\"Request for expected available capacity failed\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.balance -= add\n\t\t\tsumBalance -= add\n\t\t\tpp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0)\n\t\t\tpp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0)\n\t\t\tfor _, c := range clients {\n\t\t\t\traise(c)\n\t\t\t}\n\t\t}\n\t}\n\n\tns.Stop()\n}\n\nfunc TestCapacityCurve(t *testing.T) {\n\tclock := &mclock.Simulated{}\n\tns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup)\n\tpp := NewPriorityPool(ns, ppTestSetup, clock, 400000, 0, 2)\n\tns.Start()\n\tpp.SetLimits(10, 10000000)\n\tclients := make([]*ppTestClient, 10)\n\n\tfor i := range clients {\n\t\tc := &ppTestClient{\n\t\t\tnode:    enode.SignNull(&enr.Record{}, enode.ID{byte(i)}),\n\t\t\tbalance: 100000000000 * uint64(i+1),\n\t\t\tcap:     1000000,\n\t\t}\n\t\tclients[i] = c\n\t\tns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0)\n\t\tns.SetField(c.node, ppTestSetup.priorityField, c)\n\t\tns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0)\n\t\tns.Operation(func() {\n\t\t\tpp.RequestCapacity(c.node, c.cap, 0, true)\n\t\t})\n\t}\n\n\tcurve := pp.GetCapacityCurve()\n\tcheck := func(balance, expCap uint64) {\n\t\tcap := curve.MaxCapacity(func(cap uint64) int64 {\n\t\t\treturn int64(balance / cap)\n\t\t})\n\t\tvar fail bool\n\t\tif cap == 0 || expCap == 0 {\n\t\t\tfail = cap != expCap\n\t\t} else {\n\t\t\tpri := balance / cap\n\t\t\texpPri := balance / expCap\n\t\t\tfail = pri != expPri && pri != expPri+1\n\t\t}\n\t\tif fail {\n\t\t\tt.Errorf(\"Incorrect capacity for %d balance (got %d, expected %d)\", balance, cap, expCap)\n\t\t}\n\t}\n\n\tcheck(0, 0)\n\tcheck(10000000000, 100000)\n\tcheck(50000000000, 500000)\n\tcheck(100000000000, 1000000)\n\tcheck(200000000000, 1000000)\n\tcheck(300000000000, 
1500000)\n\tcheck(450000000000, 1500000)\n\tcheck(600000000000, 2000000)\n\tcheck(800000000000, 2000000)\n\tcheck(1000000000000, 2500000)\n\n\tpp.SetLimits(11, 10000000)\n\tcurve = pp.GetCapacityCurve()\n\n\tcheck(0, 0)\n\tcheck(10000000000, 100000)\n\tcheck(50000000000, 500000)\n\tcheck(150000000000, 750000)\n\tcheck(200000000000, 1000000)\n\tcheck(220000000000, 1100000)\n\tcheck(275000000000, 1100000)\n\tcheck(375000000000, 1500000)\n\tcheck(450000000000, 1500000)\n\tcheck(600000000000, 2000000)\n\tcheck(800000000000, 2000000)\n\tcheck(1000000000000, 2500000)\n\n\tns.Stop()\n}\n"
  },
  {
    "path": "les/vflux/server/service.go",
    "content": "// Copyright 2020 The go-ethereum Authors\n// This file is part of the go-ethereum library.\n//\n// The go-ethereum library is free software: you can redistribute it and/or modify\n// it under the terms of the GNU Lesser General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// The go-ethereum library is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n// GNU Lesser General Public License for more details.\n//\n// You should have received a copy of the GNU Lesser General Public License\n// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.\n\npackage server\n\nimport (\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/ethereum/go-ethereum/les/utils\"\n\t\"github.com/ethereum/go-ethereum/les/vflux\"\n\t\"github.com/ethereum/go-ethereum/log\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n\t\"github.com/ethereum/go-ethereum/rlp\"\n)\n\ntype (\n\t// Server serves vflux requests\n\tServer struct {\n\t\tlimiter         *utils.Limiter\n\t\tlock            sync.Mutex\n\t\tservices        map[string]*serviceEntry\n\t\tdelayPerRequest time.Duration\n\t}\n\n\t// Service is a service registered at the Server and identified by a string id\n\tService interface {\n\t\tServiceInfo() (id, desc string)                                      // only called during registration\n\t\tHandle(id enode.ID, address string, name string, data []byte) []byte // never called concurrently\n\t}\n\n\tserviceEntry struct {\n\t\tid, desc string\n\t\tbackend  Service\n\t}\n)\n\n// NewServer creates a new Server\nfunc NewServer(delayPerRequest time.Duration) *Server {\n\treturn &Server{\n\t\tlimiter:         utils.NewLimiter(1000),\n\t\tdelayPerRequest: delayPerRequest,\n\t\tservices:        make(map[string]*serviceEntry),\n\t}\n}\n\n// Register registers a Service\nfunc (s *Server) Register(b Service) {\n\tsrv := &serviceEntry{backend: b}\n\tsrv.id, srv.desc = b.ServiceInfo()\n\tif strings.Contains(srv.id, \":\") {\n\t\t// srv.id + \":\" will be used as a service database prefix\n\t\tlog.Error(\"Service ID contains ':'\", \"id\", srv.id)\n\t\treturn\n\t}\n\ts.lock.Lock()\n\ts.services[srv.id] = srv\n\ts.lock.Unlock()\n}\n\n// Serve serves a vflux request batch\n// Note: requests are served by the Handle functions of the registered services. 
Serve\n// may be called concurrently but the Handle functions are called sequentially and\n// therefore thread safety is guaranteed.\nfunc (s *Server) Serve(id enode.ID, address string, requests vflux.Requests) vflux.Replies {\n\treqLen := uint(len(requests))\n\tif reqLen == 0 || reqLen > vflux.MaxRequestLength {\n\t\treturn nil\n\t}\n\t// Note: the value parameter will be supplied by the token sale module (total amount paid)\n\tch := <-s.limiter.Add(id, address, 0, reqLen)\n\tif ch == nil {\n\t\treturn nil\n\t}\n\t// Note: the limiter ensures that the following section is not running concurrently,\n\t// the lock only protects against contention caused by new service registration\n\ts.lock.Lock()\n\tresults := make(vflux.Replies, len(requests))\n\tfor i, req := range requests {\n\t\tif service := s.services[req.Service]; service != nil {\n\t\t\tresults[i] = service.backend.Handle(id, address, req.Name, req.Params)\n\t\t}\n\t}\n\ts.lock.Unlock()\n\ttime.Sleep(s.delayPerRequest * time.Duration(reqLen))\n\tclose(ch)\n\treturn results\n}\n\n// ServeEncoded serves an encoded vflux request batch and returns the encoded replies\nfunc (s *Server) ServeEncoded(id enode.ID, addr *net.UDPAddr, req []byte) []byte {\n\tvar requests vflux.Requests\n\tif err := rlp.DecodeBytes(req, &requests); err != nil {\n\t\treturn nil\n\t}\n\tresults := s.Serve(id, addr.String(), requests)\n\tif results == nil {\n\t\treturn nil\n\t}\n\tres, _ := rlp.EncodeToBytes(&results)\n\treturn res\n}\n\n// Stop shuts down the server\nfunc (s *Server) Stop() {\n\ts.limiter.Stop()\n}\n"
  },
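  {
    "path": "les/vflux/server/service_sketch_test.go",
    "content": "// Hypothetical sketch, not part of the original go-ethereum test suite:\n// a minimal backend implementing the Service interface from service.go,\n// registered with a Server and exercised through Serve. The \"echo\" id and\n// the reply-with-request-payload behaviour are assumptions made purely for\n// illustration.\npackage server\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com/ethereum/go-ethereum/les/vflux\"\n\t\"github.com/ethereum/go-ethereum/p2p/enode\"\n)\n\ntype echoService struct{}\n\n// ServiceInfo is only called once, during registration.\nfunc (echoService) ServiceInfo() (string, string) {\n\treturn \"echo\", \"replies with the request payload\"\n}\n\n// Handle is never called concurrently (see the note on Serve), so the\n// backend needs no internal locking for per-request state.\nfunc (echoService) Handle(id enode.ID, address string, name string, data []byte) []byte {\n\treturn data\n}\n\nfunc TestEchoServiceSketch(t *testing.T) {\n\tsrv := NewServer(0) // no artificial delay per served request\n\tdefer srv.Stop()\n\tsrv.Register(echoService{})\n\n\treqs := vflux.Requests{{Service: \"echo\", Name: \"ping\", Params: []byte{1, 2, 3}}}\n\treplies := srv.Serve(enode.ID{1}, \"127.0.0.1:1000\", reqs)\n\tif len(replies) != 1 || !bytes.Equal(replies[0], []byte{1, 2, 3}) {\n\t\tt.Fatalf(\"unexpected replies: %v\", replies)\n\t}\n}\n"
  },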
  {
    "path": "liblangutil/CMakeLists.txt",
    "content": "# Solidity Commons Library (Solidity related sharing bits between libsolidity and libyul)\nset(sources\n\tCommon.h\n\tCharStream.cpp\n\tCharStream.h\n\tErrorReporter.cpp\n\tErrorReporter.h\n\tEVMVersion.h\n\tEVMVersion.cpp\n\tExceptions.cpp\n\tExceptions.h\n\tParserBase.cpp\n\tParserBase.h\n\tScanner.cpp\n\tScanner.h\n\tSemVerHandler.cpp\n\tSemVerHandler.h\n\tSourceLocation.h\n\tSourceLocation.cpp\n\tSourceReferenceExtractor.cpp\n\tSourceReferenceExtractor.h\n\tSourceReferenceFormatter.cpp\n\tSourceReferenceFormatter.h\n\tToken.cpp\n\tToken.h\n\tUndefMacros.h\n)\n\nadd_library(langutil ${sources})\ntarget_link_libraries(langutil PUBLIC solutil)\n"
  },
  {
    "path": "liblangutil/CharStream.cpp",
    "content": "/*\n * This file is part of solidity.\n *\n * solidity is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * solidity is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with solidity.  If not, see <http://www.gnu.org/licenses/>.\n *\n * This file is derived from the file \"scanner.cc\", which was part of the\n * V8 project. The original copyright header follows:\n *\n * Copyright 2006-2012, the V8 project authors. All rights reserved.\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n * * Redistributions of source code must retain the above copyright\n *   notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above\n *   copyright notice, this list of conditions and the following\n *   disclaimer in the documentation and/or other materials provided\n *   with the distribution.\n * * Neither the name of Google Inc. nor the names of its\n *   contributors may be used to endorse or promote products derived\n *   from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n/**\n * @author Christian <c@ethdev.com>\n * @date 2014\n * Solidity scanner.\n */\n\n#include <liblangutil/CharStream.h>\n#include <liblangutil/Exceptions.h>\n\nusing namespace std;\nusing namespace solidity;\nusing namespace solidity::langutil;\n\nchar CharStream::advanceAndGet(size_t _chars)\n{\n\tif (isPastEndOfInput())\n\t\treturn 0;\n\tm_position += _chars;\n\tif (isPastEndOfInput())\n\t\treturn 0;\n\treturn m_source[m_position];\n}\n\nchar CharStream::rollback(size_t _amount)\n{\n\tsolAssert(m_position >= _amount, \"\");\n\tm_position -= _amount;\n\treturn get();\n}\n\nchar CharStream::setPosition(size_t _location)\n{\n\tsolAssert(_location <= m_source.size(), \"Attempting to set position past end of source.\");\n\tm_position = _location;\n\treturn get();\n}\n\nstring CharStream::lineAtPosition(int _position) const\n{\n\t// if _position points to \\n, it returns the line before the \\n\n\tusing size_type = string::size_type;\n\tsize_type searchStart = min<size_type>(m_source.size(), size_type(_position));\n\tif (searchStart > 0)\n\t\tsearchStart--;\n\tsize_type lineStart = m_source.rfind('\\n', searchStart);\n\tif (lineStart == string::npos)\n\t\tlineStart = 0;\n\telse\n\t\tlineStart++;\n\tstring line = m_source.substr(\n\t\tlineStart,\n\t\tmin(m_source.find('\\n', lineStart), m_source.size()) - lineStart\n\t);\n\tif (!line.empty() && line.back() == '\\r')\n\t\tline.pop_back();\n\treturn line;\n}\n\ntuple<int, int> CharStream::translatePositionToLineColumn(int _position) const\n{\n\tusing size_type = string::size_type;\n\tusing diff_type = string::difference_type;\n\tsize_type searchPosition = min<size_type>(m_source.size(), size_type(_position));\n\tint lineNumber = static_cast<int>(count(m_source.begin(), m_source.begin() + diff_type(searchPosition), '\\n'));\n\tsize_type lineStart;\n\tif (searchPosition == 0)\n\t\tlineStart = 0;\n\telse\n\t{\n\t\tlineStart = m_source.rfind('\\n', searchPosition - 1);\n\t\tlineStart = lineStart == string::npos ? 0 : lineStart + 1;\n\t}\n\treturn tuple<int, int>(lineNumber, searchPosition - lineStart);\n}\n"
  },
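  {
    "path": "docs/sketches/charstream_line_column.go",
    "content": "// Hypothetical illustrative file, not part of any of the repositories in\n// this dump: a small Go re-implementation of the position-to-line/column\n// mapping performed by CharStream::translatePositionToLineColumn above.\n// It spells out the conventions of that function: line and column are\n// both zero-based, the position is clamped to the source length, and a\n// position pointing at a '\\n' is reported on the line the '\\n' terminates.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nfunc lineColumn(src string, pos int) (line, col int) {\n\tif pos > len(src) {\n\t\tpos = len(src) // clamp, like min(m_source.size(), _position)\n\t}\n\tprefix := src[:pos]\n\tline = strings.Count(prefix, \"\\n\")                   // newlines before pos\n\tlineStart := strings.LastIndexByte(prefix, '\\n') + 1 // 0 when there is no earlier newline\n\treturn line, pos - lineStart\n}\n\nfunc main() {\n\tsrc := \"contract C {\\n\\tuint x;\\n}\\n\"\n\tfor _, pos := range []int{0, 5, 13, 21, len(src)} {\n\t\tl, c := lineColumn(src, pos)\n\t\tfmt.Printf(\"pos %2d -> line %d, column %d\\n\", pos, l, c)\n\t}\n}\n"
  },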
  {
    "path": "liblangutil/EVMVersion.cpp",
    "content": "/*\n\tThis file is part of solidity.\n\n\tsolidity is free software: you can redistribute it and/or modify\n\tit under the terms of the GNU General Public License as published by\n\tthe Free Software Foundation, either version 3 of the License, or\n\t(at your option) any later version.\n\n\tsolidity is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with solidity.  If not, see <http://www.gnu.org/licenses/>.\n*/\n// SPDX-License-Identifier: GPL-3.0\n/**\n * EVM versioning.\n */\n\n#include <liblangutil/EVMVersion.h>\n\nusing namespace solidity;\nusing namespace solidity::evmasm;\nusing namespace solidity::langutil;\n\nbool EVMVersion::hasOpcode(Instruction _opcode) const\n{\n\tswitch (_opcode)\n\t{\n\tcase Instruction::RETURNDATACOPY:\n\tcase Instruction::RETURNDATASIZE:\n\t\treturn supportsReturndata();\n\tcase Instruction::STATICCALL:\n\t\treturn hasStaticCall();\n\tcase Instruction::SHL:\n\tcase Instruction::SHR:\n\tcase Instruction::SAR:\n\t\treturn hasBitwiseShifting();\n\tcase Instruction::CREATE2:\n\t\treturn hasCreate2();\n\tcase Instruction::EXTCODEHASH:\n\t\treturn hasExtCodeHash();\n\tcase Instruction::CHAINID:\n\t\treturn hasChainID();\n\tcase Instruction::SELFBALANCE:\n\t\treturn hasSelfBalance();\n\tdefault:\n\t\treturn true;\n\t}\n}\n\n"
  },
  {
    "path": "liblangutil/ErrorReporter.cpp",
    "content": "/*\n\tThis file is part of solidity.\n\n\tsolidity is free software: you can redistribute it and/or modify\n\tit under the terms of the GNU General Public License as published by\n\tthe Free Software Foundation, either version 3 of the License, or\n\t(at your option) any later version.\n\n\tsolidity is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with solidity.  If not, see <http://www.gnu.org/licenses/>.\n*/\n// SPDX-License-Identifier: GPL-3.0\n/**\n * @author Rhett <roadriverrail@gmail.com>\n * @date 2017\n * Error helper class.\n */\n\n#include <liblangutil/ErrorReporter.h>\n#include <liblangutil/SourceLocation.h>\n#include <cstdlib>\n#include <memory>\n\nusing namespace std;\nusing namespace solidity;\nusing namespace solidity::langutil;\n\nErrorReporter& ErrorReporter::operator=(ErrorReporter const& _errorReporter)\n{\n\tif (&_errorReporter == this)\n\t\treturn *this;\n\tm_errorList = _errorReporter.m_errorList;\n\treturn *this;\n}\n\nvoid ErrorReporter::warning(ErrorId _error, string const& _description)\n{\n\terror(_error, Error::Type::Warning, SourceLocation(), _description);\n}\n\nvoid ErrorReporter::warning(\n\tErrorId _error,\n\tSourceLocation const& _location,\n\tstring const& _description\n)\n{\n\terror(_error, Error::Type::Warning, _location, _description);\n}\n\nvoid ErrorReporter::warning(\n\tErrorId _error,\n\tSourceLocation const& _location,\n\tstring const& _description,\n\tSecondarySourceLocation const& _secondaryLocation\n)\n{\n\terror(_error, Error::Type::Warning, _location, _secondaryLocation, _description);\n}\n\nvoid ErrorReporter::error(ErrorId _errorId, Error::Type _type, SourceLocation const& _location, string const& _description)\n{\n\tif (checkForExcessiveErrors(_type))\n\t\treturn;\n\n\tm_errorList.push_back(make_shared<Error>(_errorId, _type, _description, _location));\n}\n\nvoid ErrorReporter::error(ErrorId _errorId, Error::Type _type, SourceLocation const& _location, SecondarySourceLocation const& _secondaryLocation, string const& _description)\n{\n\tif (checkForExcessiveErrors(_type))\n\t\treturn;\n\n\tm_errorList.push_back(make_shared<Error>(_errorId, _type, _description, _location, _secondaryLocation));\n}\n\nbool ErrorReporter::hasExcessiveErrors() const\n{\n\treturn m_errorCount > c_maxErrorsAllowed;\n}\n\nbool ErrorReporter::checkForExcessiveErrors(Error::Type _type)\n{\n\tif (_type == Error::Type::Warning)\n\t{\n\t\tm_warningCount++;\n\n\t\tif (m_warningCount == c_maxWarningsAllowed)\n\t\t\tm_errorList.push_back(make_shared<Error>(4591_error, Error::Type::Warning, \"There are more than 256 warnings. Ignoring the rest.\"));\n\n\t\tif (m_warningCount >= c_maxWarningsAllowed)\n\t\t\treturn true;\n\t}\n\telse\n\t{\n\t\tm_errorCount++;\n\n\t\tif (m_errorCount > c_maxErrorsAllowed)\n\t\t{\n\t\t\tm_errorList.push_back(make_shared<Error>(4013_error, Error::Type::Warning, \"There are more than 256 errors. 
Aborting.\"));\n\t\t\tBOOST_THROW_EXCEPTION(FatalError());\n\t\t}\n\t}\n\n\treturn false;\n}\n\nvoid ErrorReporter::fatalError(ErrorId _error, Error::Type _type, SourceLocation const& _location, SecondarySourceLocation const& _secondaryLocation, string const& _description)\n{\n\terror(_error, _type, _location, _secondaryLocation, _description);\n\tBOOST_THROW_EXCEPTION(FatalError());\n}\n\nvoid ErrorReporter::fatalError(ErrorId _error, Error::Type _type, SourceLocation const& _location, string const& _description)\n{\n\terror(_error, _type, _location, _description);\n\tBOOST_THROW_EXCEPTION(FatalError());\n}\n\nErrorList const& ErrorReporter::errors() const\n{\n\treturn m_errorList;\n}\n\nvoid ErrorReporter::clear()\n{\n\tm_errorList.clear();\n}\n\nvoid ErrorReporter::declarationError(ErrorId _error, SourceLocation const& _location, SecondarySourceLocation const& _secondaryLocation, string const& _description)\n{\n\terror(\n\t\t_error,\n\t\tError::Type::DeclarationError,\n\t\t_location,\n\t\t_secondaryLocation,\n\t\t_description\n\t);\n}\n\nvoid ErrorReporter::declarationError(ErrorId _error, SourceLocation const& _location, string const& _description)\n{\n\terror(\n\t\t_error,\n\t\tError::Type::DeclarationError,\n\t\t_location,\n\t\t_description\n\t);\n}\n\nvoid ErrorReporter::fatalDeclarationError(ErrorId _error, SourceLocation const& _location, std::string const& _description)\n{\n\tfatalError(\n\t\t_error,\n\t\tError::Type::DeclarationError,\n\t\t_location,\n\t\t_description);\n}\n\nvoid ErrorReporter::parserError(ErrorId _error, SourceLocation const& _location, string const& _description)\n{\n\terror(\n\t\t_error,\n\t\tError::Type::ParserError,\n\t\t_location,\n\t\t_description\n\t);\n}\n\nvoid ErrorReporter::fatalParserError(ErrorId _error, SourceLocation const& _location, string const& _description)\n{\n\tfatalError(\n\t\t_error,\n\t\tError::Type::ParserError,\n\t\t_location,\n\t\t_description\n\t);\n}\n\nvoid ErrorReporter::syntaxError(ErrorId _error, SourceLocation const& _location, string const& _description)\n{\n\terror(\n\t\t_error,\n\t\tError::Type::SyntaxError,\n\t\t_location,\n\t\t_description\n\t);\n}\n\nvoid ErrorReporter::typeError(ErrorId _error, SourceLocation const& _location, SecondarySourceLocation const& _secondaryLocation, string const& _description)\n{\n\terror(\n\t\t_error,\n\t\tError::Type::TypeError,\n\t\t_location,\n\t\t_secondaryLocation,\n\t\t_description\n\t);\n}\n\nvoid ErrorReporter::typeError(ErrorId _error, SourceLocation const& _location, string const& _description)\n{\n\terror(\n\t\t_error,\n\t\tError::Type::TypeError,\n\t\t_location,\n\t\t_description\n\t);\n}\n\n\nvoid ErrorReporter::fatalTypeError(ErrorId _error, SourceLocation const& _location, SecondarySourceLocation const& _secondaryLocation, string const& _description)\n{\n\tfatalError(\n\t\t_error,\n\t\tError::Type::TypeError,\n\t\t_location,\n\t\t_secondaryLocation,\n\t\t_description\n\t);\n}\n\nvoid ErrorReporter::fatalTypeError(ErrorId _error, SourceLocation const& _location, string const& _description)\n{\n\tfatalError(\n\t\t_error,\n\t\tError::Type::TypeError,\n\t\t_location,\n\t\t_description\n\t);\n}\n\nvoid ErrorReporter::docstringParsingError(ErrorId _error, SourceLocation const& _location, string const& _description)\n{\n\terror(\n\t\t_error,\n\t\tError::Type::DocstringParsingError,\n\t\t_location,\n\t\t_description\n\t);\n}\n"
  },
  {
    "path": "liblangutil/Exceptions.cpp",
    "content": "/*\n\tThis file is part of solidity.\n\n\tsolidity is free software: you can redistribute it and/or modify\n\tit under the terms of the GNU General Public License as published by\n\tthe Free Software Foundation, either version 3 of the License, or\n\t(at your option) any later version.\n\n\tsolidity is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with solidity.  If not, see <http://www.gnu.org/licenses/>.\n*/\n// SPDX-License-Identifier: GPL-3.0\n/**\n * @author Liana <liana@ethdev.com>\n * @date 2015\n * Solidity exception hierarchy.\n */\n\n#include <liblangutil/Exceptions.h>\n\nusing namespace std;\nusing namespace solidity;\nusing namespace solidity::langutil;\n\nError::Error(\n\tErrorId _errorId, Error::Type _type,\n\tstd::string const& _description,\n\tSourceLocation const& _location,\n\tSecondarySourceLocation const& _secondaryLocation\n):\n\tm_errorId(_errorId),\n\tm_type(_type)\n{\n\tswitch (m_type)\n\t{\n\tcase Type::CodeGenerationError:\n\t\tm_typeName = \"CodeGenerationError\";\n\t\tbreak;\n\tcase Type::DeclarationError:\n\t\tm_typeName = \"DeclarationError\";\n\t\tbreak;\n\tcase Type::DocstringParsingError:\n\t\tm_typeName = \"DocstringParsingError\";\n\t\tbreak;\n\tcase Type::ParserError:\n\t\tm_typeName = \"ParserError\";\n\t\tbreak;\n\tcase Type::SyntaxError:\n\t\tm_typeName = \"SyntaxError\";\n\t\tbreak;\n\tcase Type::TypeError:\n\t\tm_typeName = \"TypeError\";\n\t\tbreak;\n\tcase Type::Warning:\n\t\tm_typeName = \"Warning\";\n\t\tbreak;\n\t}\n\n\tif (_location.isValid())\n\t\t*this << errinfo_sourceLocation(_location);\n\tif (!_secondaryLocation.infos.empty())\n\t\t*this << errinfo_secondarySourceLocation(_secondaryLocation);\n\tif (!_description.empty())\n\t\t*this << util::errinfo_comment(_description);\n}\n"
  },
  {
    "path": "liblangutil/ParserBase.cpp",
    "content": "/*\n\tThis file is part of solidity.\n\n\tsolidity is free software: you can redistribute it and/or modify\n\tit under the terms of the GNU General Public License as published by\n\tthe Free Software Foundation, either version 3 of the License, or\n\t(at your option) any later version.\n\n\tsolidity is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with solidity.  If not, see <http://www.gnu.org/licenses/>.\n*/\n// SPDX-License-Identifier: GPL-3.0\n/**\n * @author Christian <c@ethdev.com>\n * @date 2016\n * Solidity parser shared functionality.\n */\n\n#include <liblangutil/ParserBase.h>\n#include <liblangutil/Scanner.h>\n#include <liblangutil/ErrorReporter.h>\n\nusing namespace std;\nusing namespace solidity;\nusing namespace solidity::langutil;\n\nSourceLocation ParserBase::currentLocation() const\n{\n\treturn m_scanner->currentLocation();\n}\n\nToken ParserBase::currentToken() const\n{\n\treturn m_scanner->currentToken();\n}\n\nToken ParserBase::peekNextToken() const\n{\n\treturn m_scanner->peekNextToken();\n}\n\nstring ParserBase::currentLiteral() const\n{\n\treturn m_scanner->currentLiteral();\n}\n\nToken ParserBase::advance()\n{\n\treturn m_scanner->next();\n}\n\nstring ParserBase::tokenName(Token _token)\n{\n\tif (_token == Token::Identifier)\n\t\treturn \"identifier\";\n\telse if (_token == Token::EOS)\n\t\treturn \"end of source\";\n\telse if (TokenTraits::isReservedKeyword(_token))\n\t\treturn \"reserved keyword '\" + TokenTraits::friendlyName(_token) + \"'\";\n\telse if (TokenTraits::isElementaryTypeName(_token)) //for the sake of accuracy in reporting\n\t{\n\t\tElementaryTypeNameToken elemTypeName = m_scanner->currentElementaryTypeNameToken();\n\t\treturn \"'\" + elemTypeName.toString() + \"'\";\n\t}\n\telse\n\t\treturn \"'\" + TokenTraits::friendlyName(_token) + \"'\";\n}\n\nvoid ParserBase::expectToken(Token _value, bool _advance)\n{\n\tToken tok = m_scanner->currentToken();\n\tif (tok != _value)\n\t{\n\t\tstring const expectedToken = ParserBase::tokenName(_value);\n\t\tif (m_parserErrorRecovery)\n\t\t\tparserError(6635_error, \"Expected \" + expectedToken + \" but got \" + tokenName(tok));\n\t\telse\n\t\t\tfatalParserError(2314_error, \"Expected \" + expectedToken + \" but got \" + tokenName(tok));\n\t\t// Do not advance so that recovery can sync or make use of the current token.\n\t\t// This is especially useful if the expected token\n\t\t// is the only one that is missing and is at the end of a construct.\n\t\t// \"{ ... 
; }\" is such an example.\n\t\t//        ^\n\t\t_advance = false;\n\t}\n\tif (_advance)\n\t\tm_scanner->next();\n}\n\nvoid ParserBase::expectTokenOrConsumeUntil(Token _value, string const& _currentNodeName, bool _advance)\n{\n\tsolAssert(m_inParserRecovery, \"The function is supposed to be called during parser recovery only.\");\n\n\tToken tok = m_scanner->currentToken();\n\tif (tok != _value)\n\t{\n\t\tSourceLocation errorLoc = currentLocation();\n\t\tint startPosition = errorLoc.start;\n\t\twhile (m_scanner->currentToken() != _value && m_scanner->currentToken() != Token::EOS)\n\t\t\tm_scanner->next();\n\n\t\tstring const expectedToken = ParserBase::tokenName(_value);\n\t\tif (m_scanner->currentToken() == Token::EOS)\n\t\t{\n\t\t\t// rollback to where the token started, and raise exception to be caught at a higher level.\n\t\t\tm_scanner->setPosition(static_cast<size_t>(startPosition));\n\t\t\tstring const msg = \"In \" + _currentNodeName + \", \" + expectedToken + \"is expected; got \" + ParserBase::tokenName(tok) + \" instead.\";\n\t\t\tfatalParserError(1957_error, errorLoc, msg);\n\t\t}\n\t\telse\n\t\t{\n\t\t\tparserWarning(3796_error, \"Recovered in \" + _currentNodeName + \" at \" + expectedToken + \".\");\n\t\t\tm_inParserRecovery = false;\n\t\t}\n\t}\n\telse\n\t{\n\t\tstring expectedToken = ParserBase::tokenName(_value);\n\t\tparserWarning(3347_error, \"Recovered in \" + _currentNodeName + \" at \" + expectedToken + \".\");\n\t\tm_inParserRecovery = false;\n\t}\n\n\tif (_advance)\n\t\tm_scanner->next();\n}\n\nvoid ParserBase::increaseRecursionDepth()\n{\n\tm_recursionDepth++;\n\tif (m_recursionDepth >= 1200)\n\t\tfatalParserError(7319_error, \"Maximum recursion depth reached during parsing.\");\n}\n\nvoid ParserBase::decreaseRecursionDepth()\n{\n\tsolAssert(m_recursionDepth > 0, \"\");\n\tm_recursionDepth--;\n}\n\nvoid ParserBase::parserWarning(ErrorId _error, string const& _description)\n{\n\tm_errorReporter.warning(_error, currentLocation(), _description);\n}\n\nvoid ParserBase::parserWarning(ErrorId _error, SourceLocation const& _location, string const& _description)\n{\n\tm_errorReporter.warning(_error, _location, _description);\n}\n\nvoid ParserBase::parserError(ErrorId _error, SourceLocation const& _location, string const& _description)\n{\n\tm_errorReporter.parserError(_error, _location, _description);\n}\n\nvoid ParserBase::parserError(ErrorId _error, string const& _description)\n{\n\tparserError(_error, currentLocation(), _description);\n}\n\nvoid ParserBase::fatalParserError(ErrorId _error, string const& _description)\n{\n\tfatalParserError(_error, currentLocation(), _description);\n}\n\nvoid ParserBase::fatalParserError(ErrorId _error, SourceLocation const& _location, string const& _description)\n{\n\tm_errorReporter.fatalParserError(_error, _location, _description);\n}\n"
  },
  {
    "path": "liblangutil/Scanner.cpp",
    "content": "/*\n * This file is part of solidity.\n *\n * solidity is free software: you can redistribute it and/or modify\n * it under the terms of the GNU General Public License as published by\n * the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * solidity is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License\n * along with solidity.  If not, see <http://www.gnu.org/licenses/>.\n *\n * This file is derived from the file \"scanner.cc\", which was part of the\n * V8 project. The original copyright header follows:\n *\n * Copyright 2006-2012, the V8 project authors. All rights reserved.\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are\n * met:\n *\n * * Redistributions of source code must retain the above copyright\n *   notice, this list of conditions and the following disclaimer.\n * * Redistributions in binary form must reproduce the above\n *   copyright notice, this list of conditions and the following\n *   disclaimer in the documentation and/or other materials provided\n *   with the distribution.\n * * Neither the name of Google Inc. nor the names of its\n *   contributors may be used to endorse or promote products derived\n *   from this software without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT\n * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*/\n/**\n * @author Christian <c@ethdev.com>\n * @date 2014\n * Solidity scanner.\n */\n\n#include <liblangutil/Common.h>\n#include <liblangutil/Exceptions.h>\n#include <liblangutil/Scanner.h>\n\n#include <boost/algorithm/string/classification.hpp>\n\n#include <optional>\n#include <string_view>\n#include <tuple>\n\nusing namespace std;\n\nnamespace solidity::langutil {\n\nstring to_string(ScannerError _errorCode)\n{\n\tswitch (_errorCode)\n\t{\n\t\tcase ScannerError::NoError: return \"No error.\";\n\t\tcase ScannerError::IllegalToken: return \"Invalid token.\";\n\t\tcase ScannerError::IllegalHexString: return \"Expected even number of hex-nibbles.\";\n\t\tcase ScannerError::IllegalHexDigit: return \"Hexadecimal digit missing or invalid.\";\n\t\tcase ScannerError::IllegalCommentTerminator: return \"Expected multi-line comment-terminator.\";\n\t\tcase ScannerError::IllegalEscapeSequence: return \"Invalid escape sequence.\";\n\t\tcase ScannerError::IllegalCharacterInString: return \"Invalid character in string.\";\n\t\tcase ScannerError::IllegalStringEndQuote: return \"Expected string end-quote.\";\n\t\tcase ScannerError::IllegalNumberSeparator: return \"Invalid use of number separator '_'.\";\n\t\tcase ScannerError::IllegalExponent: return \"Invalid exponent.\";\n\t\tcase ScannerError::IllegalNumberEnd: return \"Identifier-start is not allowed at end of a number.\";\n\t\tcase ScannerError::OctalNotAllowed: return \"Octal numbers not allowed.\";\n\t\tcase ScannerError::DirectionalOverrideUnderflow: return \"Unicode direction override underflow in comment or string literal.\";\n\t\tcase ScannerError::DirectionalOverrideMismatch: return \"Mismatching directional override markers in comment or string literal.\";\n\t\tdefault:\n\t\t\tsolAssert(false, \"Unhandled case in to_string(ScannerError)\");\n\t\t\treturn \"\";\n\t}\n}\n\n\nostream& operator<<(ostream& os, ScannerError _errorCode)\n{\n\treturn os << to_string(_errorCode);\n}\n\n/// Scoped helper for literal recording. 
Automatically drops the literal\n/// if aborting the scanning before it's complete.\nenum LiteralType\n{\n\tLITERAL_TYPE_STRING,\n\tLITERAL_TYPE_NUMBER, // not really different from string type in behaviour\n\tLITERAL_TYPE_COMMENT\n};\n\nclass LiteralScope\n{\npublic:\n\texplicit LiteralScope(Scanner* _self, enum LiteralType _type):\n\t\tm_type(_type),\n\t\tm_scanner(_self),\n\t\tm_complete(false)\n\t{\n\t\tif (_type == LITERAL_TYPE_COMMENT)\n\t\t\tm_scanner->m_skippedComments[Scanner::NextNext].literal.clear();\n\t\telse\n\t\t\tm_scanner->m_tokens[Scanner::NextNext].literal.clear();\n\t}\n\t~LiteralScope()\n\t{\n\t\tif (!m_complete)\n\t\t{\n\t\t\tif (m_type == LITERAL_TYPE_COMMENT)\n\t\t\t\tm_scanner->m_skippedComments[Scanner::NextNext].literal.clear();\n\t\t\telse\n\t\t\t\tm_scanner->m_tokens[Scanner::NextNext].literal.clear();\n\t\t}\n\t}\n\tvoid complete() { m_complete = true; }\n\nprivate:\n\tenum LiteralType m_type;\n\tScanner* m_scanner;\n\tbool m_complete;\n};\n\nvoid Scanner::reset(CharStream _source)\n{\n\tm_source = make_shared<CharStream>(std::move(_source));\n\treset();\n}\n\nvoid Scanner::reset(shared_ptr<CharStream> _source)\n{\n\tsolAssert(_source.get() != nullptr, \"You MUST provide a CharStream when resetting.\");\n\tm_source = std::move(_source);\n\treset();\n}\n\nvoid Scanner::reset()\n{\n\tm_source->reset();\n\tm_kind = ScannerKind::Solidity;\n\tm_char = m_source->get();\n\tskipWhitespace();\n\tnext();\n\tnext();\n\tnext();\n}\n\nvoid Scanner::setPosition(size_t _offset)\n{\n\tm_char = m_source->setPosition(_offset);\n\tscanToken();\n\tnext();\n\tnext();\n}\n\nbool Scanner::scanHexByte(char& o_scannedByte)\n{\n\tchar x = 0;\n\tfor (size_t i = 0; i < 2; i++)\n\t{\n\t\tint d = hexValue(m_char);\n\t\tif (d < 0)\n\t\t{\n\t\t\trollback(i);\n\t\t\treturn false;\n\t\t}\n\t\tx = static_cast<char>(x * 16 + d);\n\t\tadvance();\n\t}\n\to_scannedByte = x;\n\treturn true;\n}\n\nstd::optional<unsigned> Scanner::scanUnicode()\n{\n\tunsigned x = 0;\n\tfor (size_t i = 0; i < 4; i++)\n\t{\n\t\tint d = hexValue(m_char);\n\t\tif (d < 0)\n\t\t{\n\t\t\trollback(i);\n\t\t\treturn {};\n\t\t}\n\t\tx = x * 16 + static_cast<unsigned>(d);\n\t\tadvance();\n\t}\n\treturn x;\n}\n\n// This supports codepoints between 0000 and FFFF.\nvoid Scanner::addUnicodeAsUTF8(unsigned codepoint)\n{\n\tif (codepoint <= 0x7f)\n\t\taddLiteralChar(char(codepoint));\n\telse if (codepoint <= 0x7ff)\n\t{\n\t\taddLiteralChar(char(0xc0u | (codepoint >> 6u)));\n\t\taddLiteralChar(char(0x80u | (codepoint & 0x3fu)));\n\t}\n\telse\n\t{\n\t\taddLiteralChar(char(0xe0u | (codepoint >> 12u)));\n\t\taddLiteralChar(char(0x80u | ((codepoint >> 6u) & 0x3fu)));\n\t\taddLiteralChar(char(0x80u | (codepoint & 0x3fu)));\n\t}\n}\n\nvoid Scanner::rescan()\n{\n\tsize_t rollbackTo = 0;\n\tif (m_skippedComments[Current].literal.empty())\n\t\trollbackTo = static_cast<size_t>(m_tokens[Current].location.start);\n\telse\n\t\trollbackTo = static_cast<size_t>(m_skippedComments[Current].location.start);\n\tm_char = m_source->rollback(m_source->position() - rollbackTo);\n\tnext();\n\tnext();\n\tnext();\n}\n\n// Ensure that tokens can be stored in a byte.\nBOOST_STATIC_ASSERT(TokenTraits::count() <= 0x100);\n\nToken Scanner::next()\n{\n\tm_tokens[Current] = std::move(m_tokens[Next]);\n\tm_tokens[Next] = std::move(m_tokens[NextNext]);\n\tm_skippedComments[Current] = std::move(m_skippedComments[Next]);\n\tm_skippedComments[Next] = std::move(m_skippedComments[NextNext]);\n\n\tscanToken();\n\n\treturn m_tokens[Current].token;\n}\n\nToken 
Scanner::selectToken(char _next, Token _then, Token _else)\n{\n\tadvance();\n\tif (m_char == _next)\n\t\treturn selectToken(_then);\n\telse\n\t\treturn _else;\n}\n\nbool Scanner::skipWhitespace()\n{\n\tsize_t const startPosition = sourcePos();\n\twhile (isWhiteSpace(m_char))\n\t\tadvance();\n\t// Return whether or not we skipped any characters.\n\treturn sourcePos() != startPosition;\n}\n\nbool Scanner::skipWhitespaceExceptUnicodeLinebreak()\n{\n\tsize_t const startPosition = sourcePos();\n\twhile (isWhiteSpace(m_char) && !isUnicodeLinebreak())\n\t\tadvance();\n\t// Return whether or not we skipped any characters.\n\treturn sourcePos() != startPosition;\n}\n\n\nnamespace\n{\n\n/// Tries to scan for an RLO/LRO/RLE/LRE/PDF and keeps track of the script writing direction override depth.\n///\n/// @returns ScannerError::NoError if parsing succeeded and all directional override markers were paired,\n///          or an error code if the directional markup is invalid and the error should be reported\n///          to the user.\nstatic ScannerError validateBiDiMarkup(CharStream& _stream, size_t _startPosition)\n{\n\tstatic array<pair<string_view, int>, 5> constexpr directionalSequences{\n\t\tpair<string_view, int>{\"\\xE2\\x80\\xAD\", 1}, // U+202D (LRO - Left-to-Right Override)\n\t\tpair<string_view, int>{\"\\xE2\\x80\\xAE\", 1}, // U+202E (RLO - Right-to-Left Override)\n\t\tpair<string_view, int>{\"\\xE2\\x80\\xAA\", 1}, // U+202A (LRE - Left-to-Right Embedding)\n\t\tpair<string_view, int>{\"\\xE2\\x80\\xAB\", 1}, // U+202B (RLE - Right-to-Left Embedding)\n\t\tpair<string_view, int>{\"\\xE2\\x80\\xAC\", -1} // U+202C (PDF - Pop Directional Formatting)\n\t};\n\n\tsize_t endPosition = _stream.position();\n\t_stream.setPosition(_startPosition);\n\n\tint directionOverrideDepth = 0;\n\n\tfor (size_t currentPos = _startPosition; currentPos < endPosition; ++currentPos)\n\t{\n\t\t_stream.setPosition(currentPos);\n\n\t\tfor (auto const& [sequence, depthChange]: directionalSequences)\n\t\t\tif (_stream.prefixMatch(sequence))\n\t\t\t\tdirectionOverrideDepth += depthChange;\n\n\t\tif (directionOverrideDepth < 0)\n\t\t\treturn ScannerError::DirectionalOverrideUnderflow;\n\t}\n\n\t_stream.setPosition(endPosition);\n\n\treturn directionOverrideDepth > 0 ? ScannerError::DirectionalOverrideMismatch : ScannerError::NoError;\n}\n\n}\n\nToken Scanner::skipSingleLineComment()\n{\n\t// Line terminator is not part of the comment. 
If it is a\n\t// non-ASCII line terminator, it will result in a parser error.\n\tsize_t startPosition = m_source->position();\n\twhile (!isUnicodeLinebreak())\n\t\tif (!advance())\n\t\t\tbreak;\n\n\tScannerError unicodeDirectionError = validateBiDiMarkup(*m_source, startPosition);\n\tif (unicodeDirectionError != ScannerError::NoError)\n\t\treturn setError(unicodeDirectionError);\n\n\treturn Token::Whitespace;\n}\n\nbool Scanner::atEndOfLine() const\n{\n\treturn m_char == '\\n' || m_char == '\\r';\n}\n\nbool Scanner::tryScanEndOfLine()\n{\n\tif (m_char == '\\n')\n\t{\n\t\tadvance();\n\t\treturn true;\n\t}\n\n\tif (m_char == '\\r')\n\t{\n\t\tif (advance() && m_char == '\\n')\n\t\t\tadvance();\n\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nsize_t Scanner::scanSingleLineDocComment()\n{\n\tLiteralScope literal(this, LITERAL_TYPE_COMMENT);\n\tsize_t endPosition = m_source->position();\n\n\tskipWhitespaceExceptUnicodeLinebreak();\n\n\twhile (!isSourcePastEndOfInput())\n\t{\n\t\tendPosition = m_source->position();\n\t\tif (tryScanEndOfLine())\n\t\t{\n\t\t\t// Check whether the next line is also a single-line doc comment.\n\t\t\t// If any whitespace was skipped, keep the end position from before it.\n\t\t\tif (!skipWhitespaceExceptUnicodeLinebreak())\n\t\t\t\tendPosition = m_source->position();\n\n\t\t\tif (!m_source->isPastEndOfInput(3) &&\n\t\t\t\tm_source->get(0) == '/' &&\n\t\t\t\tm_source->get(1) == '/' &&\n\t\t\t\tm_source->get(2) == '/')\n\t\t\t{\n\t\t\t\tif (!m_source->isPastEndOfInput(4) && m_source->get(3) == '/')\n\t\t\t\t\tbreak; // \"////\" is not a documentation comment\n\t\t\t\tm_char = m_source->advanceAndGet(3);\n\t\t\t\tif (atEndOfLine())\n\t\t\t\t\tcontinue;\n\t\t\t\taddCommentLiteralChar('\\n');\n\t\t\t}\n\t\t\telse\n\t\t\t\tbreak; // next line is not a documentation comment, we are done\n\t\t}\n\t\telse if (isUnicodeLinebreak())\n\t\t\t// Any line terminator that is not '\\n' is considered to end the\n\t\t\t// comment.\n\t\t\tbreak;\n\t\taddCommentLiteralChar(m_char);\n\t\tadvance();\n\t}\n\tliteral.complete();\n\treturn endPosition;\n}\n\nToken Scanner::skipMultiLineComment()\n{\n\tsize_t startPosition = m_source->position();\n\twhile (!isSourcePastEndOfInput())\n\t{\n\t\tchar prevChar = m_char;\n\t\tadvance();\n\n\t\t// If we have reached the end of the multi-line comment, we\n\t\t// consume the '/' and insert a whitespace. 
This way all\n\t\t// multi-line comments are treated as whitespace.\n\t\tif (prevChar == '*' && m_char == '/')\n\t\t{\n\t\t\tScannerError unicodeDirectionError = validateBiDiMarkup(*m_source, startPosition);\n\t\t\tif (unicodeDirectionError != ScannerError::NoError)\n\t\t\t\treturn setError(unicodeDirectionError);\n\n\t\t\tm_char = ' ';\n\t\t\treturn Token::Whitespace;\n\t\t}\n\t}\n\t// Unterminated multi-line comment.\n\treturn setError(ScannerError::IllegalCommentTerminator);\n}\n\nToken Scanner::scanMultiLineDocComment()\n{\n\tLiteralScope literal(this, LITERAL_TYPE_COMMENT);\n\tbool endFound = false;\n\tbool charsAdded = false;\n\n\twhile (isWhiteSpace(m_char) && !atEndOfLine())\n\t\tadvance();\n\n\twhile (!isSourcePastEndOfInput())\n\t{\n\t\t//handle newlines in multline comments\n\t\tif (atEndOfLine())\n\t\t{\n\t\t\tskipWhitespace();\n\t\t\tif (!m_source->isPastEndOfInput(1) && m_source->get(0) == '*' && m_source->get(1) == '*')\n\t\t\t{ // it is unknown if this leads to the end of the comment\n\t\t\t\taddCommentLiteralChar('*');\n\t\t\t\tadvance();\n\t\t\t}\n\t\t\telse if (!m_source->isPastEndOfInput(1) && m_source->get(0) == '*' && m_source->get(1) != '/')\n\t\t\t{ // skip first '*' in subsequent lines\n\t\t\t\tm_char = m_source->advanceAndGet(1);\n\t\t\t\tif (atEndOfLine()) // ignores empty lines\n\t\t\t\t\tcontinue;\n\t\t\t\tif (charsAdded)\n\t\t\t\t\taddCommentLiteralChar('\\n'); // corresponds to the end of previous line\n\t\t\t}\n\t\t\telse if (!m_source->isPastEndOfInput(1) && m_source->get(0) == '*' && m_source->get(1) == '/')\n\t\t\t{ // if after newline the comment ends, don't insert the newline\n\t\t\t\tm_char = m_source->advanceAndGet(2);\n\t\t\t\tendFound = true;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\telse if (charsAdded)\n\t\t\t\taddCommentLiteralChar('\\n');\n\t\t}\n\n\t\tif (!m_source->isPastEndOfInput(1) && m_source->get(0) == '*' && m_source->get(1) == '/')\n\t\t{\n\t\t\tm_char = m_source->advanceAndGet(2);\n\t\t\tendFound = true;\n\t\t\tbreak;\n\t\t}\n\t\taddCommentLiteralChar(m_char);\n\t\tcharsAdded = true;\n\t\tadvance();\n\t}\n\tliteral.complete();\n\tif (!endFound)\n\t\treturn setError(ScannerError::IllegalCommentTerminator);\n\telse\n\t\treturn Token::CommentLiteral;\n}\n\nToken Scanner::scanSlash()\n{\n\tint firstSlashPosition = static_cast<int>(sourcePos());\n\tadvance();\n\tif (m_char == '/')\n\t{\n\t\tif (!advance()) /* double slash comment directly before EOS */\n\t\t\treturn Token::Whitespace;\n\t\telse if (m_char == '/')\n\t\t{\n\t\t\tadvance(); //consume the last '/' at ///\n\n\t\t\t// \"////\"\n\t\t\tif (m_char == '/')\n\t\t\t\treturn skipSingleLineComment();\n\t\t\t// doxygen style /// comment\n\t\t\tm_skippedComments[NextNext].location.start = firstSlashPosition;\n\t\t\tm_skippedComments[NextNext].location.source = m_source;\n\t\t\tm_skippedComments[NextNext].token = Token::CommentLiteral;\n\t\t\tm_skippedComments[NextNext].location.end = static_cast<int>(scanSingleLineDocComment());\n\t\t\treturn Token::Whitespace;\n\t\t}\n\t\telse\n\t\t\treturn skipSingleLineComment();\n\t}\n\telse if (m_char == '*')\n\t{\n\t\t// doxygen style /** natspec comment\n\t\tif (!advance()) /* slash star comment before EOS */\n\t\t\treturn setError(ScannerError::IllegalCommentTerminator);\n\t\telse if (m_char == '*')\n\t\t{\n\t\t\tadvance(); //consume the last '*' at /**\n\n\t\t\t// \"/**/\"\n\t\t\tif (m_char == '/')\n\t\t\t{\n\t\t\t\tadvance(); //skip the closing slash\n\t\t\t\treturn Token::Whitespace;\n\t\t\t}\n\t\t\t// \"/***\"\n\t\t\tif (m_char == '*')\n\t\t\t\t// 
\"/***/\" may be interpreted as empty natspec or skipped; skipping is simpler\n\t\t\t\treturn skipMultiLineComment();\n\t\t\t// we actually have a multiline documentation comment\n\t\t\tm_skippedComments[NextNext].location.start = firstSlashPosition;\n\t\t\tm_skippedComments[NextNext].location.source = m_source;\n\t\t\tToken comment = scanMultiLineDocComment();\n\t\t\tm_skippedComments[NextNext].location.end = static_cast<int>(sourcePos());\n\t\t\tm_skippedComments[NextNext].token = comment;\n\t\t\tif (comment == Token::Illegal)\n\t\t\t\treturn Token::Illegal; // error already set\n\t\t\telse\n\t\t\t\treturn Token::Whitespace;\n\t\t}\n\t\telse\n\t\t\treturn skipMultiLineComment();\n\t}\n\telse if (m_char == '=')\n\t\treturn selectToken(Token::AssignDiv);\n\telse\n\t\treturn Token::Div;\n}\n\nvoid Scanner::scanToken()\n{\n\tm_tokens[NextNext] = {};\n\tm_skippedComments[NextNext] = {};\n\n\tToken token;\n\t// M and N are for the purposes of grabbing different type sizes\n\tunsigned m;\n\tunsigned n;\n\tdo\n\t{\n\t\t// Remember the position of the next token\n\t\tm_tokens[NextNext].location.start = static_cast<int>(sourcePos());\n\t\tswitch (m_char)\n\t\t{\n\t\tcase '\"':\n\t\tcase '\\'':\n\t\t\ttoken = scanString(false);\n\t\t\tbreak;\n\t\tcase '<':\n\t\t\t// < <= << <<=\n\t\t\tadvance();\n\t\t\tif (m_char == '=')\n\t\t\t\ttoken = selectToken(Token::LessThanOrEqual);\n\t\t\telse if (m_char == '<')\n\t\t\t\ttoken = selectToken('=', Token::AssignShl, Token::SHL);\n\t\t\telse\n\t\t\t\ttoken = Token::LessThan;\n\t\t\tbreak;\n\t\tcase '>':\n\t\t\t// > >= >> >>= >>> >>>=\n\t\t\tadvance();\n\t\t\tif (m_char == '=')\n\t\t\t\ttoken = selectToken(Token::GreaterThanOrEqual);\n\t\t\telse if (m_char == '>')\n\t\t\t{\n\t\t\t\t// >> >>= >>> >>>=\n\t\t\t\tadvance();\n\t\t\t\tif (m_char == '=')\n\t\t\t\t\ttoken = selectToken(Token::AssignSar);\n\t\t\t\telse if (m_char == '>')\n\t\t\t\t\ttoken = selectToken('=', Token::AssignShr, Token::SHR);\n\t\t\t\telse\n\t\t\t\t\ttoken = Token::SAR;\n\t\t\t}\n\t\t\telse\n\t\t\t\ttoken = Token::GreaterThan;\n\t\t\tbreak;\n\t\tcase '=':\n\t\t\t// = == =>\n\t\t\tadvance();\n\t\t\tif (m_char == '=')\n\t\t\t\ttoken = selectToken(Token::Equal);\n\t\t\telse if (m_char == '>')\n\t\t\t\ttoken = selectToken(Token::DoubleArrow);\n\t\t\telse\n\t\t\t\ttoken = Token::Assign;\n\t\t\tbreak;\n\t\tcase '!':\n\t\t\t// ! 
!=\n\t\t\tadvance();\n\t\t\tif (m_char == '=')\n\t\t\t\ttoken = selectToken(Token::NotEqual);\n\t\t\telse\n\t\t\t\ttoken = Token::Not;\n\t\t\tbreak;\n\t\tcase '+':\n\t\t\t// + ++ +=\n\t\t\tadvance();\n\t\t\tif (m_char == '+')\n\t\t\t\ttoken = selectToken(Token::Inc);\n\t\t\telse if (m_char == '=')\n\t\t\t\ttoken = selectToken(Token::AssignAdd);\n\t\t\telse\n\t\t\t\ttoken = Token::Add;\n\t\t\tbreak;\n\t\tcase '-':\n\t\t\t// - -- -= ->\n\t\t\tadvance();\n\t\t\tif (m_char == '-')\n\t\t\t\ttoken = selectToken(Token::Dec);\n\t\t\telse if (m_char == '=')\n\t\t\t\ttoken = selectToken(Token::AssignSub);\n\t\t\telse if (m_char == '>')\n\t\t\t\ttoken = selectToken(Token::RightArrow);\n\t\t\telse\n\t\t\t\ttoken = Token::Sub;\n\t\t\tbreak;\n\t\tcase '*':\n\t\t\t// * ** *=\n\t\t\tadvance();\n\t\t\tif (m_char == '*')\n\t\t\t\ttoken = selectToken(Token::Exp);\n\t\t\telse if (m_char == '=')\n\t\t\t\ttoken = selectToken(Token::AssignMul);\n\t\t\telse\n\t\t\t\ttoken = Token::Mul;\n\t\t\tbreak;\n\t\tcase '%':\n\t\t\t// % %=\n\t\t\ttoken = selectToken('=', Token::AssignMod, Token::Mod);\n\t\t\tbreak;\n\t\tcase '/':\n\t\t\t// /  // /* /=\n\t\t\ttoken = scanSlash();\n\t\t\tbreak;\n\t\tcase '&':\n\t\t\t// & && &=\n\t\t\tadvance();\n\t\t\tif (m_char == '&')\n\t\t\t\ttoken = selectToken(Token::And);\n\t\t\telse if (m_char == '=')\n\t\t\t\ttoken = selectToken(Token::AssignBitAnd);\n\t\t\telse\n\t\t\t\ttoken = Token::BitAnd;\n\t\t\tbreak;\n\t\tcase '|':\n\t\t\t// | || |=\n\t\t\tadvance();\n\t\t\tif (m_char == '|')\n\t\t\t\ttoken = selectToken(Token::Or);\n\t\t\telse if (m_char == '=')\n\t\t\t\ttoken = selectToken(Token::AssignBitOr);\n\t\t\telse\n\t\t\t\ttoken = Token::BitOr;\n\t\t\tbreak;\n\t\tcase '^':\n\t\t\t// ^ ^=\n\t\t\ttoken = selectToken('=', Token::AssignBitXor, Token::BitXor);\n\t\t\tbreak;\n\t\tcase '.':\n\t\t\t// . 
Number\n\t\t\tadvance();\n\t\t\tif (isDecimalDigit(m_char))\n\t\t\t\ttoken = scanNumber('.');\n\t\t\telse\n\t\t\t\ttoken = Token::Period;\n\t\t\tbreak;\n\t\tcase ':':\n\t\t\t// : :=\n\t\t\tadvance();\n\t\t\tif (m_char == '=')\n\t\t\t\ttoken = selectToken(Token::AssemblyAssign);\n\t\t\telse\n\t\t\t\ttoken = Token::Colon;\n\t\t\tbreak;\n\t\tcase ';':\n\t\t\ttoken = selectToken(Token::Semicolon);\n\t\t\tbreak;\n\t\tcase ',':\n\t\t\ttoken = selectToken(Token::Comma);\n\t\t\tbreak;\n\t\tcase '(':\n\t\t\ttoken = selectToken(Token::LParen);\n\t\t\tbreak;\n\t\tcase ')':\n\t\t\ttoken = selectToken(Token::RParen);\n\t\t\tbreak;\n\t\tcase '[':\n\t\t\ttoken = selectToken(Token::LBrack);\n\t\t\tbreak;\n\t\tcase ']':\n\t\t\ttoken = selectToken(Token::RBrack);\n\t\t\tbreak;\n\t\tcase '{':\n\t\t\ttoken = selectToken(Token::LBrace);\n\t\t\tbreak;\n\t\tcase '}':\n\t\t\ttoken = selectToken(Token::RBrace);\n\t\t\tbreak;\n\t\tcase '?':\n\t\t\ttoken = selectToken(Token::Conditional);\n\t\t\tbreak;\n\t\tcase '~':\n\t\t\ttoken = selectToken(Token::BitNot);\n\t\t\tbreak;\n\t\tdefault:\n\t\t\tif (isIdentifierStart(m_char))\n\t\t\t{\n\t\t\t\ttie(token, m, n) = scanIdentifierOrKeyword();\n\n\t\t\t\t// Special case for hexadecimal literals\n\t\t\t\tif (token == Token::Hex)\n\t\t\t\t{\n\t\t\t\t\t// reset\n\t\t\t\t\tm = 0;\n\t\t\t\t\tn = 0;\n\n\t\t\t\t\t// Special quoted hex string must follow\n\t\t\t\t\tif (m_char == '\"' || m_char == '\\'')\n\t\t\t\t\t\ttoken = scanHexString();\n\t\t\t\t\telse\n\t\t\t\t\t\ttoken = setError(ScannerError::IllegalToken);\n\t\t\t\t}\n\t\t\t\telse if (token == Token::Unicode && m_kind != ScannerKind::Yul)\n\t\t\t\t{\n\t\t\t\t\t// reset\n\t\t\t\t\tm = 0;\n\t\t\t\t\tn = 0;\n\n\t\t\t\t\t// Special quoted hex string must follow\n\t\t\t\t\tif (m_char == '\"' || m_char == '\\'')\n\t\t\t\t\t\ttoken = scanString(true);\n\t\t\t\t\telse\n\t\t\t\t\t\ttoken = setError(ScannerError::IllegalToken);\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if (isDecimalDigit(m_char))\n\t\t\t\ttoken = scanNumber();\n\t\t\telse if (skipWhitespace())\n\t\t\t\ttoken = Token::Whitespace;\n\t\t\telse if (isSourcePastEndOfInput())\n\t\t\t\ttoken = Token::EOS;\n\t\t\telse\n\t\t\t\ttoken = selectErrorToken(ScannerError::IllegalToken);\n\t\t\tbreak;\n\t\t}\n\t\t// Continue scanning for tokens as long as we're just skipping\n\t\t// whitespace.\n\t}\n\twhile (token == Token::Whitespace);\n\tm_tokens[NextNext].location.end = static_cast<int>(sourcePos());\n\tm_tokens[NextNext].location.source = m_source;\n\tm_tokens[NextNext].token = token;\n\tm_tokens[NextNext].extendedTokenInfo = make_tuple(m, n);\n}\n\nbool Scanner::scanEscape()\n{\n\tchar c = m_char;\n\n\t// Skip escaped newlines.\n\tif (tryScanEndOfLine())\n\t\treturn true;\n\tadvance();\n\n\tswitch (c)\n\t{\n\tcase '\\'':  // fall through\n\tcase '\"':  // fall through\n\tcase '\\\\':\n\t\tbreak;\n\tcase 'n':\n\t\tc = '\\n';\n\t\tbreak;\n\tcase 'r':\n\t\tc = '\\r';\n\t\tbreak;\n\tcase 't':\n\t\tc = '\\t';\n\t\tbreak;\n\tcase 'u':\n\t{\n\t\tif (auto const codepoint = scanUnicode(); codepoint.has_value())\n\t\t\taddUnicodeAsUTF8(*codepoint);\n\t\telse\n\t\t\treturn false;\n\t\treturn true;\n\t}\n\tcase 'x':\n\t\tif (!scanHexByte(c))\n\t\t\treturn false;\n\t\tbreak;\n\tdefault:\n\t\treturn false;\n\t}\n\n\taddLiteralChar(c);\n\treturn true;\n}\n\nbool Scanner::isUnicodeLinebreak()\n{\n\tif (0x0a <= m_char && m_char <= 0x0d)\n\t\t// line feed, vertical tab, form feed, carriage return\n\t\treturn true;\n\tif (!m_source->isPastEndOfInput(1) && uint8_t(m_source->get(0)) == 0xc2 && 
uint8_t(m_source->get(1)) == 0x85)\n\t\t// NEL - U+0085, C2 85 in utf8\n\t\treturn true;\n\tif (!m_source->isPastEndOfInput(2) && uint8_t(m_source->get(0)) == 0xe2 && uint8_t(m_source->get(1)) == 0x80 && (\n\t\tuint8_t(m_source->get(2)) == 0xa8 || uint8_t(m_source->get(2)) == 0xa9\n\t))\n\t\t// LS - U+2028, E2 80 A8  in utf8\n\t\t// PS - U+2029, E2 80 A9  in utf8\n\t\treturn true;\n\treturn false;\n}\n\nToken Scanner::scanString(bool const _isUnicode)\n{\n\tsize_t startPosition = m_source->position();\n\tchar const quote = m_char;\n\tadvance();  // consume quote\n\tLiteralScope literal(this, LITERAL_TYPE_STRING);\n\twhile (m_char != quote && !isSourcePastEndOfInput() && !isUnicodeLinebreak())\n\t{\n\t\tchar c = m_char;\n\t\tadvance();\n\t\tif (c == '\\\\')\n\t\t{\n\t\t\tif (isSourcePastEndOfInput() || !scanEscape())\n\t\t\t\treturn setError(ScannerError::IllegalEscapeSequence);\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Report error on non-printable characters in string literals, however\n\t\t\t// allow anything for unicode string literals, because their validity will\n\t\t\t// be verified later (in the syntax checker).\n\t\t\t//\n\t\t\t// We are using a manual range and not isprint() to avoid\n\t\t\t// any potential complications with locale.\n\t\t\tif (!_isUnicode && (static_cast<unsigned>(c) <= 0x1f || static_cast<unsigned>(c) >= 0x7f))\n\t\t\t\treturn setError(ScannerError::IllegalCharacterInString);\n\t\t\taddLiteralChar(c);\n\t\t}\n\t}\n\tif (m_char != quote)\n\t\treturn setError(ScannerError::IllegalStringEndQuote);\n\n\tif (_isUnicode)\n\t{\n\t\tScannerError unicodeDirectionError = validateBiDiMarkup(*m_source, startPosition);\n\t\tif (unicodeDirectionError != ScannerError::NoError)\n\t\t\treturn setError(unicodeDirectionError);\n\t}\n\n\tliteral.complete();\n\tadvance();  // consume quote\n\treturn _isUnicode ? 
Token::UnicodeStringLiteral : Token::StringLiteral;\n}\n\nToken Scanner::scanHexString()\n{\n\tchar const quote = m_char;\n\tadvance();  // consume quote\n\tLiteralScope literal(this, LITERAL_TYPE_STRING);\n\tbool allowUnderscore = false;\n\twhile (m_char != quote && !isSourcePastEndOfInput())\n\t{\n\t\tchar c = m_char;\n\n\t\tif (scanHexByte(c))\n\t\t{\n\t\t\taddLiteralChar(c);\n\t\t\tallowUnderscore = true;\n\t\t}\n\t\telse if (c == '_')\n\t\t{\n\t\t\tadvance();\n\t\t\tif (!allowUnderscore || m_char == quote)\n\t\t\t\treturn setError(ScannerError::IllegalNumberSeparator);\n\t\t\tallowUnderscore = false;\n\t\t}\n\t\telse\n\t\t\treturn setError(ScannerError::IllegalHexString);\n\t}\n\n\tif (m_char != quote)\n\t\treturn setError(ScannerError::IllegalStringEndQuote);\n\n\tliteral.complete();\n\tadvance();  // consume quote\n\treturn Token::HexStringLiteral;\n}\n\n// Parse for regex [:digit:]+(_[:digit:]+)*\nvoid Scanner::scanDecimalDigits()\n{\n\t// MUST begin with a decimal digit.\n\tif (!isDecimalDigit(m_char))\n\t\treturn;\n\n\t// May continue with decimal digit or underscore for grouping.\n\tdo\n\t\taddLiteralCharAndAdvance();\n\twhile (!m_source->isPastEndOfInput() && (isDecimalDigit(m_char) || m_char == '_'));\n\n\t// Defer further validation of underscore to SyntaxChecker.\n}\n\nToken Scanner::scanNumber(char _charSeen)\n{\n\tenum { DECIMAL, HEX, BINARY } kind = DECIMAL;\n\tLiteralScope literal(this, LITERAL_TYPE_NUMBER);\n\tif (_charSeen == '.')\n\t{\n\t\t// we have already seen a decimal point of the float\n\t\taddLiteralChar('.');\n\t\tif (m_char == '_')\n\t\t\treturn setError(ScannerError::IllegalToken);\n\t\tscanDecimalDigits();  // we know we have at least one digit\n\t}\n\telse\n\t{\n\t\tsolAssert(_charSeen == 0, \"\");\n\t\t// if the first character is '0' we must check for octals and hex\n\t\tif (m_char == '0')\n\t\t{\n\t\t\taddLiteralCharAndAdvance();\n\t\t\t// either 0, 0exxx, 0Exxx, 0.xxx or a hex number\n\t\t\tif (m_char == 'x')\n\t\t\t{\n\t\t\t\t// hex number\n\t\t\t\tkind = HEX;\n\t\t\t\taddLiteralCharAndAdvance();\n\t\t\t\tif (!isHexDigit(m_char))\n\t\t\t\t\treturn setError(ScannerError::IllegalHexDigit); // we must have at least one hex digit after 'x'\n\n\t\t\t\twhile (isHexDigit(m_char) || m_char == '_') // We keep the underscores for later validation\n\t\t\t\t\taddLiteralCharAndAdvance();\n\t\t\t}\n\t\t\telse if (isDecimalDigit(m_char))\n\t\t\t\t// We do not allow octal numbers\n\t\t\t\treturn setError(ScannerError::OctalNotAllowed);\n\t\t}\n\t\t// Parse decimal digits and allow trailing fractional part.\n\t\tif (kind == DECIMAL)\n\t\t{\n\t\t\tscanDecimalDigits();  // optional\n\t\t\tif (m_char == '.')\n\t\t\t{\n\t\t\t\tif (!m_source->isPastEndOfInput(1) && m_source->get(1) == '_')\n\t\t\t\t{\n\t\t\t\t\t// Assume the input may be a floating point number with leading '_' in fraction part.\n\t\t\t\t\t// Recover by consuming it all but returning `Illegal` right away.\n\t\t\t\t\taddLiteralCharAndAdvance(); // '.'\n\t\t\t\t\taddLiteralCharAndAdvance(); // '_'\n\t\t\t\t\tscanDecimalDigits();\n\t\t\t\t}\n\t\t\t\tif (m_source->isPastEndOfInput() || !isDecimalDigit(m_source->get(1)))\n\t\t\t\t{\n\t\t\t\t\t// A '.' 
has to be followed by a number.\n\t\t\t\t\tliteral.complete();\n\t\t\t\t\treturn Token::Number;\n\t\t\t\t}\n\t\t\t\taddLiteralCharAndAdvance();\n\t\t\t\tscanDecimalDigits();\n\t\t\t}\n\t\t}\n\t}\n\t// scan exponent, if any\n\tif (m_char == 'e' || m_char == 'E')\n\t{\n\t\tsolAssert(kind != HEX, \"'e'/'E' must be scanned as part of the hex number\");\n\t\tif (kind != DECIMAL)\n\t\t\treturn setError(ScannerError::IllegalExponent);\n\t\telse if (!m_source->isPastEndOfInput(1) && m_source->get(1) == '_')\n\t\t{\n\t\t\t// Recover from wrongly placed underscore as delimiter in literal with scientific\n\t\t\t// notation by consuming until the end.\n\t\t\taddLiteralCharAndAdvance(); // 'e'\n\t\t\taddLiteralCharAndAdvance(); // '_'\n\t\t\tscanDecimalDigits();\n\t\t\tliteral.complete();\n\t\t\treturn Token::Number;\n\t\t}\n\t\t// scan exponent\n\t\taddLiteralCharAndAdvance(); // 'e' | 'E'\n\t\tif (m_char == '+' || m_char == '-')\n\t\t\taddLiteralCharAndAdvance();\n\t\tif (!isDecimalDigit(m_char)) // we must have at least one decimal digit after 'e'/'E'\n\t\t\treturn setError(ScannerError::IllegalExponent);\n\t\tscanDecimalDigits();\n\t}\n\t// The source character immediately following a numeric literal must\n\t// not be an identifier start or a decimal digit; see ECMA-262\n\t// section 7.8.3, page 17 (note that we read only one decimal digit\n\t// if the value is 0).\n\tif (isDecimalDigit(m_char) || isIdentifierStart(m_char))\n\t\treturn setError(ScannerError::IllegalNumberEnd);\n\tliteral.complete();\n\treturn Token::Number;\n}\n\ntuple<Token, unsigned, unsigned> Scanner::scanIdentifierOrKeyword()\n{\n\tsolAssert(isIdentifierStart(m_char), \"\");\n\tLiteralScope literal(this, LITERAL_TYPE_STRING);\n\taddLiteralCharAndAdvance();\n\t// Scan the rest of the identifier characters.\n\twhile (isIdentifierPart(m_char) || (m_char == '.' && m_kind == ScannerKind::Yul))\n\t\taddLiteralCharAndAdvance();\n\tliteral.complete();\n\tauto const token = TokenTraits::fromIdentifierOrKeyword(m_tokens[NextNext].literal);\n\tif (m_kind == ScannerKind::Yul)\n\t{\n\t\t// Turn Solidity identifier into a Yul keyword\n\t\tif (m_tokens[NextNext].literal == \"leave\")\n\t\t\treturn std::make_tuple(Token::Leave, 0, 0);\n\t\t// Turn non-Yul keywords into identifiers.\n\t\tif (!TokenTraits::isYulKeyword(std::get<0>(token)))\n\t\t\treturn std::make_tuple(Token::Identifier, 0, 0);\n\t}\n\treturn token;\n}\n\n} // namespace solidity::langutil\n"
  },
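  {
    "path": "examples/bidi_depth_sketch.cpp",
    "content": "// Hypothetical standalone sketch -- not a file from the original sources. It\n// restates the idea behind Scanner's validateBiDiMarkup: each LRO/RLO/LRE/RLE\n// sequence opens a directional override (+1) and PDF closes one (-1); the\n// depth may never drop below zero and must return to zero at the end.\n#include <array>\n#include <cassert>\n#include <cstddef>\n#include <string_view>\n#include <utility>\n\nenum class BiDiResult { Ok, Underflow, Mismatch };\n\nBiDiResult checkBiDiPairing(std::string_view _text)\n{\n\tstatic constexpr std::array<std::pair<std::string_view, int>, 5> sequences{{\n\t\t{\"\\xE2\\x80\\xAD\", 1},  // U+202D (LRO)\n\t\t{\"\\xE2\\x80\\xAE\", 1},  // U+202E (RLO)\n\t\t{\"\\xE2\\x80\\xAA\", 1},  // U+202A (LRE)\n\t\t{\"\\xE2\\x80\\xAB\", 1},  // U+202B (RLE)\n\t\t{\"\\xE2\\x80\\xAC\", -1}, // U+202C (PDF)\n\t}};\n\n\tint depth = 0;\n\tfor (std::size_t pos = 0; pos < _text.size(); ++pos)\n\t{\n\t\tfor (auto const& [sequence, change]: sequences)\n\t\t\tif (_text.substr(pos, sequence.size()) == sequence)\n\t\t\t\tdepth += change;\n\t\tif (depth < 0)\n\t\t\treturn BiDiResult::Underflow;\n\t}\n\treturn depth > 0 ? BiDiResult::Mismatch : BiDiResult::Ok;\n}\n\nint main()\n{\n\tassert(checkBiDiPairing(\"plain text\") == BiDiResult::Ok);\n\tassert(checkBiDiPairing(\"a\\xE2\\x80\\xAE\" \"b\\xE2\\x80\\xAC\" \"c\") == BiDiResult::Ok); // RLO then PDF\n\tassert(checkBiDiPairing(\"a\\xE2\\x80\\xAE\" \"b\") == BiDiResult::Mismatch); // unclosed RLO\n\tassert(checkBiDiPairing(\"a\\xE2\\x80\\xAC\" \"b\") == BiDiResult::Underflow); // PDF without opener\n}\n"
  },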
  {
    "path": "liblangutil/SemVerHandler.cpp",
    "content": "/*\n\tThis file is part of solidity.\n\n\tsolidity is free software: you can redistribute it and/or modify\n\tit under the terms of the GNU General Public License as published by\n\tthe Free Software Foundation, either version 3 of the License, or\n\t(at your option) any later version.\n\n\tsolidity is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with solidity.  If not, see <http://www.gnu.org/licenses/>.\n*/\n// SPDX-License-Identifier: GPL-3.0\n/**\n * @author Christian <chris@ethereum.org>\n * @date 2016\n * Utilities to handle semantic versioning.\n */\n\n#include <liblangutil/SemVerHandler.h>\n\n#include <functional>\n\nusing namespace std;\nusing namespace solidity;\nusing namespace solidity::langutil;\n\nSemVerVersion::SemVerVersion(string const& _versionString)\n{\n\tauto i = _versionString.begin();\n\tauto end = _versionString.end();\n\n\tfor (unsigned level = 0; level < 3; ++level)\n\t{\n\t\tunsigned v = 0;\n\t\tfor (; i != end && '0' <= *i && *i <= '9'; ++i)\n\t\t\tv = v * 10 + unsigned(*i - '0');\n\t\tnumbers[level] = v;\n\t\tif (level < 2)\n\t\t{\n\t\t\tif (i == end || *i != '.')\n\t\t\t\tBOOST_THROW_EXCEPTION(SemVerError());\n\t\t\telse\n\t\t\t\t++i;\n\t\t}\n\t}\n\tif (i != end && *i == '-')\n\t{\n\t\tauto prereleaseStart = ++i;\n\t\twhile (i != end && *i != '+') ++i;\n\t\tprerelease = string(prereleaseStart, i);\n\t}\n\tif (i != end && *i == '+')\n\t{\n\t\tauto buildStart = ++i;\n\t\twhile (i != end) ++i;\n\t\tbuild = string(buildStart, i);\n\t}\n\tif (i != end)\n\t\tBOOST_THROW_EXCEPTION(SemVerError());\n}\n\nbool SemVerMatchExpression::MatchComponent::matches(SemVerVersion const& _version) const\n{\n\tif (prefix == Token::BitNot)\n\t{\n\t\tMatchComponent comp = *this;\n\n\t\tcomp.prefix = Token::GreaterThanOrEqual;\n\t\tif (!comp.matches(_version))\n\t\t\treturn false;\n\n\t\tif (levelsPresent >= 2)\n\t\t\tcomp.levelsPresent = 2;\n\t\telse\n\t\t\tcomp.levelsPresent = 1;\n\t\tcomp.prefix = Token::LessThanOrEqual;\n\t\treturn comp.matches(_version);\n\t}\n\telse if (prefix == Token::BitXor)\n\t{\n\t\tMatchComponent comp = *this;\n\n\t\tcomp.prefix = Token::GreaterThanOrEqual;\n\t\tif (!comp.matches(_version))\n\t\t\treturn false;\n\n\t\tif (comp.version.numbers[0] == 0 && comp.levelsPresent != 1)\n\t\t\tcomp.levelsPresent = 2;\n\t\telse\n\t\t\tcomp.levelsPresent = 1;\n\t\tcomp.prefix = Token::LessThanOrEqual;\n\t\treturn comp.matches(_version);\n\t}\n\telse\n\t{\n\t\tint cmp = 0;\n\t\tbool didCompare = false;\n\t\tfor (unsigned i = 0; i < levelsPresent && cmp == 0; i++)\n\t\t\tif (version.numbers[i] != std::numeric_limits<unsigned>::max())\n\t\t\t{\n\t\t\t\tdidCompare = true;\n\t\t\t\tcmp = static_cast<int>(_version.numbers[i] - version.numbers[i]);\n\t\t\t}\n\n\t\tif (cmp == 0 && !_version.prerelease.empty() && didCompare)\n\t\t\tcmp = -1;\n\n\t\tswitch (prefix)\n\t\t{\n\t\tcase Token::Assign:\n\t\t\treturn cmp == 0;\n\t\tcase Token::LessThan:\n\t\t\treturn cmp < 0;\n\t\tcase Token::LessThanOrEqual:\n\t\t\treturn cmp <= 0;\n\t\tcase Token::GreaterThan:\n\t\t\treturn cmp > 0;\n\t\tcase Token::GreaterThanOrEqual:\n\t\t\treturn cmp >= 0;\n\t\tdefault:\n\t\t\tsolAssert(false, \"Invalid SemVer expression\");\n\t\t}\n\t\treturn false;\n\t}\n}\n\nbool SemVerMatchExpression::Conjunction::matches(SemVerVersion 
const& _version) const\n{\n\tfor (auto const& component: components)\n\t\tif (!component.matches(_version))\n\t\t\treturn false;\n\treturn true;\n}\n\nbool SemVerMatchExpression::matches(SemVerVersion const& _version) const\n{\n\tif (!isValid())\n\t\treturn false;\n\tfor (auto const& range: m_disjunction)\n\t\tif (range.matches(_version))\n\t\t\treturn true;\n\treturn false;\n}\n\noptional<SemVerMatchExpression> SemVerMatchExpressionParser::parse()\n{\n\treset();\n\n\tif (m_tokens.empty())\n\t\treturn nullopt;\n\n\ttry\n\t{\n\t\twhile (true)\n\t\t{\n\t\t\tparseMatchExpression();\n\t\t\tif (m_pos >= m_tokens.size())\n\t\t\t\tbreak;\n\t\t\tif (currentToken() != Token::Or)\n\t\t\t\tBOOST_THROW_EXCEPTION(SemVerError());\n\t\t\tnextToken();\n\t\t}\n\t}\n\tcatch (SemVerError const&)\n\t{\n\t\treset();\n\t\treturn nullopt;\n\t}\n\n\treturn m_expression;\n}\n\n\nvoid SemVerMatchExpressionParser::reset()\n{\n\tm_expression = SemVerMatchExpression();\n\tm_pos = 0;\n\tm_posInside = 0;\n}\n\nvoid SemVerMatchExpressionParser::parseMatchExpression()\n{\n\t// component - component (range)\n\t// or component component* (conjunction)\n\n\tSemVerMatchExpression::Conjunction range;\n\trange.components.push_back(parseMatchComponent());\n\tif (currentToken() == Token::Sub)\n\t{\n\t\trange.components[0].prefix = Token::GreaterThanOrEqual;\n\t\tnextToken();\n\t\trange.components.push_back(parseMatchComponent());\n\t\trange.components[1].prefix = Token::LessThanOrEqual;\n\t}\n\telse\n\t\twhile (currentToken() != Token::Or && currentToken() != Token::Illegal)\n\t\t\trange.components.push_back(parseMatchComponent());\n\tm_expression.m_disjunction.push_back(range);\n}\n\nSemVerMatchExpression::MatchComponent SemVerMatchExpressionParser::parseMatchComponent()\n{\n\tSemVerMatchExpression::MatchComponent component;\n\tToken token = currentToken();\n\n\tswitch (token)\n\t{\n\tcase Token::BitXor:\n\tcase Token::BitNot:\n\tcase Token::LessThan:\n\tcase Token::LessThanOrEqual:\n\tcase Token::GreaterThan:\n\tcase Token::GreaterThanOrEqual:\n\tcase Token::Assign:\n\t\tcomponent.prefix = token;\n\t\tnextToken();\n\t\tbreak;\n\tdefault:\n\t\tcomponent.prefix = Token::Assign;\n\t}\n\n\tcomponent.levelsPresent = 0;\n\twhile (component.levelsPresent < 3)\n\t{\n\t\tcomponent.version.numbers[component.levelsPresent] = parseVersionPart();\n\t\tcomponent.levelsPresent++;\n\t\tif (currentChar() == '.')\n\t\t\tnextChar();\n\t\telse\n\t\t\tbreak;\n\t}\n\t// TODO we do not support pre and build version qualifiers for now in match expressions\n\t// (but we do support them in the actual versions)\n\treturn component;\n}\n\nunsigned SemVerMatchExpressionParser::parseVersionPart()\n{\n\tauto startPos = m_pos;\n\tchar c = currentChar();\n\tnextChar();\n\tif (c == 'x' || c == 'X' || c == '*')\n\t\treturn unsigned(-1);\n\telse if (c == '0')\n\t\treturn 0;\n\telse if ('1' <= c && c <= '9')\n\t{\n\t\tauto v = static_cast<unsigned>(c - '0');\n\t\t// If we skip to the next token, the current number is terminated.\n\t\twhile (m_pos == startPos && '0' <= currentChar() && currentChar() <= '9')\n\t\t{\n\t\t\tc = currentChar();\n\t\t\tif (v * 10 < v || v * 10 + static_cast<unsigned>(c - '0') < v * 10)\n\t\t\t\tBOOST_THROW_EXCEPTION(SemVerError());\n\t\t\tv = v * 10 + static_cast<unsigned>(c - '0');\n\t\t\tnextChar();\n\t\t}\n\t\treturn v;\n\t}\n\telse\n\t\tBOOST_THROW_EXCEPTION(SemVerError());\n}\n\nchar SemVerMatchExpressionParser::currentChar() const\n{\n\tif (m_pos >= m_literals.size())\n\t\treturn char(-1);\n\tif (m_posInside >= 
m_literals[m_pos].size())\n\t\treturn char(-1);\n\treturn m_literals[m_pos][m_posInside];\n}\n\nchar SemVerMatchExpressionParser::nextChar()\n{\n\tif (m_pos < m_literals.size())\n\t{\n\t\tif (m_posInside + 1 >= m_literals[m_pos].size())\n\t\t\tnextToken();\n\t\telse\n\t\t\t++m_posInside;\n\t}\n\treturn currentChar();\n}\n\nToken SemVerMatchExpressionParser::currentToken() const\n{\n\tif (m_pos < m_tokens.size())\n\t\treturn m_tokens[m_pos];\n\telse\n\t\treturn Token::Illegal;\n}\n\nvoid SemVerMatchExpressionParser::nextToken()\n{\n\t++m_pos;\n\tm_posInside = 0;\n}\n"
  },
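  {
    "path": "examples/semver_range_sketch.cpp",
    "content": "// Hypothetical standalone sketch -- not a file from the original sources. It\n// restates, over plain major.minor.patch triples, the range semantics that\n// SemVerMatchExpression::MatchComponent::matches implements for '^' and '~':\n// both require the candidate to be >= the requested version; '~' additionally\n// pins major.minor, while '^' pins only the major, except for 0.x versions\n// where it pins major.minor as well. Wildcards and pre-release handling from\n// the real implementation are omitted here.\n#include <array>\n#include <cassert>\n\nusing Version = std::array<unsigned, 3>; // {major, minor, patch}\n\nbool caretMatches(Version const& _req, Version const& _v)\n{\n\tif (_v < _req)                        // lower bound: >= requested\n\t\treturn false;\n\tif (_v[0] != _req[0])                 // upper bound: same major\n\t\treturn false;\n\tif (_req[0] == 0 && _v[1] != _req[1]) // 0.x: the minor is pinned too\n\t\treturn false;\n\treturn true;\n}\n\nbool tildeMatches(Version const& _req, Version const& _v)\n{\n\treturn _v >= _req && _v[0] == _req[0] && _v[1] == _req[1];\n}\n\nint main()\n{\n\tassert(caretMatches({1, 2, 3}, {1, 9, 0}));  // ^1.2.3 allows 1.9.0\n\tassert(!caretMatches({1, 2, 3}, {2, 0, 0})); // ...but not 2.0.0\n\tassert(!caretMatches({0, 2, 3}, {0, 3, 0})); // ^0.2.3 pins the minor\n\tassert(tildeMatches({1, 2, 3}, {1, 2, 9}));  // ~1.2.3 allows 1.2.9\n\tassert(!tildeMatches({1, 2, 3}, {1, 3, 0})); // ...but not 1.3.0\n}\n"
  },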
  {
    "path": "liblangutil/SourceLocation.cpp",
    "content": "/*\n\tThis file is part of solidity.\n\n\tsolidity is free software: you can redistribute it and/or modify\n\tit under the terms of the GNU General Public License as published by\n\tthe Free Software Foundation, either version 3 of the License, or\n\t(at your option) any later version.\n\n\tsolidity is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with solidity.  If not, see <http://www.gnu.org/licenses/>.\n*/\n// SPDX-License-Identifier: GPL-3.0\n\n#include <liblangutil/Exceptions.h>\n\n#include <boost/algorithm/string/split.hpp>\n#include <boost/algorithm/string.hpp>\n\nusing namespace solidity;\nnamespace solidity::langutil\n{\n\nSourceLocation const parseSourceLocation(std::string const& _input, std::string const& _sourceName, size_t _maxIndex)\n{\n\t// Expected input: \"start:length:sourceindex\"\n\tenum SrcElem : size_t { Start, Length, Index };\n\n\tstd::vector<std::string> pos;\n\n\tboost::algorithm::split(pos, _input, boost::is_any_of(\":\"));\n\n\tsolAssert(pos.size() == 3, \"SourceLocation string must have 3 colon separated numeric fields.\");\n\tauto const sourceIndex = stoi(pos[Index]);\n\n\tastAssert(\n\t\tsourceIndex == -1 || _maxIndex >= static_cast<size_t>(sourceIndex),\n\t\t\"'src'-field ill-formatted or src-index too high\"\n\t);\n\n\tint start = stoi(pos[Start]);\n\tint end = start + stoi(pos[Length]);\n\n\t// ASSUMPTION: only the name of source is used from here on, the m_source of the CharStream-Object can be empty\n\tstd::shared_ptr<langutil::CharStream> source;\n\tif (sourceIndex != -1)\n\t\tsource = std::make_shared<langutil::CharStream>(\"\", _sourceName);\n\n\treturn SourceLocation{start, end, source};\n}\n\n}\n"
  },
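  {
    "path": "examples/src_triple_sketch.cpp",
    "content": "// Hypothetical standalone sketch -- not a file from the original sources. It\n// shows the \"start:length:sourceindex\" encoding that parseSourceLocation\n// expects: three colon-separated integer fields, where the end offset is\n// start + length and a source index of -1 stands for \"no source attached\".\n#include <cassert>\n#include <sstream>\n#include <string>\n\nstruct Loc\n{\n\tint start;\n\tint end;\n\tint sourceIndex;\n};\n\nLoc parseSrcTriple(std::string const& _input)\n{\n\tstd::istringstream in(_input);\n\tstd::string start, length, index;\n\tstd::getline(in, start, ':');\n\tstd::getline(in, length, ':');\n\tstd::getline(in, index, ':');\n\tint const s = std::stoi(start);\n\treturn {s, s + std::stoi(length), std::stoi(index)};\n}\n\nint main()\n{\n\tLoc loc = parseSrcTriple(\"7:12:0\"); // characters [7, 19) of source #0\n\tassert(loc.start == 7 && loc.end == 19 && loc.sourceIndex == 0);\n\tassert(parseSrcTriple(\"0:0:-1\").sourceIndex == -1);\n}\n"
  },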
  {
    "path": "liblangutil/SourceReferenceExtractor.cpp",
    "content": "/*\n\tThis file is part of solidity.\n\n\tsolidity is free software: you can redistribute it and/or modify\n\tit under the terms of the GNU General Public License as published by\n\tthe Free Software Foundation, either version 3 of the License, or\n\t(at your option) any later version.\n\n\tsolidity is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with solidity.  If not, see <http://www.gnu.org/licenses/>.\n*/\n// SPDX-License-Identifier: GPL-3.0\n#include <liblangutil/SourceReferenceExtractor.h>\n#include <liblangutil/CharStream.h>\n#include <liblangutil/Exceptions.h>\n\n#include <algorithm>\n#include <cmath>\n#include <iomanip>\n\nusing namespace std;\nusing namespace solidity;\nusing namespace solidity::langutil;\n\nSourceReferenceExtractor::Message SourceReferenceExtractor::extract(util::Exception const& _exception, string _category)\n{\n\tSourceLocation const* location = boost::get_error_info<errinfo_sourceLocation>(_exception);\n\n\tstring const* message = boost::get_error_info<util::errinfo_comment>(_exception);\n\tSourceReference primary = extract(location, message ? *message : \"\");\n\n\tstd::vector<SourceReference> secondary;\n\tauto secondaryLocation = boost::get_error_info<errinfo_secondarySourceLocation>(_exception);\n\tif (secondaryLocation && !secondaryLocation->infos.empty())\n\t\tfor (auto const& info: secondaryLocation->infos)\n\t\t\tsecondary.emplace_back(extract(&info.second, info.first));\n\n\treturn Message{std::move(primary), _category, std::move(secondary), nullopt};\n}\n\nSourceReferenceExtractor::Message SourceReferenceExtractor::extract(Error const& _error)\n{\n\tstring category = (_error.type() == Error::Type::Warning) ? \"Warning\" : \"Error\";\n\tMessage message = extract(_error, category);\n\tmessage.errorId = _error.errorId();\n\treturn message;\n}\n\nSourceReference SourceReferenceExtractor::extract(SourceLocation const* _location, std::string message)\n{\n\tif (!_location || !_location->source.get()) // Nothing we can extract here\n\t\treturn SourceReference::MessageOnly(std::move(message));\n\n\tif (!_location->hasText()) // No source text, so we can only extract the source name\n\t\treturn SourceReference::MessageOnly(std::move(message), _location->source->name());\n\n\tshared_ptr<CharStream> const& source = _location->source;\n\n\tLineColumn const interest = source->translatePositionToLineColumn(_location->start);\n\tLineColumn start = interest;\n\tLineColumn end = source->translatePositionToLineColumn(_location->end);\n\tbool const isMultiline = start.line != end.line;\n\n\tstring line = source->lineAtPosition(_location->start);\n\n\tint locationLength =\n\t\tisMultiline ?\n\t\t\tint(line.length()) - start.column :\n\t\t\tend.column - start.column;\n\n\tif (locationLength > 150)\n\t{\n\t\tauto const lhs = static_cast<size_t>(start.column) + 35;\n\t\tstring::size_type const rhs = (isMultiline ? line.length() : static_cast<size_t>(end.column)) - 35;\n\t\tline = line.substr(0, lhs) + \" ... 
\" + line.substr(rhs);\n\t\tend.column = start.column + 75;\n\t\tlocationLength = 75;\n\t}\n\n\tif (line.length() > 150)\n\t{\n\t\tint const len = static_cast<int>(line.length());\n\t\tline = line.substr(\n\t\t\tstatic_cast<size_t>(max(0, start.column - 35)),\n\t\t\tstatic_cast<size_t>(min(start.column, 35)) + static_cast<size_t>(\n\t\t\t\tmin(locationLength + 35, len - start.column)\n\t\t\t)\n\t\t);\n\t\tif (start.column + locationLength + 35 < len)\n\t\t\tline += \" ...\";\n\t\tif (start.column > 35)\n\t\t{\n\t\t\tline = \" ... \" + line;\n\t\t\tstart.column = 40;\n\t\t}\n\t\tend.column = start.column + static_cast<int>(locationLength);\n\t}\n\n\treturn SourceReference{\n\t\tstd::move(message),\n\t\tsource->name(),\n\t\tinterest,\n\t\tisMultiline,\n\t\tline,\n\t\tmin(start.column, static_cast<int>(line.length())),\n\t\tmin(end.column, static_cast<int>(line.length()))\n\t};\n}\n"
  },
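  {
    "path": "examples/line_elision_sketch.cpp",
    "content": "// Hypothetical standalone sketch -- not a file from the original sources. It\n// isolates the elision idea used by SourceReferenceExtractor::extract for\n// overlong locations: keep a fixed head and tail of the line and replace the\n// middle with \" ... \", so a 150+ character span shrinks to 35 + 5 + 35 = 75\n// characters (the same 75 the extractor clamps locationLength to).\n#include <cassert>\n#include <string>\n\nstd::string elideMiddle(std::string const& _line, size_t _keep)\n{\n\tif (_line.size() <= 2 * _keep + 5)\n\t\treturn _line;\n\treturn _line.substr(0, _keep) + \" ... \" + _line.substr(_line.size() - _keep);\n}\n\nint main()\n{\n\tstd::string const longLine(200, 'x');\n\tassert(elideMiddle(longLine, 35).size() == 75);\n\tassert(elideMiddle(\"short\", 35) == \"short\");\n}\n"
  },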
  {
    "path": "liblangutil/SourceReferenceFormatter.cpp",
    "content": "/*\n\tThis file is part of solidity.\n\n\tsolidity is free software: you can redistribute it and/or modify\n\tit under the terms of the GNU General Public License as published by\n\tthe Free Software Foundation, either version 3 of the License, or\n\t(at your option) any later version.\n\n\tsolidity is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with solidity.  If not, see <http://www.gnu.org/licenses/>.\n*/\n// SPDX-License-Identifier: GPL-3.0\n/**\n * Formatting functions for errors referencing positions and locations in the source.\n */\n\n#include <liblangutil/SourceReferenceFormatter.h>\n#include <liblangutil/Scanner.h>\n#include <liblangutil/Exceptions.h>\n#include <libsolutil/UTF8.h>\n#include <iomanip>\n#include <string_view>\n\nusing namespace std;\nusing namespace solidity;\nusing namespace solidity::langutil;\nusing namespace solidity::util;\nusing namespace solidity::util::formatting;\n\nnamespace\n{\n\nstd::string replaceNonTabs(std::string_view _utf8Input, char _filler)\n{\n\tstd::string output;\n\tfor (char const c: _utf8Input)\n\t\tif ((c & 0xc0) != 0x80)\n\t\t\toutput.push_back(c == '\\t' ? '\\t' : _filler);\n\treturn output;\n}\n\n}\n\nAnsiColorized SourceReferenceFormatter::normalColored() const\n{\n\treturn AnsiColorized(m_stream, m_colored, {WHITE});\n}\n\nAnsiColorized SourceReferenceFormatter::frameColored() const\n{\n\treturn AnsiColorized(m_stream, m_colored, {BOLD, BLUE});\n}\n\nAnsiColorized SourceReferenceFormatter::errorColored() const\n{\n\treturn AnsiColorized(m_stream, m_colored, {BOLD, RED});\n}\n\nAnsiColorized SourceReferenceFormatter::messageColored() const\n{\n\treturn AnsiColorized(m_stream, m_colored, {BOLD, WHITE});\n}\n\nAnsiColorized SourceReferenceFormatter::secondaryColored() const\n{\n\treturn AnsiColorized(m_stream, m_colored, {BOLD, CYAN});\n}\n\nAnsiColorized SourceReferenceFormatter::highlightColored() const\n{\n\treturn AnsiColorized(m_stream, m_colored, {YELLOW});\n}\n\nAnsiColorized SourceReferenceFormatter::diagColored() const\n{\n\treturn AnsiColorized(m_stream, m_colored, {BOLD, YELLOW});\n}\n\nvoid SourceReferenceFormatter::printSourceLocation(SourceReference const& _ref)\n{\n\tif (_ref.sourceName.empty())\n\t\treturn; // Nothing we can print here\n\n\tif (_ref.position.line < 0)\n\t{\n\t\tframeColored() << \"-->\";\n\t\tm_stream << ' ' << _ref.sourceName << '\\n';\n\t\treturn; // No line available, nothing else to print\n\t}\n\n\tstring line = std::to_string(_ref.position.line + 1); // one-based line number as string\n\tstring leftpad = string(line.size(), ' ');\n\n\t// line 0: source name\n\tm_stream << leftpad;\n\tframeColored() << \"-->\";\n\tm_stream << ' ' << _ref.sourceName << ':' << line << ':' << (_ref.position.column + 1) << \":\\n\";\n\n\tstring_view text = _ref.text;\n\n\tif (!_ref.multiline)\n\t{\n\t\tsize_t const locationLength = static_cast<size_t>(_ref.endColumn - _ref.startColumn);\n\n\t\t// line 1:\n\t\tm_stream << leftpad << ' ';\n\t\tframeColored() << '|';\n\t\tm_stream << '\\n';\n\n\t\t// line 2:\n\t\tframeColored() << line << \" |\";\n\n\t\tm_stream << ' ' << text.substr(0, static_cast<size_t>(_ref.startColumn));\n\t\thighlightColored() << text.substr(static_cast<size_t>(_ref.startColumn), locationLength);\n\t\tm_stream << 
text.substr(static_cast<size_t>(_ref.endColumn)) << '\\n';\n\n\t\t// line 3:\n\t\tm_stream << leftpad << ' ';\n\t\tframeColored() << '|';\n\n\t\tm_stream << ' ' << replaceNonTabs(text.substr(0, static_cast<size_t>(_ref.startColumn)), ' ');\n\t\tdiagColored() << (\n\t\t\tlocationLength == 0 ?\n\t\t\t\"^\" :\n\t\t\treplaceNonTabs(text.substr(static_cast<size_t>(_ref.startColumn), locationLength), '^')\n\t\t);\n\t\tm_stream << '\\n';\n\t}\n\telse\n\t{\n\t\t// line 1:\n\t\tm_stream << leftpad << ' ';\n\t\tframeColored() << '|';\n\t\tm_stream << '\\n';\n\n\t\t// line 2:\n\t\tframeColored() << line << \" |\";\n\t\tm_stream << ' ' << text.substr(0, static_cast<size_t>(_ref.startColumn));\n\t\thighlightColored() << text.substr(static_cast<size_t>(_ref.startColumn)) << '\\n';\n\n\t\t// line 3:\n\t\tm_stream << leftpad << ' ';\n\t\tframeColored() << '|';\n\t\tm_stream << ' ' << replaceNonTabs(text.substr(0, static_cast<size_t>(_ref.startColumn)), ' ');\n\t\tdiagColored() << \"^ (Relevant source part starts here and spans across multiple lines).\";\n\t\tm_stream << '\\n';\n\t}\n}\n\nvoid SourceReferenceFormatter::printExceptionInformation(SourceReferenceExtractor::Message const& _msg)\n{\n\t// exception header line\n\terrorColored() << _msg.category;\n\tif (m_withErrorIds && _msg.errorId.has_value())\n\t\terrorColored() << \" (\" << _msg.errorId.value().error << \")\";\n\tmessageColored() << \": \" << _msg.primary.message << '\\n';\n\n\tprintSourceLocation(_msg.primary);\n\n\tfor (auto const& secondary: _msg.secondary)\n\t{\n\t\tsecondaryColored() << \"Note\";\n\t\tmessageColored() << \":\" << (secondary.message.empty() ? \"\" : (\" \" + secondary.message)) << '\\n';\n\t\tprintSourceLocation(secondary);\n\t}\n\n\tm_stream << '\\n';\n}\n\nvoid SourceReferenceFormatter::printExceptionInformation(util::Exception const& _exception, std::string const& _category)\n{\n\tprintExceptionInformation(SourceReferenceExtractor::extract(_exception, _category));\n}\n\nvoid SourceReferenceFormatter::printErrorInformation(Error const& _error)\n{\n\tprintExceptionInformation(SourceReferenceExtractor::extract(_error));\n}\n"
  },
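  {
    "path": "examples/annotation_frame_sketch.cpp",
    "content": "// Hypothetical standalone sketch -- not a file from the original sources. It\n// reproduces the shape of the frame that printSourceLocation emits: a \"-->\"\n// header with file:line:column, a gutter line, the source line itself, and a\n// caret underline covering the highlighted span. The real formatter also\n// handles tabs, colors and multiline spans; this sketch assumes a tab-free,\n// single-line location with hypothetical inputs standing in for a\n// SourceReference.\n#include <iostream>\n#include <string>\n\nint main()\n{\n\tstd::string const sourceName = \"contract.sol\";\n\tstd::string const text = \"    uint x = 1e_2;\";\n\tint const line = 2;\n\tint const startColumn = 13;\n\tint const endColumn = 17;\n\n\tstd::string const lineNo = std::to_string(line);\n\tstd::string const leftpad(lineNo.size(), ' ');\n\n\tstd::cout << leftpad << \"--> \" << sourceName << ':' << line << ':' << (startColumn + 1) << \":\\n\";\n\tstd::cout << leftpad << \" |\\n\";\n\tstd::cout << lineNo << \" | \" << text << '\\n';\n\tstd::cout << leftpad << \" | \" << std::string(static_cast<size_t>(startColumn), ' ')\n\t\t<< std::string(static_cast<size_t>(endColumn - startColumn), '^') << '\\n';\n}\n"
  },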
  {
    "path": "liblangutil/Token.cpp",
    "content": "// Copyright 2006-2012, the V8 project authors. All rights reserved.\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//    * Redistributions of source code must retain the above copyright\n//      notice, this list of conditions and the following disclaimer.\n//    * Redistributions in binary form must reproduce the above\n//      copyright notice, this list of conditions and the following\n//      disclaimer in the documentation and/or other materials provided\n//      with the distribution.\n//    * Neither the name of Google Inc. nor the names of its\n//      contributors may be used to endorse or promote products derived\n//      from this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n//\n// Modifications as part of solidity under the following license:\n//\n// solidity is free software: you can redistribute it and/or modify\n// it under the terms of the GNU General Public License as published by\n// the Free Software Foundation, either version 3 of the License, or\n// (at your option) any later version.\n//\n// solidity is distributed in the hope that it will be useful,\n// but WITHOUT ANY WARRANTY; without even the implied warranty of\n// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n// GNU General Public License for more details.\n//\n// You should have received a copy of the GNU General Public License\n// along with solidity.  
If not, see <http://www.gnu.org/licenses/>.\n\n#include <liblangutil/Token.h>\n#include <map>\n\nusing namespace std;\n\nnamespace solidity::langutil\n{\n\nvoid ElementaryTypeNameToken::assertDetails(Token _baseType, unsigned const& _first, unsigned const& _second)\n{\n\tsolAssert(TokenTraits::isElementaryTypeName(_baseType), \"Expected elementary type name: \" + string(TokenTraits::toString(_baseType)));\n\tif (_baseType == Token::BytesM)\n\t{\n\t\tsolAssert(_second == 0, \"There should not be a second size argument to type bytesM.\");\n\t\tsolAssert(_first <= 32, \"No elementary type bytes\" + to_string(_first) + \".\");\n\t}\n\telse if (_baseType == Token::UIntM || _baseType == Token::IntM)\n\t{\n\t\tsolAssert(_second == 0, \"There should not be a second size argument to type \" + string(TokenTraits::toString(_baseType)) + \".\");\n\t\tsolAssert(\n\t\t\t_first <= 256 && _first % 8 == 0,\n\t\t\t\"No elementary type \" + string(TokenTraits::toString(_baseType)) + to_string(_first) + \".\"\n\t\t);\n\t}\n\telse if (_baseType == Token::UFixedMxN || _baseType == Token::FixedMxN)\n\t{\n\t\tsolAssert(\n\t\t\t_first >= 8 && _first <= 256 && _first % 8 == 0 && _second <= 80,\n\t\t\t\"No elementary type \" + string(TokenTraits::toString(_baseType)) + to_string(_first) + \"x\" + to_string(_second) + \".\"\n\t\t);\n\t}\n\telse\n\t\tsolAssert(_first == 0 && _second == 0, \"Unexpected size arguments\");\n\n\tm_token = _baseType;\n\tm_firstNumber = _first;\n\tm_secondNumber = _second;\n}\n\nnamespace TokenTraits\n{\nchar const* toString(Token tok)\n{\n\tswitch (tok)\n\t{\n#define T(name, string, precedence) case Token::name: return string;\n\t\tTOKEN_LIST(T, T)\n#undef T\n\t\tdefault: // Token::NUM_TOKENS:\n\t\t\treturn \"\";\n\t}\n}\n\nchar const* name(Token tok)\n{\n#define T(name, string, precedence) #name,\n\tstatic char const* const names[TokenTraits::count()] = { TOKEN_LIST(T, T) };\n#undef T\n\n\tsolAssert(static_cast<size_t>(tok) < TokenTraits::count(), \"\");\n\treturn names[static_cast<size_t>(tok)];\n}\n\nstd::string friendlyName(Token tok)\n{\n\tchar const* ret = toString(tok);\n\tif (ret)\n\t\treturn std::string(ret);\n\n\tret = name(tok);\n\tsolAssert(ret != nullptr, \"\");\n\treturn std::string(ret);\n}\n\n\nstatic Token keywordByName(string const& _name)\n{\n\t// The following macros are used inside TOKEN_LIST and cause non-keyword tokens to be ignored\n\t// and keywords to be put inside the keywords variable.\n#define KEYWORD(name, string, precedence) {string, Token::name},\n#define TOKEN(name, string, precedence)\n\tstatic map<string, Token> const keywords({TOKEN_LIST(TOKEN, KEYWORD)});\n#undef KEYWORD\n#undef TOKEN\n\tauto it = keywords.find(_name);\n\treturn it == keywords.end() ? Token::Identifier : it->second;\n}\n\nbool isYulKeyword(string const& _literal)\n{\n\treturn _literal == \"leave\" || isYulKeyword(keywordByName(_literal));\n}\n\ntuple<Token, unsigned int, unsigned int> fromIdentifierOrKeyword(string const& _literal)\n{\n\t// Used for `bytesM`, `uintM`, `intM`, `fixedMxN`, `ufixedMxN`.\n\t// M/N must be shortest representation. M can never be 0. 
N can be zero.\n\tauto parseSize = [](string::const_iterator _begin, string::const_iterator _end) -> int\n\t{\n\t\t// No number.\n\t\tif (distance(_begin, _end) == 0)\n\t\t\treturn -1;\n\n\t\t// Disallow leading zero.\n\t\tif (distance(_begin, _end) > 1 && *_begin == '0')\n\t\t\treturn -1;\n\n\t\tint ret = 0;\n\t\tfor (auto it = _begin; it != _end; it++)\n\t\t{\n\t\t\tif (*it < '0' || *it > '9')\n\t\t\t\treturn -1;\n\t\t\t//  Overflow check. The largest acceptable value is 256 in the callers.\n\t\t\tif (ret >= 256)\n\t\t\t\treturn -1;\n\t\t\tret *= 10;\n\t\t\tret += *it - '0';\n\t\t}\n\t\treturn ret;\n\t};\n\n\tauto positionM = find_if(_literal.begin(), _literal.end(), ::isdigit);\n\tif (positionM != _literal.end())\n\t{\n\t\tstring baseType(_literal.begin(), positionM);\n\t\tauto positionX = find_if_not(positionM, _literal.end(), ::isdigit);\n\t\tint m = parseSize(positionM, positionX);\n\t\tToken keyword = keywordByName(baseType);\n\t\tif (keyword == Token::Bytes)\n\t\t{\n\t\t\tif (0 < m && m <= 32 && positionX == _literal.end())\n\t\t\t\treturn make_tuple(Token::BytesM, m, 0);\n\t\t}\n\t\telse if (keyword == Token::UInt || keyword == Token::Int)\n\t\t{\n\t\t\tif (0 < m && m <= 256 && m % 8 == 0 && positionX == _literal.end())\n\t\t\t{\n\t\t\t\tif (keyword == Token::UInt)\n\t\t\t\t\treturn make_tuple(Token::UIntM, m, 0);\n\t\t\t\telse\n\t\t\t\t\treturn make_tuple(Token::IntM, m, 0);\n\t\t\t}\n\t\t}\n\t\telse if (keyword == Token::UFixed || keyword == Token::Fixed)\n\t\t{\n\t\t\tif (\n\t\t\t\tpositionM < positionX &&\n\t\t\t\tpositionX < _literal.end() &&\n\t\t\t\t*positionX == 'x' &&\n\t\t\t\tall_of(positionX + 1, _literal.end(), ::isdigit)\n\t\t\t) {\n\t\t\t\tint n = parseSize(positionX + 1, _literal.end());\n\t\t\t\tif (\n\t\t\t\t\t8 <= m && m <= 256 && m % 8 == 0 &&\n\t\t\t\t\t0 <= n && n <= 80\n\t\t\t\t) {\n\t\t\t\t\tif (keyword == Token::UFixed)\n\t\t\t\t\t\treturn make_tuple(Token::UFixedMxN, m, n);\n\t\t\t\t\telse\n\t\t\t\t\t\treturn make_tuple(Token::FixedMxN, m, n);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn make_tuple(Token::Identifier, 0, 0);\n\t}\n\n\treturn make_tuple(keywordByName(_literal), 0, 0);\n}\n\n}\n}\n"
  },
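  {
    "path": "examples/type_size_sketch.cpp",
    "content": "// Hypothetical standalone sketch -- not a file from the original sources. It\n// spells out the size rules that fromIdentifierOrKeyword enforces when it\n// classifies identifiers like \"bytes32\" or \"ufixed128x18\": bytesM requires\n// 1 <= M <= 32, uintM/intM require M in 8..256 and divisible by 8, and\n// (u)fixedMxN additionally requires 0 <= N <= 80.\n#include <cassert>\n\nbool validBytesM(int _m) { return 0 < _m && _m <= 32; }\nbool validIntM(int _m) { return 0 < _m && _m <= 256 && _m % 8 == 0; }\nbool validFixedMxN(int _m, int _n) { return 8 <= _m && _m <= 256 && _m % 8 == 0 && 0 <= _n && _n <= 80; }\n\nint main()\n{\n\tassert(validBytesM(32) && !validBytesM(33));               // bytes32 exists, bytes33 does not\n\tassert(validIntM(256) && !validIntM(255));                 // uint256 exists, uint255 does not\n\tassert(validFixedMxN(128, 18) && !validFixedMxN(128, 81)); // fixed128x18 vs fixed128x81\n}\n"
  },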
  {
    "path": "logging-toolkit/Cargo.toml",
    "content": "[package]\nname = \"logging-toolkit\"\nversion = \"0.1.0\"\nauthors = [\"laser <l@s3r.com>\"]\nlicense = \"MIT OR Apache-2.0\"\n\nedition = \"2018\"\n\n[dependencies]\nslog = \"2.4.1\"\nslog-term = \"2.4.0\"\nslog-json = \"2.3.0\"\nslog-async = \"2.3.0\"\n"
  },
  {
    "path": "logging-toolkit/src/lib.rs",
    "content": "#[macro_use]\nextern crate slog;\nextern crate slog_async;\nextern crate slog_json;\nextern crate slog_term;\n\nuse slog::Drain;\nuse slog::FnValue;\nuse slog::Level;\nuse slog::LevelFilter;\nuse slog::Logger;\nuse std::env;\n\npub fn make_logger(\n    root_name: &'static str,\n    use_json_env_name: &str,\n    min_log_level_env_name: &str,\n) -> Logger {\n    let drain = match env::var(use_json_env_name).as_ref().map(|x| x.as_str()) {\n        Ok(\"true\") => {\n            let json_drain = slog_json::Json::new(std::io::stdout())\n                .add_default_keys()\n                .build()\n                .fuse();\n\n            slog_async::Async::new(json_drain).build().fuse()\n        }\n        _ => {\n            let term_decorator = slog_term::TermDecorator::new().build();\n            let term_drain = slog_term::FullFormat::new(term_decorator).build().fuse();\n\n            slog_async::Async::new(term_drain).build().fuse()\n        }\n    };\n\n    let min_log_level = match env::var(min_log_level_env_name) {\n        Ok(val) => match val.parse::<u64>() {\n            Ok(parsed) => match Level::from_usize(parsed as usize) {\n                Some(level) => level,\n                None => Level::Info,\n            },\n            _ => Level::Info,\n        },\n        _ => Level::Info,\n    };\n\n    let with_filter = LevelFilter::new(drain, min_log_level).map(slog::Fuse);\n\n    Logger::root(\n        with_filter,\n        o!(\"root\" => root_name, \"place\" => FnValue(move |info| {\n            format!(\"{}:{} {}\",\n                    info.file(),\n                    info.line(),\n                    info.module(),\n                    )\n        })),\n    )\n}\n"
  },
  {
    "path": "parameters.json",
    "content": "{\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params\": {\n    \"cid\": \"QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR\",\n    \"digest\": \"7610b9f82bfc88405b7a832b651ce2f6\",\n    \"sector_size\": 2048\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk\": {\n    \"cid\": \"QmcS5JZs8X3TdtkEBpHAdUYjdNDqcL7fWQFtQz69mpnu2X\",\n    \"digest\": \"0e0958009936b9d5e515ec97b8cb792d\",\n    \"sector_size\": 2048\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params\": {\n    \"cid\": \"QmUiRx71uxfmUE8V3H9sWAsAXoM88KR4eo1ByvvcFNeTLR\",\n    \"digest\": \"1a7d4a9c8a502a497ed92a54366af33f\",\n    \"sector_size\": 536870912\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk\": {\n    \"cid\": \"QmfCeddjFpWtavzfEzZpJfzSajGNwfL4RjFXWAvA9TSnTV\",\n    \"digest\": \"4dae975de4f011f101f5a2f86d1daaba\",\n    \"sector_size\": 536870912\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params\": {\n    \"cid\": \"QmcSTqDcFVLGGVYz1njhUZ7B6fkKtBumsLUwx4nkh22TzS\",\n    \"digest\": \"82c88066be968bb550a05e30ff6c2413\",\n    \"sector_size\": 2048\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk\": {\n    \"cid\": \"QmSTCXF2ipGA3f6muVo6kHc2URSx6PzZxGUqu7uykaH5KU\",\n    \"digest\": \"ffd79788d614d27919ae5bd2d94eacb6\",\n    \"sector_size\": 2048\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params\": {\n    \"cid\": \"QmU9SBzJNrcjRFDiFc4GcApqdApN6z9X7MpUr66mJ2kAJP\",\n    \"digest\": \"700171ecf7334e3199437c930676af82\",\n    \"sector_size\": 8388608\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk\": {\n    \"cid\": \"QmbmUMa3TbbW3X5kFhExs6WgC4KeWT18YivaVmXDkB6ANG\",\n    \"digest\": \"79ebb55f56fda427743e35053edad8fc\",\n    \"sector_size\": 8388608\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params\": {\n    \"cid\": \"QmdNEL2RtqL52GQNuj8uz6mVj5Z34NVnbaJ1yMyh1oXtBx\",\n    \"digest\": \"c49499bb76a0762884896f9683403f55\",\n    \"sector_size\": 8388608\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk\": {\n    \"cid\": \"QmUiVYCQUgr6Y13pZFr8acWpSM4xvTXUdcvGmxyuHbKhsc\",\n    \"digest\": \"34d4feeacd9abf788d69ef1bb4d8fd00\",\n    \"sector_size\": 8388608\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params\": {\n    \"cid\": \"QmVgCsJFRXKLuuUhT3aMYwKVGNA9rDeR6DCrs7cAe8riBT\",\n    \"digest\": \"827359440349fe8f5a016e7598993b79\",\n    \"sector_size\": 536870912\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk\": {\n    \"cid\": \"QmfA31fbCWojSmhSGvvfxmxaYCpMoXP95zEQ9sLvBGHNaN\",\n    
\"digest\": \"bd2cd62f65c1ab84f19ca27e97b7c731\",\n    \"sector_size\": 536870912\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params\": {\n    \"cid\": \"QmaUmfcJt6pozn8ndq1JVBzLRjRJdHMTPd4foa8iw5sjBZ\",\n    \"digest\": \"2cf49eb26f1fee94c85781a390ddb4c8\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk\": {\n    \"cid\": \"QmR9i9KL3vhhAqTBGj1bPPC7LvkptxrH9RvxJxLN1vvsBE\",\n    \"digest\": \"0f8ec542485568fa3468c066e9fed82b\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params\": {\n    \"cid\": \"Qmdtczp7p4wrbDofmHdGhiixn9irAcN77mV9AEHZBaTt1i\",\n    \"digest\": \"d84f79a16fe40e9e25a36e2107bb1ba0\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk\": {\n    \"cid\": \"QmZCvxKcKP97vDAk8Nxs9R1fWtqpjQrAhhfXPoCi1nkDoF\",\n    \"digest\": \"fc02943678dd119e69e7fab8420e8819\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params\": {\n    \"cid\": \"QmeAN4vuANhXsF8xP2Lx5j2L6yMSdogLzpcvqCJThRGK1V\",\n    \"digest\": \"3810b7780ac0e299b22ae70f1f94c9bc\",\n    \"sector_size\": 68719476736\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk\": {\n    \"cid\": \"QmWV8rqZLxs1oQN9jxNWmnT1YdgLwCcscv94VARrhHf1T7\",\n    \"digest\": \"59d2bf1857adc59a4f08fcf2afaa916b\",\n    \"sector_size\": 68719476736\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params\": {\n    \"cid\": \"QmVkrXc1SLcpgcudK5J25HH93QvR9tNsVhVTYHm5UymXAz\",\n    \"digest\": \"2170a91ad5bae22ea61f2ea766630322\",\n    \"sector_size\": 68719476736\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk\": {\n    \"cid\": \"QmbfQjPD7EpzjhWGmvWAsyN2mAZ4PcYhsf3ujuhU9CSuBm\",\n    \"digest\": \"6d3789148fb6466d07ee1e24d6292fd6\",\n    \"sector_size\": 68719476736\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params\": {\n    \"cid\": \"QmWceMgnWYLopMuM4AoGMvGEau7tNe5UK83XFjH5V9B17h\",\n    \"digest\": \"434fb1338ecfaf0f59256f30dde4968f\",\n    \"sector_size\": 2048\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk\": {\n    \"cid\": \"QmamahpFCstMUqHi2qGtVoDnRrsXhid86qsfvoyCTKJqHr\",\n    \"digest\": \"dc1ade9929ade1708238f155343044ac\",\n    \"sector_size\": 2048\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params\": {\n    \"cid\": \"QmYBpTt7LWNAWr1JXThV5VxX7wsQFLd1PHrGYVbrU1EZjC\",\n    \"digest\": \"6c77597eb91ab936c1cef4cf19eba1b3\",\n    \"sector_size\": 536870912\n  },\n  
\"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk\": {\n    \"cid\": \"QmWionkqH2B6TXivzBSQeSyBxojaiAFbzhjtwYRrfwd8nH\",\n    \"digest\": \"065179da19fbe515507267677f02823e\",\n    \"sector_size\": 536870912\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params\": {\n    \"cid\": \"QmPXAPPuQtuQz7Zz3MHMAMEtsYwqM1o9H1csPLeiMUQwZH\",\n    \"digest\": \"09e612e4eeb7a0eb95679a88404f960c\",\n    \"sector_size\": 8388608\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk\": {\n    \"cid\": \"QmYCuipFyvVW1GojdMrjK1JnMobXtT4zRCZs1CGxjizs99\",\n    \"digest\": \"b687beb9adbd9dabe265a7e3620813e4\",\n    \"sector_size\": 8388608\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params\": {\n    \"cid\": \"QmengpM684XLQfG8754ToonszgEg2bQeAGUan5uXTHUQzJ\",\n    \"digest\": \"6a388072a518cf46ebd661f5cc46900a\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk\": {\n    \"cid\": \"Qmf93EMrADXAK6CyiSfE8xx45fkMfR3uzKEPCvZC1n2kzb\",\n    \"digest\": \"0c7b4aac1c40fdb7eb82bc355b41addf\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params\": {\n    \"cid\": \"QmS7ye6Ri2MfFzCkcUJ7FQ6zxDKuJ6J6B8k5PN7wzSR9sX\",\n    \"digest\": \"1801f8a6e1b00bceb00cc27314bb5ce3\",\n    \"sector_size\": 68719476736\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk\": {\n    \"cid\": \"QmehSmC6BhrgRZakPDta2ewoH9nosNzdjCqQRXsNFNUkLN\",\n    \"digest\": \"a89884252c04c298d0b3c81bfd884164\",\n    \"sector_size\": 68719476736\n  }\n}"
  },
  {
    "path": "proptest-regressions/crypto/sloth.txt",
    "content": "# Seeds for failure cases proptest has generated in the past. It is\n# automatically read and these particular cases re-run before any\n# novel cases are generated.\n#\n# It is recommended to check this file in to source control so that\n# everyone who runs the test benefits from these saved cases.\nxs 2333654024 1879057395 1148234502 4254212597 # shrinks to key = Fr(FrRepr([0, 0, 0, 0])), plaintext = Fr(FrRepr([6433232347557286543, 14241240406459990354, 3113366375994539378, 2168360237581758600]))\n"
  },
  {
    "path": "release.toml",
    "content": "pre-release-commit-message = \"chore({{crate_name}}): release {{version}}\"\npro-release-commit-message = \"chore({{crate_name}}): starting development cycle for {{next_version}}\"\nno-dev-version = true"
  },
  {
    "path": "rust-fil-proofs.config.toml.sample",
    "content": "# To use this configuration, copy this file to './rust-fil-proofs.config.toml'.\n\n# The location to store downloaded parameter files required for proofs.\nparameter_cache = \"/var/tmp/filecoin-proofs-parameters/\"\n\n# The location to store the on-disk parents cache.\nparent_cache = \"/var/tmp/filecoin-parents\"\n# The max number of parent cache elements to have mapped in RAM at a time.\nsdr_parents_cache_size = 2_048\n\n# This enables the use of the GPU for column tree building.\nuse_gpu_column_builder = false\n# If the GPU is used for column building, this is the batch size to send to the GPU at a time.\nmax_gpu_column_batch_size = 400_000\n# This is the batch size for writing out the column tree elements to disk after it's generated.\ncolumn_write_batch_size = 262_144\n\n# This enables the use of the GPU for tree r last building.\nuse_gpu_tree_builder = false\n# If the GPU is used for tree r last building, this is the batch size to send to the GPU at a time.\nmax_gpu_tree_batch_size = 700_000\n\n# This setting affects tree_r_last (MerkleTree) generation and access\n# and determines the size of the on disk tree caches.  This value MUST\n# NOT be changed after tree_r_last caches have been generated on your\n# system, as any remaining will no longer be accessible.  A tool\n# exists called 'update_tree_r_last' that can rebuild cache files if\n# it's required, but updating this setting is NOT recommended.\nrows_to_discard = 2\n\n# This value is defaulted to the number of cores available on your system.\n#window_post_synthesis_num_cpus = 8\n\n# This enables multicore SDR replication\nuse_multicore_sdr = false\n"
  },
  {
    "path": "rust-toolchain",
    "content": "1.51.0\n"
  },
  {
    "path": "scripts/bench-parser.sh",
    "content": "#!/usr/bin/env bash\nname=\"\"\nsamples=\"\"\ntime=\"\"\nslope=\"\"\nrsqr=\"\"\nmean=\"\"\nstddev=\"\"\nmedian=\"\"\nmedabsdev=\"\"\n\nfunction as_list {\n  out=$(echo $@ | sed \"s/ /\\\", \\\"/g\")\n  echo \"[\\\"$out\\\"]\"\n}\n\nresults=[]\nindex=0\n\nwhile IFS= read line; do\n  if [[ $line =~ ^Benchmarking ]]; then\n    if [[ -z $name ]]; then\n      name=`echo \"$line\" | cut -d' ' -f2-`\n    fi\n  fi\n  if [[ \"$line\" =~ Collecting ]]; then\n    samples=$(echo \"$line\" | cut -d'C' -f2 | cut -d' ' -f2)\n  fi\n  if [[ \"$line\" =~ time: ]]; then\n    time=$(echo \"$line\" | cut -d'[' -f2 | cut -d']' -f1 | awk '{ print $1$2, $3$4, $5$6 }')\n  fi\n  if [[ \"$line\" =~ ^slope ]]; then\n    slope=$(echo \"$line\" | cut -d'[' -f2 | cut -d']' -f1 | awk '{ print $1$2, $3$4 }')\n    rsqr=$(echo \"$line\" | cut -d'[' -f3 | cut -d']' -f1 | awk '{ print $1, $2 }')\n  fi\n  if [[ \"$line\" =~ ^mean ]]; then\n    mean=$(echo \"$line\" | cut -d'[' -f2 | cut -d']' -f1 | awk '{ print $1$2, $3$4 }')\n    stddev=$(echo \"$line\" | cut -d'[' -f3 | cut -d']' -f1 | awk '{ print $1$2, $3$4 }')\n  fi\n  if [[ \"$line\" =~ ^median ]]; then\n    median=$(echo \"$line\" | cut -d'[' -f2 | cut -d']' -f1 | awk '{ print $1$2, $3$4 }')\n    medabsdev=$(echo \"$line\" | cut -d'[' -f3 | cut -d']' -f1 | awk '{ print $1$2, $3$4 }')\n    results[index]=\"  {\n    \\\"name\\\": \"\\\"$name\\\"\",\n    \\\"samples\\\": $samples,\n    \\\"time\\\": $(as_list $time),\n    \\\"slope\\\": $(as_list $slope),\n    \\\"R^2\\\": $(as_list $rsqr),\n    \\\"mean\\\": $(as_list $mean),\n    \\\"std. dev.\\\": $(as_list $stddev),\n    \\\"median\\\": $(as_list $median),\n    \\\"med. abs. dev.\\\": $(as_list $medabsdev)\n  }\"\n    name=\"\"\n    index=$((index+1))\n  fi\ndone\n\ncount=$((index-1))\n\nif [ \"$count\" -ge \"1\" ]; then\n  echo \"[\"\n  for n in ${!results[@]}; do\n    printf \"${results[$n]}\"\n    if [ \"$n\" -ne \"$count\" ]; then\n      echo \", \"\n    else\n      echo\n    fi\n  done\n  echo \"]\"\nfi\n"
  },
  {
    "path": "scripts/package-release.sh",
    "content": "#!/usr/bin/env bash\n\nset -Eeuo pipefail\n\nif [ -z \"$1\" ]; then\n  TAR_FILE=`mktemp`.tar.gz\nelse\n  TAR_FILE=$1\nfi\n\nTAR_PATH=`mktemp -d`\n\nmkdir -p $TAR_PATH\nmkdir -p $TAR_PATH/bin\nmkdir -p $TAR_PATH/misc\n\ncp filecoin-proofs/parameters.json $TAR_PATH/misc/\ncp target/release/paramcache $TAR_PATH/bin/\ncp target/release/paramfetch $TAR_PATH/bin/\n\npushd $TAR_PATH\n\ntar -czf $TAR_FILE ./*\n\npopd\n\nrm -rf $TAR_PATH\n\necho $TAR_FILE\n"
  },
  {
    "path": "scripts/pin-params.sh",
    "content": "#!/usr/bin/env bash\nset -Eeuo pipefail\n\n# pin-params.sh\n#\n# - Post the directory of params to cluster.ipfs.io\n# - Grab the CID for the previous params from proofs.filecoin.io\n# - Add the old params as a `prev` dir to the new params dir to keep them around.\n# - Pin the new cid on cluster\n# - Publish the new cid as a dnslink to proofs.filecoin.io\n# - The gateways will pin the new dir by checking proofs.filecoin.io hourly.\n#\n# Requires:\n#  - `ipfs-cluster-ctl` - download from https://dist.ipfs.io/#ipfs-cluster-ctl\n#  - `npx`, as provide `npm` >= v6\n#  - `ipfs`\n#\n# You _must_ provide the following env vars\n#\n#  - CLUSTER_TOKEN - the basic auth string as \"username:password\"\n#  - DNSIMPLE_TOKEN - an api key for a dnsimple account with a zone for proofs.filecoin.io\n#\n# Optional: you can override the input dir by passing a path as the first param.\n#\n# Usage:\n#   CLUSTER_TOKEN=\"user:pass\" DNSIMPLE_TOKEN=\"xyz\" ./pin-params.sh\n#\n\nINPUT_DIR=${1:-\"/var/tmp/filecoin-proof-parameters\"}\n: \"${CLUSTER_TOKEN:?please set CLUSTER_TOKEN env var}\"\n: \"${DNSIMPLE_TOKEN:?please set DNSIMPLE_TOKEN env var}\"\n\necho \"checking $INPUT_DIR\"\n\n# Grab the version number from the files in the dir.\n# Fail if more than 1 version or doesnt match a version string like vNN, e.g v12\nif ls -A $INPUT_DIR &> /dev/null; then\n  # version will be a list if there is more than one...\n  VERSION=$(ls $INPUT_DIR | sort -r | cut -c 1-3 | uniq)\n  echo found $VERSION\n\n  if [[ $(echo $VERSION | wc -w) -eq 1 && $VERSION =~ ^v[0-9]+ ]]; then\n    # we have 1 version, lets go...\n    COUNT=$(ls -l $INPUT_DIR | wc -l | xargs echo -n)\n    echo \"adding $COUNT files to ipfs...\"\n\n  else\n    echo \"Error: input dir should contain just the current version of the params\"\n    exit 1\n  fi\nelse\n  echo \"Error: input dir '$INPUT_DIR' should contain the params\"\n  exit 1\nfi\n\nCLUSTER_HOST=\"/dnsaddr/cluster.ipfs.io\"\nCLUSTER_PIN_NAME=\"filecoin-proof-parameters-$VERSION\"\nDNSLINK_DOMAIN=\"proofs.filecoin.io\"\n\n# Pin to cluster\nROOT_CID=$(ipfs-cluster-ctl \\\n  --host $CLUSTER_HOST \\\n  --basic-auth $CLUSTER_TOKEN \\\n  add --quieter \\\n  --name $CLUSTER_PIN_NAME \\\n  --recursive $INPUT_DIR )\n\necho \"ok! root cid is $ROOT_CID\"\n\n# Publist the new cid to the dnslink\nnpx dnslink-dnsimple --domain $DNSLINK_DOMAIN --link \"/ipfs/$ROOT_CID\"\n\necho \"done!\"\n"
  },
  {
    "path": "scripts/publish-release.sh",
    "content": "#!/usr/bin/env bash\n\nset -Eeuo pipefail\n\nRELEASE_NAME=\"$CIRCLE_PROJECT_REPONAME-`uname`\"\nRELEASE_FILE=\"/tmp/$RELEASE_NAME.tar.gz\"\nRELEASE_TAG=\"${CIRCLE_SHA1:0:16}\"\n\n# make sure we have a token set, api requests won't work otherwise\nif [ -z $GITHUB_TOKEN ]; then\n  echo \"\\$GITHUB_TOKEN not set, publish failed\"\n  exit 1\nfi\n\necho \"preparing release file\"\n\n`dirname $0`/package-release.sh $RELEASE_FILE\n\necho \"release file created: $RELEASE_FILE\"\n\n# see if the release already exists by tag\nRELEASE_RESPONSE=`\n  curl \\\n    --header \"Authorization: token $GITHUB_TOKEN\" \\\n    \"https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/releases/tags/$RELEASE_TAG\"\n`\n\nRELEASE_ID=`echo $RELEASE_RESPONSE | jq '.id'`\n\nif [ \"$RELEASE_ID\" = \"null\" ]; then\n  echo \"creating release\"\n\n  RELEASE_DATA=\"{\n    \\\"tag_name\\\": \\\"$RELEASE_TAG\\\",\n    \\\"target_commitish\\\": \\\"$CIRCLE_SHA1\\\",\n    \\\"name\\\": \\\"$RELEASE_TAG\\\",\n    \\\"body\\\": \\\"\\\"\n  }\"\n\n  # create it if it doesn't exist yet\n  RELEASE_RESPONSE=`\n    curl \\\n      --request POST \\\n      --header \"Authorization: token $GITHUB_TOKEN\" \\\n      --header \"Content-Type: application/json\" \\\n      --data \"$RELEASE_DATA\" \\\n      \"https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/releases\"\n  `\nelse\n  echo \"release already exists\"\nfi\n\nRELEASE_UPLOAD_URL=`echo $RELEASE_RESPONSE | jq -r '.upload_url' | cut -d'{' -f1`\n\ncurl \\\n  --request POST \\\n  --header \"Authorization: token $GITHUB_TOKEN\" \\\n  --header \"Content-Type: application/octet-stream\" \\\n  --data-binary \"@$RELEASE_FILE\" \\\n  \"$RELEASE_UPLOAD_URL?name=$(basename $RELEASE_FILE)\"\n\necho \"release file uploaded\"\n"
  },
  {
    "path": "scripts/verify-parameters-json.sh",
    "content": "#!/bin/sh\n\n# This script verifies that a given `.params` file (and the corresponding\n# `.vk` file) is part of `parameters.json` and has the correct digest.\n#\n# This script runs on POSIX compatible shells. You need to have standard\n# utilities (`basename`, `head`, `grep`) as well as have `jq` and `b2sum`\n# installed.\n#\n# The inputs are a `parameter.json` file and a `.params' file.\n\nif [ \"${#}\" -ne 2 ]; then\n    echo \"Verify that a given .params file (and the corresponding .vk file)\"\n    echo \"is part of parameters.json and has the correct digest.\"\n    echo \"\"\n    echo \"Usage: $(basename \"${0}\") parameters.json parameter-file.params\"\n    exit 1\nfi\n\nif ! command -v b2sum >/dev/null 2>&1\nthen\n    echo \"ERROR: 'b2sum' needs to be installed.\"\n    exit 1\nfi\n\nif ! command -v jq >/dev/null 2>&1\nthen\n    echo \"ERROR: 'jq' needs to be installed.\"\n    exit 1\nfi\n\nPARAMS_JSON=${1}\nPARAMS_ID=\"${2%.*}\"\n\nPARAMS_FILE=\"${PARAMS_ID}.params\"\nVK_FILE=\"${PARAMS_ID}.vk\"\n\n# Transforms the `parameters.json` into a string that consists of digest and\n# filename pairs.\nPARAMS_JSON_DATA=$(jq -r 'to_entries[] | \"\\(.value.digest) \\(.key)\"' \"${PARAMS_JSON}\")\n\nVK_HASH_SHORT=$(b2sum \"${VK_FILE}\"|head --bytes 32)\nif echo \"${PARAMS_JSON_DATA}\"|grep --silent \"${VK_HASH_SHORT} ${VK_FILE}\"; then\n    echo \"ok Correct digest of VK file was found in ${PARAMS_JSON}.\"\nelse\n    echo \"not ok ERROR: Digest of VK file was *not* found/correct in ${PARAMS_JSON}.\"\n    exit 1\nfi\n\nPARAMS_HASH_SHORT=$(b2sum \"${PARAMS_FILE}\"|head --bytes 32)\nif echo \"${PARAMS_JSON_DATA}\"|grep --silent \"${PARAMS_HASH_SHORT} ${PARAMS_FILE}\"; then\n    echo \"ok Correct digest of params file was found in ${PARAMS_JSON}.\"\nelse\n    echo \"not ok ERROR: Digest of params file was *not* found/correct in ${PARAMS_JSON}.\"\n    exit 1\nfi\n\necho \"# Verification successfully completed.\"\n"
  },
  {
    "path": "sector-base/.gitignore",
    "content": "/target\n**/*.rs.bk\nCargo.lock\n.criterion\n**/libproofs.h\nheaptrack*"
  },
  {
    "path": "sector-base/Cargo.toml",
    "content": "[package]\nname = \"sector-base\"\nversion = \"0.1.0\"\nauthors = [\"dignifiedquire <dignifiedquire@gmail.com>\"]\nlicense = \"MIT OR Apache-2.0\"\n\nedition = \"2018\"\n\n[dependencies]\nbitvec = \"0.5\"\nfailure = \"0.1\"\nitertools = \"0.7.3\"\nlibc = \"0.2\"\nrand = \"0.4\"\nstorage-proofs = { path = \"../storage-proofs\" }\nffi-toolkit = { path = \"../ffi-toolkit\" }\n\n[dependencies.pairing]\nversion = \"0.14.2\"\nfeatures = [\"expose-arith\"]\n\n[dev-dependencies]\ntempfile = \"*\"\n"
  },
  {
    "path": "sector-base/LICENSE-APACHE",
    "content": "                              Apache License\n                        Version 2.0, January 2004\n                     http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n   \"License\" shall mean the terms and conditions for use, reproduction,\n   and distribution as defined by Sections 1 through 9 of this document.\n\n   \"Licensor\" shall mean the copyright owner or entity authorized by\n   the copyright owner that is granting the License.\n\n   \"Legal Entity\" shall mean the union of the acting entity and all\n   other entities that control, are controlled by, or are under common\n   control with that entity. For the purposes of this definition,\n   \"control\" means (i) the power, direct or indirect, to cause the\n   direction or management of such entity, whether by contract or\n   otherwise, or (ii) ownership of fifty percent (50%) or more of the\n   outstanding shares, or (iii) beneficial ownership of such entity.\n\n   \"You\" (or \"Your\") shall mean an individual or Legal Entity\n   exercising permissions granted by this License.\n\n   \"Source\" form shall mean the preferred form for making modifications,\n   including but not limited to software source code, documentation\n   source, and configuration files.\n\n   \"Object\" form shall mean any form resulting from mechanical\n   transformation or translation of a Source form, including but\n   not limited to compiled object code, generated documentation,\n   and conversions to other media types.\n\n   \"Work\" shall mean the work of authorship, whether in Source or\n   Object form, made available under the License, as indicated by a\n   copyright notice that is included in or attached to the work\n   (an example is provided in the Appendix below).\n\n   \"Derivative Works\" shall mean any work, whether in Source or Object\n   form, that is based on (or derived from) the Work and for which the\n   editorial revisions, annotations, elaborations, or other modifications\n   represent, as a whole, an original work of authorship. For the purposes\n   of this License, Derivative Works shall not include works that remain\n   separable from, or merely link (or bind by name) to the interfaces of,\n   the Work and Derivative Works thereof.\n\n   \"Contribution\" shall mean any work of authorship, including\n   the original version of the Work and any modifications or additions\n   to that Work or Derivative Works thereof, that is intentionally\n   submitted to Licensor for inclusion in the Work by the copyright owner\n   or by an individual or Legal Entity authorized to submit on behalf of\n   the copyright owner. For the purposes of this definition, \"submitted\"\n   means any form of electronic, verbal, or written communication sent\n   to the Licensor or its representatives, including but not limited to\n   communication on electronic mailing lists, source code control systems,\n   and issue tracking systems that are managed by, or on behalf of, the\n   Licensor for the purpose of discussing and improving the Work, but\n   excluding communication that is conspicuously marked or otherwise\n   designated in writing by the copyright owner as \"Not a Contribution.\"\n\n   \"Contributor\" shall mean Licensor and any individual or Legal Entity\n   on behalf of whom a Contribution has been received by Licensor and\n   subsequently incorporated within the Work.\n\n2. Grant of Copyright License. 
Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   copyright license to reproduce, prepare Derivative Works of,\n   publicly display, publicly perform, sublicense, and distribute the\n   Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   (except as stated in this section) patent license to make, have made,\n   use, offer to sell, sell, import, and otherwise transfer the Work,\n   where such license applies only to those patent claims licensable\n   by such Contributor that are necessarily infringed by their\n   Contribution(s) alone or by combination of their Contribution(s)\n   with the Work to which such Contribution(s) was submitted. If You\n   institute patent litigation against any entity (including a\n   cross-claim or counterclaim in a lawsuit) alleging that the Work\n   or a Contribution incorporated within the Work constitutes direct\n   or contributory patent infringement, then any patent licenses\n   granted to You under this License for that Work shall terminate\n   as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n   Work or Derivative Works thereof in any medium, with or without\n   modifications, and in Source or Object form, provided that You\n   meet the following conditions:\n\n   (a) You must give any other recipients of the Work or\n       Derivative Works a copy of this License; and\n\n   (b) You must cause any modified files to carry prominent notices\n       stating that You changed the files; and\n\n   (c) You must retain, in the Source form of any Derivative Works\n       that You distribute, all copyright, patent, trademark, and\n       attribution notices from the Source form of the Work,\n       excluding those notices that do not pertain to any part of\n       the Derivative Works; and\n\n   (d) If the Work includes a \"NOTICE\" text file as part of its\n       distribution, then any Derivative Works that You distribute must\n       include a readable copy of the attribution notices contained\n       within such NOTICE file, excluding those notices that do not\n       pertain to any part of the Derivative Works, in at least one\n       of the following places: within a NOTICE text file distributed\n       as part of the Derivative Works; within the Source form or\n       documentation, if provided along with the Derivative Works; or,\n       within a display generated by the Derivative Works, if and\n       wherever such third-party notices normally appear. The contents\n       of the NOTICE file are for informational purposes only and\n       do not modify the License. 
You may add Your own attribution\n       notices within Derivative Works that You distribute, alongside\n       or as an addendum to the NOTICE text from the Work, provided\n       that such additional attribution notices cannot be construed\n       as modifying the License.\n\n   You may add Your own copyright statement to Your modifications and\n   may provide additional or different license terms and conditions\n   for use, reproduction, or distribution of Your modifications, or\n   for any such Derivative Works as a whole, provided Your use,\n   reproduction, and distribution of the Work otherwise complies with\n   the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n   any Contribution intentionally submitted for inclusion in the Work\n   by You to the Licensor shall be under the terms and conditions of\n   this License, without any additional terms or conditions.\n   Notwithstanding the above, nothing herein shall supersede or modify\n   the terms of any separate license agreement you may have executed\n   with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n   names, trademarks, service marks, or product names of the Licensor,\n   except as required for reasonable and customary use in describing the\n   origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n   agreed to in writing, Licensor provides the Work (and each\n   Contributor provides its Contributions) on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n   implied, including, without limitation, any warranties or conditions\n   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n   PARTICULAR PURPOSE. You are solely responsible for determining the\n   appropriateness of using or redistributing the Work and assume any\n   risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n   whether in tort (including negligence), contract, or otherwise,\n   unless required by applicable law (such as deliberate and grossly\n   negligent acts) or agreed to in writing, shall any Contributor be\n   liable to You for damages, including any direct, indirect, special,\n   incidental, or consequential damages of any character arising as a\n   result of this License or out of the use or inability to use the\n   Work (including but not limited to damages for loss of goodwill,\n   work stoppage, computer failure or malfunction, or any and all\n   other commercial damages or losses), even if such Contributor\n   has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n   the Work or Derivative Works thereof, You may choose to offer,\n   and charge a fee for, acceptance of support, warranty, indemnity,\n   or other liability obligations and/or rights consistent with this\n   License. 
However, in accepting such obligations, You may act only\n   on Your own behalf and on Your sole responsibility, not on behalf\n   of any other Contributor, and only if You agree to indemnify,\n   defend, and hold each Contributor harmless for any liability\n   incurred by, or claims asserted against, such Contributor by reason\n   of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n   To apply the Apache License to your work, attach the following\n   boilerplate notice, with the fields enclosed by brackets \"[]\"\n   replaced with your own identifying information. (Don't include\n   the brackets!)  The text should be enclosed in the appropriate\n   comment syntax for the file format. We also recommend that a\n   file or class name and description of purpose be included on the\n   same \"printed page\" as the copyright notice for easier\n   identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License."
  },
  {
    "path": "sector-base/LICENSE-MIT",
    "content": "Permission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE."
  },
  {
    "path": "sector-base/README.md",
    "content": "# Sector Base\n\nThe Sector Base provides a database of sectors, implemented in Rust as a crate and exporting a C API for consumption by (for example) `go-filecoin`. It will eventually manage the configuration and implementation of access to sectors (sealed and unsealed) and the disk cache required to perform the operations of `filecoin-proofs` and `storage-proofs`. \n\nThese methods will include at least:\n- Incremental writing of pieces to sectors\n- Preprocessing raw sectors to add bit-level padding\n- [In reality, the two items above are merged: we preprocess as we write.]\n- Managing caching and generation of multiple representations of sectors as required\n- Merkle-tree caching/re-generation\n\nThe Sector Base's responsibilities may extend to:\n- Random access to sectors and their nodes across heterogeneous storage types and formats\n- Sector metadata management\n- Sector-packing\n- Sector-building\n- Construction of aggregate merkle trees (merging multiple sectors to generate a single root)\n\nOr even possibly:\n- Random access to sectors indexed by field element (further reducing padding) \n\n## A word on 'padding'\n\nThere are two types of padding performed on raw data before it can be replicated. For the sake of simplicity, we will refer to these as byte-padding and bit-padding. Byte-padding refers to (zero or more) bytes of zeroes added to the end of raw data in order to bring its total size up to the expected sector size.\n\nBit-padding, also called 'preprocessing', refers to the two zero *bits* added after every 254 bits of raw data. Padding is necessary (in the current implementation) so that successive logical field elements (Fr) will be byte-aligned. Because the modulus of the field is a 255-bit prime, there are some 255-bit numbers which are NOT elements of our field. Therefore, in order to ensure we never overflow the field, we can take at most 254 bits of real data for every 256 bits (32 bytes) of byte-aligned storage.\n  \n## Design Notes\n\n### Motivation for Sector Base as a Rust module. Why not part of `go-filecoin`?\n   We need to expose some control over disk/file storage at a level that is difficult to do from `go-filecoin` and across the FFI boundary.\n-   Example: Miners may want to treat disks as raw block devices without a filesystem.\n    \n  For example: Go code may pass a Go pointer to C, provided the Go memory to which it points does not contain any Go pointers. Our earlier attempts to write this disk/storage manager in Go involved passing a pointer to a function defined in Go to Rust, which, when applied to some key, returned a value from a Go map (this simulated the FPS acquiring file paths from the disk/file manager). In addition to this pointer, we passed the key. This panicked at runtime with a message \"cgo argument has Go pointer to Go pointer,\" which makes sense given the [cgo documentation](https://golang.org/cmd/cgo/). This isn't a problem if we let Rust manage the disk/storage manager state.  \n      \n   Moreover, this specific problem is emblematic of the friction imposed by the FFI boundary. In fact, communication between the Sector Base and the proving components of FPS (`filecoin-proofs` and `storage-proofs`) will need to be carefully coordinated for both efficiency and correctness. The issue is less that Go is problematic than that the FFI boundary will interfere with optimal function. That said, Rust’s type system will be quite helpful for the kind of performant genericity required. 
Although this naturally lives on the Rust side of that API boundary, the concerns controlling these functions include those which are only peripherally related to the proofs themselves.\n\n Configuration and tuning of storage concerns will depend on considerations derived from interacting with the Filecoin protocol and blockchain in ways which have potentially nothing to do with the storage proofs. This logic should not be contained in the proof modules. These two considerations delimit what the Sector Base is *not* (part of `go-filecoin` itself, or part of `storage-proofs` which must consume it). This also explains why `sector-base` currently exists as an independent crate packaged under the umbrella of the Filecoin Proving Subsystem (FPS) with source in the `rust-proofs` repository.\n\n## API Reference\n[**Sector Base API**](https://github.com/filecoin-project/rust-proofs/blob/master/sector-base/src/api/mod.rs). The Rust source code serves as the source of truth defining the **Sector Base** API and will be the eventual source of generated documentation.\n\n[**DiskBackedSectorStore API**](https://github.com/filecoin-project/rust-proofs/blob/master/sector-base/src/api/disk_backed_storage.rs)\n\n[**Response Structs**](https://github.com/filecoin-project/rust-proofs/blob/master/sector-base/src/api/responses.rs)\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "sector-base/src/api/disk_backed_storage.rs",
    "content": "use crate::api::errors::SectorManagerErr;\nuse crate::api::sector_store::{SectorConfig, SectorManager, SectorStore};\nuse crate::api::util;\nuse crate::io::fr32::{\n    almost_truncate_to_unpadded_bytes, target_unpadded_bytes, unpadded_bytes, write_padded,\n};\nuse ffi_toolkit::{c_str_to_rust_str, raw_ptr};\nuse libc;\nuse std::env;\nuse std::fs::{create_dir_all, remove_file, File, OpenOptions};\nuse std::io::{Read, Seek, SeekFrom};\nuse std::path::Path;\n\n// These sizes are for SEALED sectors. They are used to calculate the values of setup parameters.\n// They can be overridden by setting the corresponding environment variable (with FILECOIN_PROOFS_ prefix),\n// but this is not recommended, since some sealed sector sizes are invalid. If you must set this manually,\n// ensure the chosen sector size is a multiple of 32.\n\n// Sector size, in bytes, to use when testing real proofs. (real sector store)\npub const REAL_SECTOR_SIZE: u64 = 128; // Override with FILECOIN_PROOFS_REAL_SECTOR_SIZE env var.\n\n// Sector size, in bytes, for tests which fake sealing with a subset of the data. (fast fake sector store)\npub const FAST_SECTOR_SIZE: u64 = 1024; // Override with FILECOIN_PROOFS_FAST_SECTOR_SIZE env var.\n\n// Sector size, in bytes, during live operation -- which also fakes sealing with a subset of the data. (slow fake sector store)\npub const SLOW_SECTOR_SIZE: u64 = 1 << 30; // Override with FILECOIN_PROOFS_SLOW_SECTOR_SIZE env var.\n\n// The delay constants can be overridden by setting the corresponding environment variable (with FILECOIN_PROOFS_ prefix)\n// For example, since SLOW_DELAY_SECONDS is used for live sealing, outside of tests,\n// setting the environment variable, FILECOIN_PROOFS_SLOW_DELAY_SECONDS to 30, will result in sealing\n// which takes approximately 30 seconds (with 15 seconds to get unsealed data).\n\n// Delay, in seconds, for tests which fake sealing with a subset of the data. (fast fake sector store)\npub const FAST_DELAY_SECONDS: u32 = 10; // Override with FILECOIN_PROOFS_FAST_DELAY_SECONDS env var.\n\n// Delay, in seconds during live operation which also fakes sealing with a subset of the data. 
(slow fake sector store)\npub const SLOW_DELAY_SECONDS: u32 = 0; // Override with FILECOIN_PROOFS_SLOW_DELAY_SECONDS env var.\n\nfn sector_size(env_var_name: &str, default: u64) -> u64 {\n    match env::var(env_var_name) {\n        Ok(bytes_string) => bytes_string.parse().unwrap_or(default),\n        Err(_) => default,\n    }\n}\n\nfn delay_seconds(env_var_name: &str, default: u32) -> u32 {\n    match env::var(env_var_name) {\n        Ok(seconds_string) => seconds_string.parse().unwrap_or(default),\n        Err(_) => default,\n    }\n}\n\n/// Initializes and returns a boxed SectorStore instance suitable for exercising the proofs code\n/// to its fullest capacity.\n///\n/// # Arguments\n///\n/// * `staging_dir_path` - path to the staging directory\n/// * `sealed_dir_path`  - path to the sealed directory\n#[no_mangle]\npub unsafe extern \"C\" fn init_new_proof_test_sector_store(\n    staging_dir_path: *const libc::c_char,\n    sealed_dir_path: *const libc::c_char,\n) -> *mut Box<SectorStore> {\n    let boxed = Box::new(new_sector_store(\n        &ConfiguredStore::ProofTest,\n        c_str_to_rust_str(sealed_dir_path).to_string(),\n        c_str_to_rust_str(staging_dir_path).to_string(),\n    ));\n    raw_ptr(boxed)\n}\n\n/// Initializes and returns a boxed SectorStore instance which is very similar to the Alpha-release\n/// SectorStore that Filecoin node-users will rely upon - but with manageably-small delays for seal\n/// and unseal.\n///\n/// # Arguments\n///\n/// * `staging_dir_path` - path to the staging directory\n/// * `sealed_dir_path`  - path to the sealed directory\n#[no_mangle]\npub unsafe extern \"C\" fn init_new_test_sector_store(\n    staging_dir_path: *const libc::c_char,\n    sealed_dir_path: *const libc::c_char,\n) -> *mut Box<SectorStore> {\n    let boxed = Box::new(new_sector_store(\n        &ConfiguredStore::Test,\n        c_str_to_rust_str(sealed_dir_path).to_string(),\n        c_str_to_rust_str(staging_dir_path).to_string(),\n    ));\n    raw_ptr(boxed)\n}\n\n/// Initializes and returns a boxed SectorStore instance which Alpha Filecoin node-users will rely\n/// upon. 
Some operations are substantially delayed; sealing an unsealed sector using this could\n/// take several hours.\n///\n/// # Arguments\n///\n/// * `staging_dir_path` - path to the staging directory\n/// * `sealed_dir_path`  - path to the sealed directory\n#[no_mangle]\npub unsafe extern \"C\" fn init_new_sector_store(\n    staging_dir_path: *const libc::c_char,\n    sealed_dir_path: *const libc::c_char,\n) -> *mut Box<SectorStore> {\n    let boxed = Box::new(new_sector_store(\n        &ConfiguredStore::Live,\n        c_str_to_rust_str(sealed_dir_path).to_string(),\n        c_str_to_rust_str(staging_dir_path).to_string(),\n    ));\n\n    raw_ptr(boxed)\n}\n\n/// Destroys a boxed SectorStore by freeing its memory.\n///\n/// # Arguments\n///\n/// * `ss_ptr` - pointer to a boxed SectorStore\n///\n#[no_mangle]\npub unsafe extern \"C\" fn destroy_storage(ss_ptr: *mut Box<SectorStore>) {\n    let _ = Box::from_raw(ss_ptr);\n}\n\npub struct DiskManager {\n    staging_path: String,\n    sealed_path: String,\n}\n\nimpl SectorManager for DiskManager {\n    fn new_sealed_sector_access(&self) -> Result<String, SectorManagerErr> {\n        self.new_sector_access(Path::new(&self.sealed_path))\n    }\n\n    fn new_staging_sector_access(&self) -> Result<String, SectorManagerErr> {\n        self.new_sector_access(Path::new(&self.staging_path))\n    }\n\n    fn num_unsealed_bytes(&self, access: &str) -> Result<u64, SectorManagerErr> {\n        OpenOptions::new()\n            .read(true)\n            .open(access)\n            .map_err(|err| SectorManagerErr::CallerError(format!(\"{:?}\", err)))\n            .map(|mut f| {\n                target_unpadded_bytes(&mut f)\n                    .map_err(|err| SectorManagerErr::ReceiverError(format!(\"{:?}\", err)))\n            })\n            .and_then(|n| n)\n    }\n\n    fn truncate_unsealed(&self, access: &str, size: u64) -> Result<(), SectorManagerErr> {\n        // I couldn't wrap my head around all this result mapping, so here it is all laid out.\n        match OpenOptions::new().write(true).open(&access) {\n            Ok(mut file) => match almost_truncate_to_unpadded_bytes(&mut file, size) {\n                Ok(padded_size) => match file.set_len(padded_size as u64) {\n                    Ok(_) => Ok(()),\n                    Err(err) => Err(SectorManagerErr::ReceiverError(format!(\"{:?}\", err))),\n                },\n                Err(err) => Err(SectorManagerErr::ReceiverError(format!(\"{:?}\", err))),\n            },\n            Err(err) => Err(SectorManagerErr::CallerError(format!(\"{:?}\", err))),\n        }\n    }\n\n    // TODO: write_and_preprocess should refuse to write more data than will fit. 
In that case, return 0.\n    fn write_and_preprocess(&self, access: &str, data: &[u8]) -> Result<u64, SectorManagerErr> {\n        OpenOptions::new()\n            .read(true)\n            .write(true)\n            .open(access)\n            .map_err(|err| SectorManagerErr::CallerError(format!(\"{:?}\", err)))\n            .and_then(|mut file| {\n                write_padded(data, &mut file)\n                    .map_err(|err| SectorManagerErr::ReceiverError(format!(\"{:?}\", err)))\n                    .map(|n| n as u64)\n            })\n    }\n\n    fn delete_staging_sector_access(&self, access: &str) -> Result<(), SectorManagerErr> {\n        remove_file(access).map_err(|err| SectorManagerErr::CallerError(format!(\"{:?}\", err)))\n    }\n\n    fn read_raw(\n        &self,\n        access: &str,\n        start_offset: u64,\n        num_bytes: u64,\n    ) -> Result<Vec<u8>, SectorManagerErr> {\n        OpenOptions::new()\n            .read(true)\n            .open(access)\n            .map_err(|err| SectorManagerErr::CallerError(format!(\"{:?}\", err)))\n            .and_then(|mut file| -> Result<Vec<u8>, SectorManagerErr> {\n                file.seek(SeekFrom::Start(start_offset))\n                    .map_err(|err| SectorManagerErr::CallerError(format!(\"{:?}\", err)))?;\n\n                let mut buf = vec![0; num_bytes as usize];\n\n                file.read_exact(buf.as_mut_slice())\n                    .map_err(|err| SectorManagerErr::CallerError(format!(\"{:?}\", err)))?;\n\n                Ok(buf)\n            })\n    }\n}\n\nimpl DiskManager {\n    fn new_sector_access(&self, root: &Path) -> Result<String, SectorManagerErr> {\n        let pbuf = root.join(util::rand_alpha_string(32));\n\n        create_dir_all(root)\n            .map_err(|err| SectorManagerErr::ReceiverError(format!(\"{:?}\", err)))\n            .and_then(|_| {\n                File::create(&pbuf)\n                    .map(|_| 0)\n                    .map_err(|err| SectorManagerErr::ReceiverError(format!(\"{:?}\", err)))\n            })\n            .and_then(|_| {\n                pbuf.to_str().map_or_else(\n                    || {\n                        Err(SectorManagerErr::ReceiverError(\n                            \"could not create pbuf\".to_string(),\n                        ))\n                    },\n                    |str_ref| Ok(str_ref.to_owned()),\n                )\n            })\n    }\n}\n\npub struct RealConfig {\n    sector_bytes: u64,\n}\n\npub struct FakeConfig {\n    sector_bytes: u64,\n    delay_seconds: u32,\n}\n\n#[derive(Debug)]\n#[repr(C)]\npub enum ConfiguredStore {\n    Live = 0,\n    Test = 1,\n    ProofTest = 2,\n}\n\npub struct ConcreteSectorStore {\n    config: Box<SectorConfig>,\n    manager: Box<SectorManager>,\n}\n\nimpl SectorStore for ConcreteSectorStore {\n    fn config(&self) -> &SectorConfig {\n        self.config.as_ref()\n    }\n\n    fn manager(&self) -> &SectorManager {\n        self.manager.as_ref()\n    }\n}\n\npub fn new_sector_store(\n    cs: &ConfiguredStore,\n    sealed_path: String,\n    staging_path: String,\n) -> ConcreteSectorStore {\n    let manager = Box::new(DiskManager {\n        staging_path,\n        sealed_path,\n    });\n\n    let config = new_sector_config(cs);\n\n    ConcreteSectorStore { config, manager }\n}\n\npub fn new_sector_config(cs: &ConfiguredStore) -> Box<SectorConfig> {\n    match *cs {\n        ConfiguredStore::Live => Box::new(FakeConfig {\n            sector_bytes: sector_size(\"FILECOIN_PROOFS_SLOW_SECTOR_SIZE\", 
SLOW_SECTOR_SIZE),\n            delay_seconds: delay_seconds(\"FILECOIN_PROOFS_SLOW_DELAY_SECONDS\", SLOW_DELAY_SECONDS),\n        }),\n        ConfiguredStore::Test => Box::new(FakeConfig {\n            sector_bytes: sector_size(\"FILECOIN_PROOFS_FAST_SECTOR_SIZE\", FAST_SECTOR_SIZE),\n            delay_seconds: delay_seconds(\"FILECOIN_PROOFS_FAST_DELAY_SECONDS\", FAST_DELAY_SECONDS),\n        }),\n        ConfiguredStore::ProofTest => Box::new(RealConfig {\n            sector_bytes: sector_size(\"FILECOIN_PROOFS_SECTOR_SIZE\", REAL_SECTOR_SIZE),\n        }),\n    }\n}\n\nimpl SectorConfig for RealConfig {\n    fn is_fake(&self) -> bool {\n        false\n    }\n\n    fn simulate_delay_seconds(&self) -> Option<u32> {\n        None\n    }\n\n    fn max_unsealed_bytes_per_sector(&self) -> u64 {\n        unpadded_bytes(self.sector_bytes)\n    }\n\n    fn sector_bytes(&self) -> u64 {\n        self.sector_bytes\n    }\n\n    fn dummy_parameter_cache_name(&self) -> String {\n        String::from(\"REAL_DUMMY_API_PARAMETERS\")\n    }\n}\n\nimpl SectorConfig for FakeConfig {\n    fn is_fake(&self) -> bool {\n        true\n    }\n\n    fn simulate_delay_seconds(&self) -> Option<u32> {\n        Some(self.delay_seconds)\n    }\n\n    fn max_unsealed_bytes_per_sector(&self) -> u64 {\n        unpadded_bytes(self.sector_bytes)\n    }\n\n    fn sector_bytes(&self) -> u64 {\n        self.sector_bytes\n    }\n\n    fn dummy_parameter_cache_name(&self) -> String {\n        String::from(\"FAKE_DUMMY_API_PARAMETERS_{}\")\n    }\n}\n\n#[cfg(test)]\npub mod tests {\n    use super::*;\n\n    use crate::io::fr32::FR32_PADDING_MAP;\n    use std::fs::create_dir_all;\n    use std::fs::File;\n    use std::io::Read;\n    use tempfile;\n\n    fn create_sector_store(cs: &ConfiguredStore) -> Box<SectorStore> {\n        let staging_path = tempfile::tempdir().unwrap().path().to_owned();\n        let sealed_path = tempfile::tempdir().unwrap().path().to_owned();\n\n        create_dir_all(&staging_path).expect(\"failed to create staging dir\");\n        create_dir_all(&sealed_path).expect(\"failed to create sealed dir\");\n\n        Box::new(new_sector_store(\n            &cs,\n            sealed_path.to_str().unwrap().to_owned(),\n            staging_path.to_str().unwrap().to_owned(),\n        ))\n    }\n\n    fn read_all_bytes(access: &str) -> Vec<u8> {\n        let mut file = File::open(access).unwrap();\n        let mut buf = Vec::new();\n        file.read_to_end(&mut buf).unwrap();\n\n        buf\n    }\n\n    #[test]\n    fn max_unsealed_bytes_per_sector_checks() {\n        let xs = vec![\n            (ConfiguredStore::Live, 1065353216),\n            (ConfiguredStore::Test, 1016),\n            (ConfiguredStore::ProofTest, 127),\n        ];\n\n        for (configured_store, num_bytes) in xs {\n            let storage: Box<SectorStore> = create_sector_store(&configured_store);\n            let cfg = storage.config();\n            assert_eq!(cfg.max_unsealed_bytes_per_sector(), num_bytes);\n        }\n    }\n\n    #[test]\n    fn unsealed_sector_write_and_truncate() {\n        let configured_store = ConfiguredStore::ProofTest;\n        let storage: Box<SectorStore> = create_sector_store(&configured_store);\n        let mgr = storage.manager();\n\n        let access = mgr\n            .new_staging_sector_access()\n            .expect(\"failed to create staging file\");\n\n        // shared amongst test cases\n        let contents = &[2u8; 500];\n\n        // write_and_preprocess\n        {\n            let n = mgr\n        
        .write_and_preprocess(&access, contents)\n                .expect(\"failed to write\");\n\n            // buffer the file's bytes into memory after writing bytes\n            let buf = read_all_bytes(&access);\n            let output_bytes_written = buf.len();\n\n            // ensure that we reported the correct number of written bytes\n            assert_eq!(contents.len(), n as usize);\n\n            // ensure the file we wrote to contains the expected bytes\n            assert_eq!(contents[0..32], buf[0..32]);\n            assert_eq!(8u8, buf[32]);\n\n            // read the file into memory again after the padded write\n            let buf = read_all_bytes(&access);\n\n            // ensure the file has the expected padded length\n            assert_eq!(504, buf.len());\n\n            // also ensure this is the amount we calculate\n            let expected_padded_bytes =\n                FR32_PADDING_MAP.transform_byte_offset(contents.len(), true);\n            assert_eq!(expected_padded_bytes, output_bytes_written);\n\n            // ensure num_unsealed_bytes returns the number of data bytes written.\n            let num_bytes_written = mgr\n                .num_unsealed_bytes(&access)\n                .expect(\"failed to get num bytes\");\n            assert_eq!(500, num_bytes_written as usize);\n        }\n\n        // truncation and padding\n        {\n            let xs: Vec<(usize, bool)> = vec![(32, true), (31, false), (1, false)];\n\n            for (num_bytes, expect_fr_shift) in xs {\n                mgr.truncate_unsealed(&access, num_bytes as u64)\n                    .expect(\"failed to truncate\");\n\n                // read the file into memory again - this time after we truncate\n                let buf = read_all_bytes(&access);\n\n                // All but last bytes are identical.\n                assert_eq!(contents[0..num_bytes], buf[0..num_bytes]);\n\n                if expect_fr_shift {\n                    // The last byte (first of new Fr) has been shifted by two bits of padding.\n                    assert_eq!(contents[num_bytes] << 2, buf[num_bytes]);\n\n                    // ensure the buffer contains the extra byte\n                    assert_eq!(num_bytes + 1, buf.len());\n                } else {\n                    // no extra byte here\n                    assert_eq!(num_bytes, buf.len());\n                }\n\n                // ensure num_unsealed_bytes returns the correct number post-truncation\n                let num_bytes_written = mgr\n                    .num_unsealed_bytes(&access)\n                    .expect(\"failed to get num bytes\");\n                assert_eq!(num_bytes, num_bytes_written as usize);\n            }\n        }\n    }\n\n    #[test]\n    fn deletes_staging_access() {\n        let configured_store = ConfiguredStore::ProofTest;\n\n        let store = create_sector_store(&configured_store);\n        let access = store.manager().new_staging_sector_access().unwrap();\n\n        assert!(store.manager().read_raw(&access, 0, 0).is_ok());\n\n        assert!(store\n            .manager()\n            .delete_staging_sector_access(&access)\n            .is_ok());\n\n        assert!(store.manager().read_raw(&access, 0, 0).is_err());\n    }\n}\n"
  },
  {
    "path": "sector-base/src/api/errors.rs",
    "content": "#[derive(Debug, Fail)]\npub enum SectorManagerErr {\n    #[fail(display = \"unclassified error: {}\", _0)]\n    UnclassifiedError(String),\n\n    #[fail(display = \"caller error: {}\", _0)]\n    CallerError(String),\n\n    #[fail(display = \"receiver error: {}\", _0)]\n    ReceiverError(String),\n}\n"
  },
  {
    "path": "sector-base/src/api/mod.rs",
    "content": "pub mod disk_backed_storage;\npub mod errors;\npub mod sector_store;\npub mod util;\n"
  },
  {
    "path": "sector-base/src/api/sector_store.rs",
    "content": "use crate::api::errors::SectorManagerErr;\n\npub trait SectorConfig {\n    /// if true, uses something other exact bits, correct parameters, or full proofs\n    fn is_fake(&self) -> bool;\n\n    /// if provided, an artificial delay to seal\n    fn simulate_delay_seconds(&self) -> Option<u32>;\n\n    /// returns the number of bytes that will fit into a sector managed by this store\n    fn max_unsealed_bytes_per_sector(&self) -> u64;\n\n    /// returns the number of bytes in a sealed sector managed by this store\n    fn sector_bytes(&self) -> u64;\n\n    /// We need a distinguished place to cache 'the' parameters corresponding to the SetupParams\n    /// currently being used. These are only easily generated at replication time but need to be\n    /// accessed at verification time too.\n    fn dummy_parameter_cache_name(&self) -> String;\n}\n\npub trait SectorManager {\n    /// provisions a new sealed sector and reports the corresponding access\n    fn new_sealed_sector_access(&self) -> Result<String, SectorManagerErr>;\n\n    /// provisions a new staging sector and reports the corresponding access\n    fn new_staging_sector_access(&self) -> Result<String, SectorManagerErr>;\n\n    /// reports the number of bytes written to an unsealed sector\n    fn num_unsealed_bytes(&self, access: &str) -> Result<u64, SectorManagerErr>;\n\n    /// sets the number of bytes in an unsealed sector identified by `access`\n    fn truncate_unsealed(&self, access: &str, size: u64) -> Result<(), SectorManagerErr>;\n\n    /// writes `data` to the staging sector identified by `access`, incrementally preprocessing `access`\n    fn write_and_preprocess(&self, access: &str, data: &[u8]) -> Result<u64, SectorManagerErr>;\n\n    fn delete_staging_sector_access(&self, access: &str) -> Result<(), SectorManagerErr>;\n\n    fn read_raw(\n        &self,\n        access: &str,\n        start_offset: u64,\n        num_bytes: u64,\n    ) -> Result<Vec<u8>, SectorManagerErr>;\n}\n\npub trait SectorStore {\n    fn config(&self) -> &SectorConfig;\n    fn manager(&self) -> &SectorManager;\n}\n"
  },
  {
    "path": "sector-base/src/api/util/mod.rs",
    "content": "use rand::{thread_rng, Rng};\n\n// creates a string of size len containing uppercase alpha-chars\npub fn rand_alpha_string(len: u8) -> String {\n    let mut str = String::new();\n    let mut rng = thread_rng();\n\n    for _ in 0..len {\n        let ch = rng.gen_range(b'A', b'Z') as char;\n        str.push(ch);\n    }\n\n    str\n}\n"
  },
  {
    "path": "sector-base/src/error.rs",
    "content": "use failure::Error;\n\npub type Result<T> = ::std::result::Result<T, Error>;\n"
  },
  {
    "path": "sector-base/src/io/fr32.rs",
    "content": "use std::cmp::min;\nuse std::io::{self, Read, Seek, SeekFrom, Write};\nuse std::iter::FromIterator;\n\nuse bitvec::{self, BitVec};\nuse itertools::Itertools;\n\n/** PaddingMap represents a mapping between data and its padded equivalent.\n\nThe padding process takes a *byte-aligned stream* of unpadded *raw* data\nas input and returns another byte stream where padding is applied every\n`data_bits` to align them to the byte boundary (`element_bits`). The\n(inverse) *unpadding* process maps that output back to the raw input\nthat generated it.\n\n# Padded layout\n\nAt the *byte-level*, the padded layout is:\n\n```text\n      (full element)              (full)                 (incomplete)\n||  data_bits  pad_bits  ||  data_bits  pad_bits  ||  some_data  (no_padding)\n                         ^^                               ^^\n                  element boundary                (some_data < data_bits)\n                   (byte-aligned)\n```\n\nEach *element* is a byte-aligned stream comprised of a *full unit* of `data_bits`\nwith `pad_bits` at the end to byte-align it (where `pad_bits` is less than a byte,\nthis is a *sub-byte padding* scheme). After the last element boundary there may be\nan incomplete unit of data (`some_data`) with a length smaller than `data_bits`\nthat hasn't been padded. The padding rules are:\n  1. Padding is always applied to a full unit of `data_bits`.\n  2. A full data unit cannot exist without its corresponding padding.\n  3. A unit of padding is complete by definition: padding can only be\n     applied fully to each element.\n  4. If there is padding present then there has to be an already formed\n     element there (an element is full if and only if its data unit is full).\n\n# Last byte\n\nWhen returning the byte-aligned output generated from the padded *bitstream*\n(since the padding is done at the bit-level) the conversion results in the\nlast byte having (potentially) more bits than desired. At the *bit-level*\nthe layout of the last byte can either be a complete element (bits of raw\ndata followed by the corresponding padding bits) or an incomplete unit of\ndata: some number of *valid* data (D) bits followed by any number of *extra*\nbits (X) necessary to complete the byte-aligned stream:\n\n```text\n |   D   D   D   D   X   X   X   X   |\n         (data)         (extra)      ^ byte boundary (end of output)\n```\n\n(This diagram is just for illustrative purposes, we actually return the output\n in little-endian order, see `Fr32BitVec`).\n\nIt's important to distinguish these extra bits (generated as a side\neffect of the conversion to a byte-aligned stream) from the padding bits\nthemselves introduced in the padding process: even though both will be\nleft with a zero value, these extra bits are a place-holder for the actual\nraw data bits needed to complete the current unit of data (and hence also\nthe element, with the corresponding padding bits added after it). 
Since\nextra bits are only a product of an incomplete unit of data there can't\nbe extra bits after padding bits.\n\nThere's no metadata signaling the number of extra bits present in the\nlast byte in any given padded layout, this is deduced from the fact\nthat there's only a single number of valid data bits in the last byte,\nand hence a number of data bits in total, that maps to a byte-aligned\n(multiple of 8) raw data stream that could have been used as input.\n\n# Example: `FR32_PADDING_MAP`\n\nIn this case the `PaddingMap` is defined with a data unit of 254 bits that\nare byte aligned to a 256-bit (32-byte) element. If the user writes as input,\nsay, 40 bytes (320 bits) of raw input data to the padding process the resulting\nlayout would be, at the element (byte) level:\n\n```text\n      (full element: 32 bytes)         (incomplete: 9 bytes)\n||  data_bits: 254  pad_bits: 2  ||   some_data: 66 bits (+ extra bits)\n                                 ^^\n                          element boundary\n```\n\nThat is, of the original 320 bits (40 bytes) of raw input data, 254 are\npadded in the first element and the remaining 66 bits form the incomplete\ndata unit after it, which is aligned to 9 bytes. At the bit level, that\nlast incomplete byte will have 2 valid bits and 6 extra bits.\n\n# Key terms\n\nCollection of terms introduced in this documentation (with the format\n`*<new-term>*`). This section doesn't provide a self-contained definition\nof them (to avoid unnecessary repetition), it just provides (when appropriate)\nan additional summary of what was already discussed.\n\n * Raw data: unpadded user-supplied data (we don't use the *unpadded* term\n   to avoid excessive *padding* suffixes in the code). Padding (data) bits.\n * Element: byte-aligned stream consisting of a full unit of data plus the\n   padding bits.\n * Full unit of raw `data_bits` (always followed by padding). Incomplete unit,\n   not followed by padding, doesn't form an element.\n * Byte-aligned stream: always input and output of the (un)padding process,\n   either as raw data or padded (using the term \"byte-aligned\" and not \"byte\n   stream\" to stress the boundaries of the elements). Bit streams: used internally\n   when padding data (never returned as bits).\n * Valid data bits, only in the context of the last byte of a byte-aligned stream\n   generated from the padding process. Extra bits: what's left unused of the last\n   byte (in a way the extra bits are the padding at the byte-level, but we don't\n   use that term here to avoid confusions).\n * Sub-byte padding.\n\n**/\n#[derive(Debug)]\npub struct PaddingMap {\n    /// The number of bits of raw data in an element.\n    data_bits: usize,\n    /// Number of bits in an element: `data_bits` + `pad_bits()`. Its value\n    /// is fixed to the next byte-aligned size after `data_bits` (sub-byte padding).\n    element_bits: usize,\n}\n// TODO: Optimization: Evaluate saving the state of a (un)padding operation\n// inside (e.g., as a cursor like in `BitVec`), maybe not in this structure but\n// in a new `Padder` structure which would remember the positions (remaining\n// data bits in the element, etc.) 
to avoid recalculating them each time across\n// different (un)pad calls.\n\n// This is the padding map corresponding to Fr32.\n// Most of the code in this module is general-purpose and could move elsewhere.\n// The application-specific wrappers which implicitly use Fr32 embed the FR32_PADDING_MAP.\npub const FR32_PADDING_MAP: PaddingMap = PaddingMap {\n    data_bits: 254,\n    element_bits: 256,\n};\n\npub type Fr32BitVec = BitVec<bitvec::LittleEndian, u8>;\n// TODO: Rename, drop the `Fr32` prefix. Leaving it for now since\n// the optimization stage will likely remove it.\n\n////////////////////////////////////////////////////////////////////////////////////////////////////\n// Convenience interface for API functions: all bundle FR32_PADDING_MAP;\n// parameter/return types are tuned for current caller convenience.\n\npub fn target_unpadded_bytes<W: ?Sized>(target: &mut W) -> io::Result<u64>\nwhere\n    W: Seek,\n{\n    let (_, unpadded, _) = FR32_PADDING_MAP.target_offsets(target)?;\n\n    Ok(unpadded)\n}\n\n// Leave the actual truncation to caller, since we can't do it generically.\n// Return the length to which target should be truncated.\n// We may also want to handle zero-padding of what will become the final byte of target.\n// Technically, this should be okay though because that byte will always be overwritten later.\n// If we decide this is unnecessary, then we don't need to pass target at all.\npub fn almost_truncate_to_unpadded_bytes<W: ?Sized>(\n    _target: &mut W,\n    length: u64,\n) -> io::Result<usize>\nwhere\n    W: Read + Write + Seek,\n{\n    let padded =\n        BitByte::from_bits(FR32_PADDING_MAP.transform_bit_offset((length * 8) as usize, true));\n    let real_length = padded.bytes_needed();\n    let _final_bit_count = padded.bits;\n    Ok(real_length)\n}\n\npub fn unpadded_bytes(padded_bytes: u64) -> u64 {\n    FR32_PADDING_MAP.transform_byte_offset(padded_bytes as usize, false) as u64\n}\n\npub fn padded_bytes(unpadded_bytes: usize) -> usize {\n    FR32_PADDING_MAP.transform_byte_offset(unpadded_bytes, true)\n}\n\n////////////////////////////////////////////////////////////////////////////////////////////////////\n// BitByte represents a size expressed in bytes extended\n// with bit precision, that is, not rounded.\n// Invariant: it is an error for bits to be > 7.\n#[derive(Debug)]\npub struct BitByte {\n    bytes: usize,\n    bits: usize,\n}\n\nimpl BitByte {\n    // Create a BitByte from number of bits. 
Guaranteed to return a well-formed value (bits < 8).\n    pub fn from_bits(bits: usize) -> BitByte {\n        BitByte {\n            bytes: bits / 8,\n            bits: bits % 8,\n        }\n    }\n\n    pub fn from_bytes(bytes: usize) -> BitByte {\n        Self::from_bits(bytes * 8)\n    }\n\n    // How many bits in the BitByte (inverse of from_bits).\n    pub fn total_bits(&self) -> usize {\n        self.bytes * 8 + self.bits\n    }\n\n    // True if the BitByte has no bits component.\n    pub fn is_byte_aligned(&self) -> bool {\n        self.bits == 0\n    }\n\n    // How many distinct bytes are needed to represent data of this size?\n    pub fn bytes_needed(&self) -> usize {\n        self.bytes\n            + if self.bits == 0 {\n                0\n            } else {\n                (self.bits + 8) / 8\n            }\n    }\n}\n\nimpl PaddingMap {\n    pub fn new(data_bits: usize, element_bits: usize) -> PaddingMap {\n        // Check that we add less than 1 byte of padding (sub-byte padding).\n        assert!(element_bits - data_bits <= 7);\n        // Check that the element is byte aligned.\n        assert_eq!(element_bits % 8, 0);\n\n        PaddingMap {\n            data_bits,\n            element_bits,\n        }\n    }\n\n    pub fn pad(&self, bits_out: &mut Fr32BitVec) {\n        for _ in 0..self.pad_bits() {\n            bits_out.push(false)\n        }\n        // TODO: Optimization: Drop this explicit `push` padding; the padding\n        // should happen implicitly when byte-aligning the data unit.\n    }\n\n    pub fn pad_bits(&self) -> usize {\n        self.element_bits - self.data_bits\n    }\n\n    // Transform an offset (either a position or a size) *expressed in\n    // bits* in a raw byte-aligned data stream to its equivalent in a\n    // generated padded bit stream, that is, not byte aligned (so we\n    // don't count the extra bits here). If `padding` is `false`, calculate\n    // the inverse transformation.\n    pub fn transform_bit_offset(&self, pos: usize, padding: bool) -> usize {\n        // Set the sizes we're converting to and from.\n        let (from_size, to_size) = if padding {\n            (self.data_bits, self.element_bits)\n        } else {\n            (self.element_bits, self.data_bits)\n        };\n\n        // For both the padding and unpadding cases the operation is the same.\n        // The quotient is the number of full units: either elements (in the padded\n        // layout) or groups of `data_bits` (in the raw data input) that will be\n        // converted to full elements.\n        // The remainder (in both cases) is the last *incomplete* part of either of\n        // the two. Even in the padded layout, if there is an incomplete element, it\n        // has to consist *only* of data (see `PaddingMap#padded-layout`). 
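For\n        // example, padding `pos = 300` raw bits with `FR32_PADDING_MAP` maps to\n        // `1 * 256 + 46 = 302` padded bits. 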
That amount\n        // of spare raw data doesn't need conversion; it can just be added to the new\n        // position.\n        let (full_elements, incomplete_data) = div_rem(pos, from_size);\n        (full_elements * to_size) + incomplete_data\n    }\n\n    // Similar to `transform_bit_offset`, this function transforms an offset\n    // expressed in bytes, that is, we are taking into account the extra\n    // bits here.\n    // TODO: Evaluate the relationship between this function and `transform_bit_offset`;\n    // it seems the two could be merged, or at least restructured to better expose\n    // their differences.\n    pub fn transform_byte_offset(&self, pos: usize, padding: bool) -> usize {\n        let transformed_bit_pos = self.transform_bit_offset(pos * 8, padding);\n\n        let transformed_byte_pos = transformed_bit_pos as f64 / 8.;\n        // TODO: Optimization: It might end up being cheaper to avoid this\n        // float conversion and use / and %.\n\n        // When padding, the final bits in the bit stream will grow into the\n        // last (potentially incomplete) byte of the byte stream, so round the\n        // number up (`ceil`). When unpadding, there's no way to know a priori\n        // how many valid bits are in the last byte; we have to choose the number\n        // that fits in a byte-aligned raw data stream, so round the number down\n        // to that (`floor`).\n        (if padding {\n            transformed_byte_pos.ceil()\n        } else {\n            transformed_byte_pos.floor()\n        }) as usize\n    }\n\n    // From the `position` specified, it returns:\n    // - the absolute position of the start of the next element,\n    //   in bytes (since elements, with padding, are byte aligned).\n    // - the number of bits left to read (write) from (to) the current\n    //   data unit (assuming it's full).\n    pub fn next_boundary(&self, position: &BitByte) -> (usize, usize) {\n        let position_bits = position.total_bits();\n\n        let (_, bits_after_last_boundary) = div_rem(position_bits, self.element_bits);\n\n        let remaining_data_unit_bits = self.data_bits - bits_after_last_boundary;\n\n        let next_element_position_bits = position_bits + remaining_data_unit_bits + self.pad_bits();\n\n        (next_element_position_bits / 8, remaining_data_unit_bits)\n    }\n\n    // For a `Seek`able `target` of a byte-aligned padded layout, return:\n    // - the size in bytes\n    // - the size in bytes of raw data which corresponds to the `target` size\n    // - a BitByte representing the number of padded bits contained in the\n    //   byte-aligned padded layout\n    pub fn target_offsets<W: ?Sized>(&self, target: &mut W) -> io::Result<(u64, u64, BitByte)>\n    where\n        W: Seek,\n    {\n        // The current position in `target` is the number of padded bytes already written\n        // to the byte-aligned stream.\n        let padded_bytes = target.seek(SeekFrom::End(0))?;\n\n        // Deduce the number of input raw bytes that generated that padded byte size.\n        let raw_data_bytes = self.transform_byte_offset(padded_bytes as usize, false);\n\n        // With the number of raw data bytes known, we can now derive the number of\n        // bits in the generated padded bit stream (before it was converted to a\n        // byte-aligned stream); that is, `raw_data_bytes * 8` is not necessarily\n        // `padded_bits`.\n        let padded_bits = self.transform_bit_offset(raw_data_bytes * 8, true);\n\n        
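// E.g., for FR32_PADDING_MAP: 41 padded bytes -> 40 raw bytes -> 322 padded\n        // bits, i.e. 2 valid bits in the 41st byte.\n        Ok((\n            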
padded_bytes,\n            raw_data_bytes as u64,\n            BitByte::from_bits(padded_bits),\n        ))\n        // TODO: Why do we use `usize` internally and `u64` externally?\n    }\n}\n\n#[inline]\nfn div_rem(a: usize, b: usize) -> (usize, usize) {\n    let div = a / b;\n    let rem = a % b;\n    (div, rem)\n}\n\npub fn write_padded<W: ?Sized>(source: &[u8], target: &mut W) -> io::Result<usize>\nwhere\n    W: Read + Write + Seek,\n{\n    // In order to optimize alignment in the common case of writing from an aligned start,\n    // we should make the chunk a multiple of 128.\n    // n was hand-tuned to do reasonably well in the benchmarks.\n    let n = 1000;\n    let chunk_size = 128 * n;\n\n    let mut written = 0;\n\n    for chunk in source.chunks(chunk_size) {\n        written += write_padded_aux(&FR32_PADDING_MAP, chunk, target)?;\n    }\n\n    Ok(written)\n}\n\n/** Padding process.\n\nRead a `source` of raw byte-aligned data, pad it in a bit stream and\nwrite a byte-aligned version of it in the `target`. The `target` needs\nto implement (besides `Write`) the `Read` and `Seek` traits since the\nlast byte written may be incomplete and will need to be rewritten.\n\nThe reader will always be byte-aligned; the writer will operate with\nbit precision since we may have (when calling this function multiple\ntimes) a written `target` with extra bits (that need to be overwritten)\nand also incomplete data units.\nThe ideal alignment scenario is for the writer to be positioned at the\nbyte-aligned element boundary and just write whole chunks of `data_bits`\n(full data units) followed by their corresponding padding. To get there\nwe then need to handle the potential bit-level misalignments:\n  1. Extra bits: the last byte is only partially valid, so we\n     need to get some bits from the `source` to overwrite them.\n  2. Incomplete data unit: we need to fill the rest of it and add the padding\n     to form an element that positions the writer at the desired boundary.\n
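\nFor example, with `FR32_PADDING_MAP`: if `target` already holds one full\nelement plus 66 data bits (2 valid bits in its last byte), step (1) recovers\nthose 2 bits and step (2) takes up to `254 - 66 = 188` bits from `source`,\nthen appends the 2 padding bits, leaving the writer element-aligned for\nthe bulk writes that follow.\n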
**/\nfn write_padded_aux<W: ?Sized>(\n    padding_map: &PaddingMap,\n    source: &[u8],\n    target: &mut W,\n) -> io::Result<usize>\nwhere\n    W: Read + Write + Seek,\n{\n    // TODO: Change the name; this is the real padded-write function, the previous\n    // one just partitions data in chunks.\n\n    // TODO: Check the `source` length; if it's zero, we should return here and avoid\n    // all the alignment calculations that will be worthless (because we won't have\n    // any data with which to align).\n\n    // Bit stream collecting the bits that will be written to the byte-aligned `target`.\n    let mut bit_stream = Fr32BitVec::new();\n\n    let (padded_bytes, _, padded_bits) = padding_map.target_offsets(target)?;\n\n    // (1): Overwrite the extra bits (if any): we actually don't write in-place; we\n    // remove the last byte and extract its valid bits to `bit_stream` to be later rewritten\n    // with new data taken from the `source`.\n    if !padded_bits.is_byte_aligned() {\n        // Read the last incomplete byte and leave the `target` positioned to overwrite\n        // it in the next `write_all`.\n        let last_byte = &mut [0u8; 1];\n        target.seek(SeekFrom::Start(padded_bytes - 1))?;\n        target.read_exact(last_byte)?;\n        target.seek(SeekFrom::Start(padded_bytes - 1))?;\n        // TODO: Can we use a relative `SeekFrom::End` seek to avoid\n        // setting our absolute `padded_bytes` position?\n\n        // Extract the valid bits from the last byte (the `bits` fraction\n        // of the `padded_bits` bit stream that doesn't complete a byte).\n        let mut last_byte_as_bitvec = Fr32BitVec::from(&last_byte[..]);\n        last_byte_as_bitvec.truncate(padded_bits.bits);\n        bit_stream.extend(last_byte_as_bitvec);\n    };\n\n    // (2): Fill the current data unit, adding `missing_data_bits` from the\n    // `source` (if available, or as many bits as we have).\n    let (_, missing_data_bits) = padding_map.next_boundary(&padded_bits);\n\n    // Check if we have enough `source_bits` to complete the data unit (and hence\n    // add the padding and complete the element) or if we'll use all the `source_bits`\n    // just to increase (but not complete) the current data unit (and hence we won't pad).\n    let source_bits = source.len() * 8;\n    let (data_bits_to_write, fills_data_unit) = if missing_data_bits <= source_bits {\n        (missing_data_bits, true)\n    } else {\n        (source_bits, false)\n    };\n    // TODO: What happens if we were already at the element boundary?\n    // Would this code write 0 (`data_bits_to_write`) bits and then\n    // add an extra padding?\n    bit_stream.extend(\n        Fr32BitVec::from(source)\n            .into_iter()\n            .take(data_bits_to_write),\n    );\n    if fills_data_unit {\n        padding_map.pad(&mut bit_stream);\n    }\n\n    // TODO: Optimization case: if `missing_data_bits == source_bits` (last chunk being\n    // processed) do not bother to pad (setting `fills_data_unit` to `false`), which will\n    // implicitly convert the extra bits to padding bits.\n\n    // Now we are at the element boundary; write entire chunks of full data\n    // units with their padding.\n\n    // If we completed the previous element (`fills_data_unit`) then we may still have\n    // some data left.\n    if fills_data_unit {\n        
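// Skip the bits already consumed to complete the previous element and\n        // group the rest into (at most) `data_bits`-sized chunks.\n        let remaining_unpadded_chunks = 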
Fr32BitVec::from(source)\n            .into_iter()\n            .skip(data_bits_to_write)\n            // TODO: Not having a \"drop first N bits\" in `BitVec` makes us track\n            // the already-used bits in our logic, dragging them along until we\n            // apply the iterator.\n            .chunks(padding_map.data_bits);\n\n        for chunk in remaining_unpadded_chunks.into_iter() {\n            let mut bits = Fr32BitVec::from_iter(chunk);\n\n            // If this chunk is a full unit of data then add the padding; if not,\n            // this is the last (incomplete) chunk; it will be `some_data` in the\n            // next write cycle (which we'll again try to align to the element\n            // boundary).\n            if bits.len() == padding_map.data_bits {\n                padding_map.pad(&mut bits);\n            }\n\n            bit_stream.extend(bits);\n        }\n    }\n\n    let out = &bit_stream.into_boxed_slice();\n    target.write_all(&out)?;\n\n    // Always return the expected number of bytes, since this function will fail if write_all does.\n    Ok(source.len())\n}\n\n// offset and len are based on the unpadded data, so\n// if [0, 1, ..., 255] was the original unpadded data, offset 3 and len 4 would return\n// [3, 4, 5, 6].\npub fn write_unpadded<W: ?Sized>(\n    source: &[u8],\n    target: &mut W,\n    offset: usize,\n    len: usize,\n) -> io::Result<usize>\nwhere\n    W: Write,\n{\n    // In order to optimize alignment in the common case of writing from an aligned start,\n    // we should make the chunk a multiple of 128.\n    // n was hand-tuned to do reasonably well in the benchmarks.\n    let n = 1000;\n    let chunk_size = 128 * n;\n\n    let mut written = 0;\n\n    let mut offset = offset;\n    let mut len = len;\n\n    for chunk in source.chunks(chunk_size) {\n        let write_len = min(len, chunk.len());\n\n        written += write_unpadded_aux(&FR32_PADDING_MAP, source, target, offset, write_len)?;\n        offset += write_len;\n        len -= write_len;\n    }\n\n    Ok(written)\n}\n\n/**  Unpadding process.\n\nRead a `source` of padded data and recover from it the byte-aligned\nraw data, writing it in `target`, where `write_pos` specifies from which\nbyte of the raw data stream to start recovering to, up to `max_write_size`\nbytes.\n\nThere are 3 limits that tell us how much padded data to process in\neach iteration (`bits_to_extract`):\n1. Element boundary: we can process only one element at a time (to be\n   able to skip the padding bits).\n2. End of `source`: no more data to read.\n3. No more space to write the recovered raw data: we shouldn't write\n   into the `target` beyond `max_write_size`.\n
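\nFor example, with `FR32_PADDING_MAP`: recovering from raw byte 32 positions\nthe reader at padded bit 258 (byte 32, bit 2); the first iteration reads the\nremaining 252 data bits of that element and then skips its 2 padding bits,\nlanding on the boundary at padded byte 64.\n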
\nThe reader will generally operate with bit precision; even if the padded\nlayout is byte-aligned (no extra bits), the data inside it isn't (since\nwe pad at the bit-level).\n**/\npub fn write_unpadded_aux<W: ?Sized>(\n    padding_map: &PaddingMap,\n    source: &[u8],\n    target: &mut W,\n    write_pos: usize,\n    max_write_size: usize,\n) -> io::Result<usize>\nwhere\n    W: Write,\n{\n    // Position of the reader in the padded bit stream layout, deduced from\n    // the position of the writer (`write_pos`) in the raw data layout.\n    let mut read_pos = BitByte::from_bits(padding_map.transform_bit_offset(write_pos * 8, true));\n\n    // Specify the maximum data to recover (write) in bits, since the data unit\n    // in the element (in contrast with the original raw data that generated it)\n    // is not byte aligned.\n    let max_write_size_bits = max_write_size * 8;\n\n    // Recovered raw data unpadded from the `source` which will\n    // be later packed in bytes and written to the `target`.\n    let mut raw_data = Fr32BitVec::new();\n\n    // If there is no more data to read or no more space to write, stop.\n    while read_pos.bytes < source.len() && raw_data.len() < max_write_size_bits {\n        // (1): Find the element boundary and, assuming that there is a full\n        //      unit of data (which actually may be incomplete), how many bits\n        //      are left to read from `read_pos`.\n        let (next_element_position, mut bits_to_extract) = padding_map.next_boundary(&read_pos);\n\n        // (2): As the element may be incomplete, check how much data is\n        //      actually available so as not to access the `source` past\n        //      its limit.\n        let read_element_end = min(next_element_position, source.len());\n\n        // (3): Don't read more than `max_write_size`.\n        let bits_left_to_write = max_write_size_bits - raw_data.len();\n        bits_to_extract = min(bits_to_extract, bits_left_to_write);\n\n        // Extract the specified `bits_to_extract` bits, skipping the first\n        // `read_pos.bits` which have already been processed in a previous\n        // iteration.\n        raw_data.extend(\n            Fr32BitVec::from(&source[read_pos.bytes..read_element_end])\n                .into_iter()\n                .skip(read_pos.bits)\n                .take(bits_to_extract),\n        );\n\n        // Position the reader at the next element boundary; this will be ignored\n        // if we already hit limits (2) or (3) (in that case this was the last iteration).\n        read_pos = BitByte {\n            bytes: next_element_position,\n            bits: 0,\n        };\n    }\n\n    // TODO: Don't write the whole output into a huge BitVec.\n    // Instead, write it incrementally –\n    // but ONLY when the bits waiting in `raw_data` are byte-aligned, i.e. 
a multiple of 8\n\n    let boxed_slice = raw_data.into_boxed_slice();\n\n    target.write_all(&boxed_slice)?;\n\n    Ok(boxed_slice.len())\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use pairing::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use std::io::Cursor;\n    use storage_proofs::fr32::bytes_into_fr;\n\n    #[test]\n    fn test_position() {\n        let mut bits = 0;\n        for i in 0..10 {\n            for j in 0..8 {\n                let position = BitByte { bytes: i, bits: j };\n                assert_eq!(position.total_bits(), bits);\n                bits += 1;\n            }\n        }\n    }\n\n    // `write_padded` for 151 bytes of 1s, check padding bits in bytes 31 and 63.\n    #[test]\n    fn test_write_padded() {\n        let data = vec![255u8; 151];\n        let buf = Vec::new();\n        let mut cursor = Cursor::new(buf);\n        let written = write_padded(&data, &mut cursor).unwrap();\n        let padded = cursor.into_inner();\n        assert_eq!(written, 151);\n        assert_eq!(\n            padded.len(),\n            FR32_PADDING_MAP.transform_byte_offset(151, true)\n        );\n        assert_eq!(&padded[0..31], &data[0..31]);\n        assert_eq!(padded[31], 0b0011_1111);\n        assert_eq!(padded[32], 0b1111_1111);\n        assert_eq!(&padded[33..63], vec![255u8; 30].as_slice());\n        assert_eq!(padded[63], 0b0011_1111);\n    }\n\n    // `write_padded` for 256 bytes of 1s, splitting it in two calls of 128 bytes,\n    // aligning the calls with the padded element boundaries, check padding bits\n    // in bytes 31 and 63.\n    #[test]\n    fn test_write_padded_multiple_aligned() {\n        let data = vec![255u8; 256];\n        let buf = Vec::new();\n        let mut cursor = Cursor::new(buf);\n        let mut written = write_padded(&data[0..128], &mut cursor).unwrap();\n        written += write_padded(&data[128..], &mut cursor).unwrap();\n        let padded = cursor.into_inner();\n\n        assert_eq!(written, 256);\n        assert_eq!(\n            padded.len(),\n            FR32_PADDING_MAP.transform_byte_offset(256, true)\n        );\n        assert_eq!(&padded[0..31], &data[0..31]);\n        assert_eq!(padded[31], 0b0011_1111);\n        assert_eq!(padded[32], 0b1111_1111);\n        assert_eq!(&padded[33..63], vec![255u8; 30].as_slice());\n        assert_eq!(padded[63], 0b0011_1111);\n        // TODO: This test is not checking the padding in the boundary between the\n        // `write_padded` calls, so it doesn't seem to be testing anything different\n        // from the previous one.\n    }\n\n    // `write_padded` for 265 bytes of 1s, splitting it in two calls of 128 and\n    // 137 bytes, check padding bits\n    // in bytes 31 and 63.\n    #[test]\n    fn test_write_padded_multiple_first_aligned() {\n        let data = vec![255u8; 265];\n        let buf = Vec::new();\n        let mut cursor = Cursor::new(buf);\n        let mut written = write_padded(&data[0..128], &mut cursor).unwrap();\n        written += write_padded(&data[128..], &mut cursor).unwrap();\n        let padded = cursor.into_inner();\n\n        assert_eq!(written, 265);\n        assert_eq!(\n            padded.len(),\n            FR32_PADDING_MAP.transform_byte_offset(265, true)\n        );\n        assert_eq!(&padded[0..31], &data[0..31]);\n        assert_eq!(padded[31], 0b0011_1111);\n        assert_eq!(padded[32], 0b1111_1111);\n        assert_eq!(&padded[33..63], vec![255u8; 30].as_slice());\n        
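// Byte 63 closes the second element: six data bits set in the low\n        // positions, two zero padding bits on top.\n        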
assert_eq!(padded[63], 0b0011_1111);\n        // TODO: Same observation as before, what are we testing here?\n    }\n\n    fn validate_fr32(bytes: &[u8]) {\n        for (i, chunk) in bytes.chunks(32).enumerate() {\n            let _ = bytes_into_fr::<Bls12>(chunk).expect(&format!(\n                \"{}th chunk cannot be converted to valid Fr: {:?}\",\n                i + 1,\n                chunk\n            ));\n        }\n    }\n\n    // `write_padded` for 127 bytes of 1s, splitting it in two calls of varying\n    // sizes, from 0 to the full size, generating many unaligned calls, check\n    // padding bits in bytes 31 and 63.\n    #[test]\n    fn test_write_padded_multiple_unaligned() {\n        // Use 127 for this test because it pads to 128 – a multiple of 32.\n        // Otherwise the last chunk will be too short and cannot be converted to Fr.\n        for i in 0..127 {\n            let data = vec![255u8; 127];\n            let buf = Vec::new();\n            let mut cursor = Cursor::new(buf);\n            let mut written = write_padded(&data[0..i], &mut cursor).unwrap();\n            written += write_padded(&data[i..], &mut cursor).unwrap();\n            let padded = cursor.into_inner();\n            validate_fr32(&padded);\n            assert_eq!(written, 127);\n            assert_eq!(\n                padded.len(),\n                FR32_PADDING_MAP.transform_byte_offset(127, true)\n            );\n            assert_eq!(&padded[0..31], &data[0..31]);\n            assert_eq!(padded[31], 0b0011_1111);\n            assert_eq!(padded[32], 0b1111_1111);\n            assert_eq!(&padded[33..63], vec![255u8; 30].as_slice());\n            assert_eq!(padded[63], 0b0011_1111);\n            // TODO: We seem to be repeating the same series of asserts,\n            // maybe this can be abstracted away in a helper function.\n        }\n    }\n\n    // `write_padded` for a raw data stream of increasing values and specific\n    // outliers (0xFF, 9), check the content of the raw data encoded (with\n    // different alignments) in the padded layouts.\n    #[test]\n    fn test_write_padded_alt() {\n        let mut source = vec![\n            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,\n            25, 26, 27, 28, 29, 30, 31, 0xff, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,\n            16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 0xff, 9, 9,\n        ];\n        // FIXME: This doesn't exercise the ability to write a second time, which is the point of the extra_bytes in write_test.\n        source.extend(vec![9, 0xff]);\n\n        let buf = Vec::new();\n        let mut cursor = Cursor::new(buf);\n        write_padded(&source, &mut cursor).unwrap();\n        let buf = cursor.into_inner();\n\n        for i in 0..31 {\n            assert_eq!(buf[i], i as u8 + 1);\n        }\n        assert_eq!(buf[31], 63); // Six least significant bits of 0xff\n        assert_eq!(buf[32], (1 << 2) | 0b11); // 7\n        for i in 33..63 {\n            assert_eq!(buf[i], (i as u8 - 31) << 2);\n        }\n        assert_eq!(buf[63], (0x0f << 2)); // 4-bits of ones, half of 0xff, shifted by two, followed by two bits of 0-padding.\n        assert_eq!(buf[64], 0x0f | 9 << 4); // The last half of 0xff, 'followed' by 9.\n        assert_eq!(buf[65], 9 << 4); // A shifted 9.\n        assert_eq!(buf[66], 9 << 4); // Another.\n        assert_eq!(buf[67], 0xf0); // The final 0xff is split into two bytes. 
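(After two full elements the data has been shifted by four bits in total, so the split falls on a nibble boundary.) 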
Here is the first half.\n        assert_eq!(buf[68], 0x0f); // And here is the second.\n    }\n\n    // `write_padded` and `write_unpadded` for 1016 bytes of 1s, check the\n    // recovered raw data.\n    #[test]\n    fn test_read_write_padded() {\n        let len = 1016; // Use a multiple of 254.\n        let data = vec![255u8; len];\n        let buf = Vec::new();\n        let mut cursor = Cursor::new(buf);\n        let padded_written = write_padded(&data, &mut cursor).unwrap();\n        let padded = cursor.into_inner();\n\n        assert_eq!(padded_written, len);\n        assert_eq!(\n            padded.len(),\n            FR32_PADDING_MAP.transform_byte_offset(len, true)\n        );\n\n        let mut unpadded = Vec::new();\n        let unpadded_written = write_unpadded(&padded, &mut unpadded, 0, len).unwrap();\n        assert_eq!(unpadded_written, len);\n        assert_eq!(data, unpadded);\n    }\n\n    // `write_padded` and `write_unpadded` for 1016 bytes of random data, recover\n    // different lengths of raw data at different offsets, check integrity.\n    #[test]\n    fn test_read_write_padded_offset() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let len = 1016;\n        let data: Vec<u8> = (0..len).map(|_| rng.gen()).collect();\n        let buf = Vec::new();\n        let mut cursor = Cursor::new(buf);\n        write_padded(&data, &mut cursor).unwrap();\n        let padded = cursor.into_inner();\n\n        {\n            let mut unpadded = Vec::new();\n            write_unpadded(&padded, &mut unpadded, 0, 1016).unwrap();\n            let expected = &data[0..1016];\n\n            assert_eq!(expected.len(), unpadded.len());\n            assert_eq!(expected, &unpadded[..]);\n        }\n\n        {\n            let mut unpadded = Vec::new();\n            write_unpadded(&padded, &mut unpadded, 0, 44).unwrap();\n            let expected = &data[0..44];\n\n            assert_eq!(expected.len(), unpadded.len());\n            assert_eq!(expected, &unpadded[..]);\n        }\n        for start in 0..1016 {\n            let mut unpadded = Vec::new();\n\n            let len = 35;\n            let unpadded_bytes = write_unpadded(&padded, &mut unpadded, start, len).unwrap();\n            let actual_len = min(data.len() - start, len);\n            assert_eq!(unpadded_bytes, actual_len);\n\n            let expected = &data[start..start + actual_len];\n            assert_eq!(expected, &unpadded[..]);\n        }\n    }\n\n    // TODO: Add a test that checks integrity counting the number of set bits\n    // before and after padding. This would need to assume that padding is\n    // always zero and the DC bits are also zero in the underlying implementation.\n\n    // TODO: Add a test that drops the last part of an element and tries to recover\n    // the rest of the data (may already be present in some form in the above tests).\n}\n"
  },
  {
    "path": "sector-base/src/io/mod.rs",
    "content": "pub mod fr32;\n"
  },
  {
    "path": "sector-base/src/lib.rs",
    "content": "#![deny(clippy::all, clippy::perf, clippy::correctness)]\n#![allow(clippy::unreadable_literal)]\n#![warn(clippy::type_complexity, clippy::too_many_arguments)]\n\nextern crate bitvec;\n#[macro_use]\nextern crate failure;\nextern crate ffi_toolkit;\nextern crate itertools;\nextern crate libc;\nextern crate pairing;\nextern crate rand;\nextern crate storage_proofs;\n\n#[cfg(test)]\nextern crate tempfile;\n\npub mod api;\npub mod error;\npub mod io;\n"
  },
  {
    "path": "sha2raw/Cargo.toml",
    "content": "[package]\nname = \"sha2raw\"\nversion = \"0.1.0\"\nauthors = [\"RustCrypto Developers\", \"Friedel Ziegelmayer <me@dignifiedquire.com>\"]\nlicense = \"MIT OR Apache-2.0\"\ndescription = \"SHA-2 hash function\"\ndocumentation = \"https://docs.rs/sha2raw\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\nkeywords = [\"crypto\", \"sha2\", \"hash\", \"digest\"]\ncategories = [\"cryptography\", \"no-std\"]\nedition = \"2018\"\n\n[dependencies]\ndigest = \"0.8\"\nblock-buffer = \"0.7\"\nfake-simd = \"0.1\"\nopaque-debug = \"0.2\"\nsha2-asm = { version=\"0.5\", optional=true }\nraw-cpuid = \"7.0.3\"\n\n[dependencies.lazy_static]\nversion = \"1.4.0\"\n\n[dev-dependencies]\ndigest = { version = \"0.8\", features = [\"dev\", \"std\"] }\nhex-literal = \"0.1\"\nsha2 = \"0.8.1\"\nrand = \"0.7.3\"\nrand_xorshift = \"0.2.0\"\n\n[features]\ndefault = []\nasm = [\"sha2-asm\"]\n\n\n"
  },
  {
    "path": "sha2raw/README.md",
    "content": "# sha2raw\n\n\n> Implementation of Sha256 with a focus on hashing fixed sizes chunks, that do not require padding. Based on [sha2](https://docs.rs/sha2).\n"
  },
  {
    "path": "sha2raw/src/consts.rs",
    "content": "use fake_simd::u32x4;\n\npub const STATE_LEN: usize = 8;\npub const BLOCK_LEN: usize = 16;\n\n/// Constants necessary for SHA-256 family of digests.\npub const K32: [u32; 64] = [\n    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,\n    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,\n    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,\n    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,\n    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,\n    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,\n    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,\n    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,\n];\n\n/// Constants necessary for SHA-256 family of digests.\npub const K32X4: [u32x4; 16] = [\n    u32x4(K32[3], K32[2], K32[1], K32[0]),\n    u32x4(K32[7], K32[6], K32[5], K32[4]),\n    u32x4(K32[11], K32[10], K32[9], K32[8]),\n    u32x4(K32[15], K32[14], K32[13], K32[12]),\n    u32x4(K32[19], K32[18], K32[17], K32[16]),\n    u32x4(K32[23], K32[22], K32[21], K32[20]),\n    u32x4(K32[27], K32[26], K32[25], K32[24]),\n    u32x4(K32[31], K32[30], K32[29], K32[28]),\n    u32x4(K32[35], K32[34], K32[33], K32[32]),\n    u32x4(K32[39], K32[38], K32[37], K32[36]),\n    u32x4(K32[43], K32[42], K32[41], K32[40]),\n    u32x4(K32[47], K32[46], K32[45], K32[44]),\n    u32x4(K32[51], K32[50], K32[49], K32[48]),\n    u32x4(K32[55], K32[54], K32[53], K32[52]),\n    u32x4(K32[59], K32[58], K32[57], K32[56]),\n    u32x4(K32[63], K32[62], K32[61], K32[60]),\n];\n\npub static H256: [u32; STATE_LEN] = [\n    0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,\n];\n"
  },
  {
    "path": "sha2raw/src/lib.rs",
    "content": "//! An implementation of the [SHA-2][1] cryptographic hash algorithms.\n\n// Give relevant error messages if the user tries to enable AArch64 asm on unsupported platforms.\n\n#![deny(clippy::all, clippy::perf, clippy::correctness)]\n#![allow(clippy::unreadable_literal)]\n\nmod consts;\n\nmod platform;\nmod sha256;\n#[cfg(any(target_arch = \"x86\", target_arch = \"x86_64\"))]\nmod sha256_intrinsics;\nmod sha256_utils;\n\npub use digest::Digest;\npub use sha256::Sha256;\n"
  },
  {
    "path": "sha2raw/src/platform.rs",
    "content": "#[cfg(any(target_arch = \"x86\", target_arch = \"x86_64\"))]\nuse crate::sha256_intrinsics;\nuse crate::sha256_utils;\n\n#[allow(dead_code)]\n#[derive(Clone, Copy, Debug, Eq, PartialEq)]\nenum Platform {\n    Portable,\n    #[cfg(feature = \"asm\")]\n    Asm,\n    #[cfg(any(target_arch = \"x86\", target_arch = \"x86_64\"))]\n    Sha,\n}\n\n#[derive(Clone, Copy, Debug)]\npub struct Implementation(Platform);\n\nimpl Implementation {\n    pub fn detect() -> Self {\n        // Try the different implementations in order of how fast/modern they are.\n        #[cfg(target_arch = \"x86_64\")]\n        {\n            if let Some(sha_impl) = Self::sha_if_supported() {\n                return sha_impl;\n            }\n        }\n        #[cfg(feature = \"asm\")]\n        {\n            if let Some(asm_impl) = Self::asm_if_supported() {\n                return asm_impl;\n            }\n        }\n\n        Self::portable()\n    }\n\n    pub fn portable() -> Self {\n        Implementation(Platform::Portable)\n    }\n\n    #[cfg(target_arch = \"x86_64\")]\n    #[allow(unreachable_code)]\n    pub fn sha_if_supported() -> Option<Self> {\n        // Use raw_cpuid instead of is_x86_feature_detected, to ensure the check\n        // never happens at compile time.\n        let is_runtime_ok = cpuid_bool::cpuid_bool!(\"sha\");\n\n        #[cfg(target_feature = \"sha\")]\n        {\n            if !is_runtime_ok {\n                println!(\"WARN: sha-ni not available, falling back\");\n            }\n        }\n\n        // Make sure this computer actually supports it\n        if is_runtime_ok {\n            return Some(Implementation(Platform::Sha));\n        }\n\n        None\n    }\n\n    #[cfg(feature = \"asm\")]\n    pub fn asm_if_supported() -> Option<Self> {\n        Some(Implementation(Platform::Asm))\n    }\n\n    #[inline]\n    pub fn compress256(self, state: &mut [u32; 8], blocks: &[&[u8]]) {\n        match self.0 {\n            Platform::Portable => {\n                sha256_utils::compress256(state, blocks);\n            }\n            #[cfg(any(target_arch = \"x86\", target_arch = \"x86_64\"))]\n            Platform::Sha => {\n                unsafe { sha256_intrinsics::compress256(state, blocks) };\n            }\n            #[cfg(feature = \"asm\")]\n            Platform::Asm => {\n                let mut buffer = [0u8; 64];\n                for block in blocks.chunks(2) {\n                    buffer[..32].copy_from_slice(&block[0]);\n                    buffer[32..].copy_from_slice(&block[1]);\n                    sha2_asm::compress256(state, &[buffer]);\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "sha2raw/src/sha256.rs",
    "content": "use byteorder::{ByteOrder, BE};\nuse lazy_static::lazy_static;\n\nuse crate::{consts::H256, platform::Implementation};\n\nlazy_static! {\n    static ref IMPL: Implementation = Implementation::detect();\n}\n\n#[derive(Clone)]\npub struct Sha256 {\n    len: u64,\n    state: [u32; 8],\n}\n\nimpl Default for Sha256 {\n    fn default() -> Self {\n        Sha256 {\n            len: 0,\n            state: H256,\n        }\n    }\n}\n\nimpl Sha256 {\n    pub fn new() -> Self {\n        Sha256::default()\n    }\n\n    pub fn digest(blocks: &[&[u8]]) -> [u8; 32] {\n        let mut sha = Sha256::new();\n        sha.input(blocks);\n        sha.finish()\n    }\n\n    pub fn input(&mut self, blocks: &[&[u8]]) {\n        debug_assert_eq!(blocks.len() % 2, 0, \"invalid block length\");\n\n        self.len += (blocks.len() as u64) << 8;\n\n        IMPL.compress256(&mut self.state, blocks);\n    }\n\n    pub fn finish(mut self) -> [u8; 32] {\n        let mut block0 = [0u8; 32];\n        let mut block1 = [0u8; 32];\n\n        // Append single 1 bit\n        block0[0] = 0b1000_0000;\n\n        // Write L as 64 big endian integer\n        let l = self.len;\n        block1[32 - 8..].copy_from_slice(&l.to_be_bytes()[..]);\n\n        IMPL.compress256(&mut self.state, &[&block0[..], &block1[..]][..]);\n\n        let mut out = [0u8; 32];\n        BE::write_u32_into(&self.state, &mut out);\n        out\n    }\n\n    pub fn finish_with(mut self, block0: &[u8]) -> [u8; 32] {\n        debug_assert_eq!(block0.len(), 32);\n\n        let mut block1 = [0u8; 32];\n\n        // Append single 1 bit\n        block1[0] = 0b1000_0000;\n\n        // Write L as 64 big endian integer\n        let l = self.len + 256;\n        block1[32 - 8..].copy_from_slice(&l.to_be_bytes()[..]);\n\n        IMPL.compress256(&mut self.state, &[block0, &block1[..]][..]);\n\n        let mut out = [0u8; 32];\n        BE::write_u32_into(&self.state, &mut out);\n        out\n    }\n}\n\nopaque_debug::implement!(Sha256);\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use rand::{RngCore, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n    use sha2::{Digest, Sha256 as Original};\n\n    #[test]\n    fn test_fuzz_simple() {\n        fuzz(10);\n    }\n\n    #[test]\n    #[ignore]\n    fn test_fuzz_long() {\n        fuzz(1_000);\n    }\n\n    fn fuzz(n: usize) {\n        let rng = &mut XorShiftRng::from_seed([\n            0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06,\n            0xbc, 0xe5,\n        ]);\n        for k in 1..n {\n            for _ in 0..100 {\n                let mut input = vec![0u8; 64 * k];\n                rng.fill_bytes(&mut input);\n                let chunked = input.chunks(32).collect::<Vec<_>>();\n                assert_eq!(&Sha256::digest(&chunked)[..], &Original::digest(&input)[..])\n            }\n        }\n\n        for k in (1..n).step_by(2) {\n            for _ in 0..100 {\n                let mut input = vec![0u8; 32 * k];\n                rng.fill_bytes(&mut input);\n                let mut hasher = Sha256::new();\n                for chunk in input.chunks(64) {\n                    if chunk.len() == 64 {\n                        hasher.input(&[&chunk[..32], &chunk[32..]]);\n                    }\n                }\n                assert_eq!(input.len() % 64, 32);\n                let hash = hasher.finish_with(&input[input.len() - 32..]);\n\n                assert_eq!(\n                    &hash[..],\n                    &Original::digest(&input)[..],\n        
            \"input: {:?}\",\n                    &input\n                );\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "sha2raw/src/sha256_intrinsics.rs",
    "content": "#![allow(clippy::many_single_char_names)]\n#![allow(clippy::cast_ptr_alignment)] // Safe to cast without alignment checks as the loads and stores do not require alignment.\n\n#[cfg(target_arch = \"x86\")]\nuse std::arch::x86;\n#[cfg(target_arch = \"x86_64\")]\nuse std::arch::x86_64 as x86;\n\nuse x86::{\n    __m128i, _mm_add_epi32, _mm_alignr_epi8, _mm_blend_epi16, _mm_loadu_si128, _mm_set_epi64x,\n    _mm_sha256msg1_epu32, _mm_sha256msg2_epu32, _mm_sha256rnds2_epu32, _mm_shuffle_epi32,\n    _mm_shuffle_epi8, _mm_storeu_si128,\n};\n\n/// Process a block with the SHA-256 algorithm.\n/// Based on https://github.com/noloader/SHA-Intrinsics/blob/master/sha256-x86.c\n#[inline(always)]\npub unsafe fn compress256(state: &mut [u32; 8], blocks: &[&[u8]]) {\n    assert_eq!(blocks.len() % 2, 0);\n\n    let mut state0: __m128i;\n    let mut state1: __m128i;\n\n    let mut msg: __m128i;\n    let mut tmp: __m128i;\n\n    let mut msg0: __m128i;\n    let mut msg1: __m128i;\n    let mut msg2: __m128i;\n    let mut msg3: __m128i;\n\n    let mut abef_save: __m128i;\n    let mut cdgh_save: __m128i;\n\n    #[allow(non_snake_case)]\n    let MASK: __m128i = _mm_set_epi64x(\n        0x0c0d_0e0f_0809_0a0bu64 as i64,\n        0x0405_0607_0001_0203u64 as i64,\n    );\n\n    // Load initial values\n    tmp = _mm_loadu_si128(state.as_ptr().add(0) as *const __m128i);\n    state1 = _mm_loadu_si128(state.as_ptr().add(4) as *const __m128i);\n\n    tmp = _mm_shuffle_epi32(tmp, 0xB1); // CDAB\n    state1 = _mm_shuffle_epi32(state1, 0x1B); // EFGH\n    state0 = _mm_alignr_epi8(tmp, state1, 8); // ABEF\n    state1 = _mm_blend_epi16(state1, tmp, 0xF0); // CDGH\n\n    for i in (0..blocks.len()).step_by(2) {\n        // Save current state\n        abef_save = state0;\n        cdgh_save = state1;\n\n        // Rounds 0-3\n        msg = _mm_loadu_si128(blocks[i].as_ptr().add(0) as *const __m128i);\n        msg0 = _mm_shuffle_epi8(msg, MASK);\n        msg = _mm_add_epi32(\n            msg0,\n            _mm_set_epi64x(0xE9B5DBA5B5C0FBCFu64 as i64, 0x71374491428A2F98u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n\n        // Rounds 4-7\n        msg1 = _mm_loadu_si128(blocks[i].as_ptr().add(16) as *const __m128i);\n        msg1 = _mm_shuffle_epi8(msg1, MASK);\n        msg = _mm_add_epi32(\n            msg1,\n            _mm_set_epi64x(0xAB1C5ED5923F82A4u64 as i64, 0x59F111F13956C25Bu64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg0 = _mm_sha256msg1_epu32(msg0, msg1);\n\n        // Rounds 8-11\n        msg2 = _mm_loadu_si128(blocks[i + 1].as_ptr().add(0) as *const __m128i);\n        msg2 = _mm_shuffle_epi8(msg2, MASK);\n        msg = _mm_add_epi32(\n            msg2,\n            _mm_set_epi64x(0x550C7DC3243185BEu64 as i64, 0x12835B01D807AA98u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg1 = _mm_sha256msg1_epu32(msg1, msg2);\n\n        // Rounds 12-15\n        msg3 = _mm_loadu_si128(blocks[i + 1].as_ptr().add(16) as *const __m128i);\n        msg3 = _mm_shuffle_epi8(msg3, MASK);\n        msg = _mm_add_epi32(\n            msg3,\n            
_mm_set_epi64x(0xC19BF1749BDC06A7u64 as i64, 0x80DEB1FE72BE5D74u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg3, msg2, 4);\n        msg0 = _mm_add_epi32(msg0, tmp);\n        msg0 = _mm_sha256msg2_epu32(msg0, msg3);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg2 = _mm_sha256msg1_epu32(msg2, msg3);\n\n        // Rounds 16-19\n        msg = _mm_add_epi32(\n            msg0,\n            _mm_set_epi64x(0x240CA1CC0FC19DC6u64 as i64, 0xEFBE4786E49B69C1u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg0, msg3, 4);\n        msg1 = _mm_add_epi32(msg1, tmp);\n        msg1 = _mm_sha256msg2_epu32(msg1, msg0);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg3 = _mm_sha256msg1_epu32(msg3, msg0);\n\n        // Rounds 20-23\n        msg = _mm_add_epi32(\n            msg1,\n            _mm_set_epi64x(0x76F988DA5CB0A9DCu64 as i64, 0x4A7484AA2DE92C6Fu64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg1, msg0, 4);\n        msg2 = _mm_add_epi32(msg2, tmp);\n        msg2 = _mm_sha256msg2_epu32(msg2, msg1);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg0 = _mm_sha256msg1_epu32(msg0, msg1);\n\n        // Rounds 24-27\n        msg = _mm_add_epi32(\n            msg2,\n            _mm_set_epi64x(0xBF597FC7B00327C8u64 as i64, 0xA831C66D983E5152u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg2, msg1, 4);\n        msg3 = _mm_add_epi32(msg3, tmp);\n        msg3 = _mm_sha256msg2_epu32(msg3, msg2);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg1 = _mm_sha256msg1_epu32(msg1, msg2);\n\n        // Rounds 28-31\n        msg = _mm_add_epi32(\n            msg3,\n            _mm_set_epi64x(0x1429296706CA6351u64 as i64, 0xD5A79147C6E00BF3u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg3, msg2, 4);\n        msg0 = _mm_add_epi32(msg0, tmp);\n        msg0 = _mm_sha256msg2_epu32(msg0, msg3);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg2 = _mm_sha256msg1_epu32(msg2, msg3);\n\n        // Rounds 32-35\n        msg = _mm_add_epi32(\n            msg0,\n            _mm_set_epi64x(0x53380D134D2C6DFCu64 as i64, 0x2E1B213827B70A85u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg0, msg3, 4);\n        msg1 = _mm_add_epi32(msg1, tmp);\n        msg1 = _mm_sha256msg2_epu32(msg1, msg0);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg3 = _mm_sha256msg1_epu32(msg3, msg0);\n\n        // Rounds 36-39\n        msg = _mm_add_epi32(\n            msg1,\n            _mm_set_epi64x(0x92722C8581C2C92Eu64 as i64, 0x766A0ABB650A7354u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg1, msg0, 4);\n        msg2 = _mm_add_epi32(msg2, tmp);\n        msg2 = _mm_sha256msg2_epu32(msg2, msg1);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        
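// The second rnds2 call consumes the two upper WK words that the 0x0E\n        // shuffle moved into the low lanes.\n        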
state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg0 = _mm_sha256msg1_epu32(msg0, msg1);\n\n        // Rounds 40-43\n        msg = _mm_add_epi32(\n            msg2,\n            _mm_set_epi64x(0xC76C51A3C24B8B70u64 as i64, 0xA81A664BA2BFE8A1u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg2, msg1, 4);\n        msg3 = _mm_add_epi32(msg3, tmp);\n        msg3 = _mm_sha256msg2_epu32(msg3, msg2);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg1 = _mm_sha256msg1_epu32(msg1, msg2);\n\n        // Rounds 44-47\n        msg = _mm_add_epi32(\n            msg3,\n            _mm_set_epi64x(0x106AA070F40E3585u64 as i64, 0xD6990624D192E819u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg3, msg2, 4);\n        msg0 = _mm_add_epi32(msg0, tmp);\n        msg0 = _mm_sha256msg2_epu32(msg0, msg3);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg2 = _mm_sha256msg1_epu32(msg2, msg3);\n\n        // Rounds 48-51\n        msg = _mm_add_epi32(\n            msg0,\n            _mm_set_epi64x(0x34B0BCB52748774Cu64 as i64, 0x1E376C0819A4C116u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg0, msg3, 4);\n        msg1 = _mm_add_epi32(msg1, tmp);\n        msg1 = _mm_sha256msg2_epu32(msg1, msg0);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n        msg3 = _mm_sha256msg1_epu32(msg3, msg0);\n\n        // Rounds 52-55\n        msg = _mm_add_epi32(\n            msg1,\n            _mm_set_epi64x(0x682E6FF35B9CCA4Fu64 as i64, 0x4ED8AA4A391C0CB3u64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg1, msg0, 4);\n        msg2 = _mm_add_epi32(msg2, tmp);\n        msg2 = _mm_sha256msg2_epu32(msg2, msg1);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n\n        // Rounds 56-59\n        msg = _mm_add_epi32(\n            msg2,\n            _mm_set_epi64x(0x8CC7020884C87814u64 as i64, 0x78A5636F748F82EEu64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        tmp = _mm_alignr_epi8(msg2, msg1, 4);\n        msg3 = _mm_add_epi32(msg3, tmp);\n        msg3 = _mm_sha256msg2_epu32(msg3, msg2);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n\n        // Rounds 60-63\n        msg = _mm_add_epi32(\n            msg3,\n            _mm_set_epi64x(0xC67178F2BEF9A3F7u64 as i64, 0xA4506CEB90BEFFFAu64 as i64),\n        );\n        state1 = _mm_sha256rnds2_epu32(state1, state0, msg);\n        msg = _mm_shuffle_epi32(msg, 0x0E);\n        state0 = _mm_sha256rnds2_epu32(state0, state1, msg);\n\n        // Combine state\n        state0 = _mm_add_epi32(state0, abef_save);\n        state1 = _mm_add_epi32(state1, cdgh_save);\n    }\n\n    tmp = _mm_shuffle_epi32(state0, 0x1B); // FEBA\n    state1 = _mm_shuffle_epi32(state1, 0xB1); // DCHG\n    state0 = _mm_blend_epi16(tmp, state1, 0xF0); // DCBA\n    state1 = _mm_alignr_epi8(state1, tmp, 8); // ABEF\n\n    // Save state\n    _mm_storeu_si128(state.as_ptr().add(0) as *mut __m128i, state0);\n    _mm_storeu_si128(state.as_ptr().add(4) as *mut __m128i, state1);\n}\n"
  },
  {
    "path": "sha2raw/src/sha256_utils.rs",
    "content": "#![allow(clippy::many_single_char_names)]\n\nuse byteorder::{ByteOrder, BE};\nuse fake_simd::u32x4;\n\nuse crate::consts::{BLOCK_LEN, K32X4};\n\n/// Not an intrinsic, but works like an unaligned load.\n#[inline]\nfn sha256load(v2: u32x4, v3: u32x4) -> u32x4 {\n    u32x4(v3.3, v2.0, v2.1, v2.2)\n}\n\n/// Not an intrinsic, but useful for swapping vectors.\n#[inline]\nfn sha256swap(v0: u32x4) -> u32x4 {\n    u32x4(v0.2, v0.3, v0.0, v0.1)\n}\n\n/// Emulates `llvm.x86.sha256msg1` intrinsic.\n// #[inline]\nfn sha256msg1(v0: u32x4, v1: u32x4) -> u32x4 {\n    // sigma 0 on vectors\n    #[inline]\n    fn sigma0x4(x: u32x4) -> u32x4 {\n        ((x >> u32x4(7, 7, 7, 7)) | (x << u32x4(25, 25, 25, 25)))\n            ^ ((x >> u32x4(18, 18, 18, 18)) | (x << u32x4(14, 14, 14, 14)))\n            ^ (x >> u32x4(3, 3, 3, 3))\n    }\n\n    v0 + sigma0x4(sha256load(v0, v1))\n}\n\n/// Emulates `llvm.x86.sha256msg2` intrinsic.\n// #[inline]\nfn sha256msg2(v4: u32x4, v3: u32x4) -> u32x4 {\n    macro_rules! sigma1 {\n        ($a:expr) => {\n            $a.rotate_right(17) ^ $a.rotate_right(19) ^ ($a >> 10)\n        };\n    }\n\n    let u32x4(x3, x2, x1, x0) = v4;\n    let u32x4(w15, w14, _, _) = v3;\n\n    let w16 = x0.wrapping_add(sigma1!(w14));\n    let w17 = x1.wrapping_add(sigma1!(w15));\n    let w18 = x2.wrapping_add(sigma1!(w16));\n    let w19 = x3.wrapping_add(sigma1!(w17));\n\n    u32x4(w19, w18, w17, w16)\n}\n\n/*\n/// Performs 4 rounds of the SHA-256 message schedule update.\nfn sha256_schedule_x4(v0: u32x4, v1: u32x4, v2: u32x4, v3: u32x4) -> u32x4 {\n    sha256msg2(sha256msg1(v0, v1) + sha256load(v2, v3), v3)\n}*/\n\n/// Emulates `llvm.x86.sha256rnds2` intrinsic.\n// #[inline]\nfn sha256_digest_round_x2(cdgh: u32x4, abef: u32x4, wk: u32x4) -> u32x4 {\n    macro_rules! big_sigma0 {\n        ($a:expr) => {\n            ($a.rotate_right(2) ^ $a.rotate_right(13) ^ $a.rotate_right(22))\n        };\n    }\n    macro_rules! big_sigma1 {\n        ($a:expr) => {\n            ($a.rotate_right(6) ^ $a.rotate_right(11) ^ $a.rotate_right(25))\n        };\n    }\n    macro_rules! bool3ary_202 {\n        ($a:expr, $b:expr, $c:expr) => {\n            $c ^ ($a & ($b ^ $c))\n        };\n    } // Choose, MD5F, SHA1C\n    macro_rules! bool3ary_232 {\n        ($a:expr, $b:expr, $c:expr) => {\n            ($a & $b) ^ ($a & $c) ^ ($b & $c)\n        };\n    } // Majority, SHA1M\n\n    let u32x4(_, _, wk1, wk0) = wk;\n    let u32x4(a0, b0, e0, f0) = abef;\n    let u32x4(c0, d0, g0, h0) = cdgh;\n\n    // a round\n    let x0 = big_sigma1!(e0)\n        .wrapping_add(bool3ary_202!(e0, f0, g0))\n        .wrapping_add(wk0)\n        .wrapping_add(h0);\n    let y0 = big_sigma0!(a0).wrapping_add(bool3ary_232!(a0, b0, c0));\n    let (a1, b1, c1, d1, e1, f1, g1, h1) = (\n        x0.wrapping_add(y0),\n        a0,\n        b0,\n        c0,\n        x0.wrapping_add(d0),\n        e0,\n        f0,\n        g0,\n    );\n\n    // a round\n    let x1 = big_sigma1!(e1)\n        .wrapping_add(bool3ary_202!(e1, f1, g1))\n        .wrapping_add(wk1)\n        .wrapping_add(h1);\n    let y1 = big_sigma0!(a1).wrapping_add(bool3ary_232!(a1, b1, c1));\n    let (a2, b2, _, _, e2, f2, _, _) = (\n        x1.wrapping_add(y1),\n        a1,\n        b1,\n        c1,\n        x1.wrapping_add(d1),\n        e1,\n        f1,\n        g1,\n    );\n\n    u32x4(a2, b2, e2, f2)\n}\n\n/// Process a block with the SHA-256 algorithm.\nfn sha256_digest_block_u32(state: &mut [u32; 8], block: &[u32; 16]) {\n    let k = &K32X4;\n\n    macro_rules! 
schedule {\n        ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {\n            sha256msg2(sha256msg1($v0, $v1) + sha256load($v2, $v3), $v3)\n        };\n    }\n\n    macro_rules! rounds4 {\n        ($abef:ident, $cdgh:ident, $rest:expr) => {{\n            $cdgh = sha256_digest_round_x2($cdgh, $abef, $rest);\n            $abef = sha256_digest_round_x2($abef, $cdgh, sha256swap($rest));\n        }};\n    }\n\n    let mut abef = u32x4(state[0], state[1], state[4], state[5]);\n    let mut cdgh = u32x4(state[2], state[3], state[6], state[7]);\n\n    // Rounds 0..64\n    let mut w0 = u32x4(block[3], block[2], block[1], block[0]);\n    rounds4!(abef, cdgh, k[0] + w0);\n    let mut w1 = u32x4(block[7], block[6], block[5], block[4]);\n    rounds4!(abef, cdgh, k[1] + w1);\n    let mut w2 = u32x4(block[11], block[10], block[9], block[8]);\n    rounds4!(abef, cdgh, k[2] + w2);\n    let mut w3 = u32x4(block[15], block[14], block[13], block[12]);\n    rounds4!(abef, cdgh, k[3] + w3);\n    let mut w4 = schedule!(w0, w1, w2, w3);\n    rounds4!(abef, cdgh, k[4] + w4);\n    w0 = schedule!(w1, w2, w3, w4);\n    rounds4!(abef, cdgh, k[5] + w0);\n    w1 = schedule!(w2, w3, w4, w0);\n    rounds4!(abef, cdgh, k[6] + w1);\n    w2 = schedule!(w3, w4, w0, w1);\n    rounds4!(abef, cdgh, k[7] + w2);\n    w3 = schedule!(w4, w0, w1, w2);\n    rounds4!(abef, cdgh, k[8] + w3);\n    w4 = schedule!(w0, w1, w2, w3);\n    rounds4!(abef, cdgh, k[9] + w4);\n    w0 = schedule!(w1, w2, w3, w4);\n    rounds4!(abef, cdgh, k[10] + w0);\n    w1 = schedule!(w2, w3, w4, w0);\n    rounds4!(abef, cdgh, k[11] + w1);\n    w2 = schedule!(w3, w4, w0, w1);\n    rounds4!(abef, cdgh, k[12] + w2);\n    w3 = schedule!(w4, w0, w1, w2);\n    rounds4!(abef, cdgh, k[13] + w3);\n    w4 = schedule!(w0, w1, w2, w3);\n    rounds4!(abef, cdgh, k[14] + w4);\n    w0 = schedule!(w1, w2, w3, w4);\n    rounds4!(abef, cdgh, k[15] + w0);\n\n    let u32x4(a, b, e, f) = abef;\n    let u32x4(c, d, g, h) = cdgh;\n\n    state[0] = state[0].wrapping_add(a);\n    state[1] = state[1].wrapping_add(b);\n    state[2] = state[2].wrapping_add(c);\n    state[3] = state[3].wrapping_add(d);\n    state[4] = state[4].wrapping_add(e);\n    state[5] = state[5].wrapping_add(f);\n    state[6] = state[6].wrapping_add(g);\n    state[7] = state[7].wrapping_add(h);\n}\n\n/// Process a block with the SHA-256 algorithm. (See more...)\n///\n/// Internally, this uses functions which resemble the new Intel SHA instruction\n/// sets, and so its data locality properties may improve performance. However,\n/// to benefit the most from this implementation, replace these functions with\n/// x86 intrinsics to get a possible speed boost.\n///\n/// # Implementation\n///\n/// The `Sha256` algorithm is implemented with functions that resemble the new\n/// Intel SHA instruction set extensions. These instructions fall into two\n/// categories: message schedule calculation, and the message block 64-round\n/// digest calculation. 
The schedule-related instructions allow 4 rounds to be\n/// calculated as:\n///\n/// ```compile_fail\n/// use std::simd::u32x4;\n/// use self::crypto::sha2::{\n///     sha256msg1,\n///     sha256msg2,\n///     sha256load\n/// };\n///\n/// fn schedule4_data(work: &mut [u32x4], w: &[u32]) {\n///\n///     // this is to illustrate the data order\n///     work[0] = u32x4(w[3], w[2], w[1], w[0]);\n///     work[1] = u32x4(w[7], w[6], w[5], w[4]);\n///     work[2] = u32x4(w[11], w[10], w[9], w[8]);\n///     work[3] = u32x4(w[15], w[14], w[13], w[12]);\n/// }\n///\n/// fn schedule4_work(work: &mut [u32x4], t: usize) {\n///\n///     // this is the core expression\n///     work[t] = sha256msg2(sha256msg1(work[t - 4], work[t - 3]) +\n///                          sha256load(work[t - 2], work[t - 1]),\n///                          work[t - 1])\n/// }\n/// ```\n///\n/// instead of 4 rounds of:\n///\n/// ```compile_fail\n/// fn schedule_work(w: &mut [u32], t: usize) {\n///     w[t] = sigma1!(w[t - 2]) + w[t - 7] + sigma0!(w[t - 15]) + w[t - 16];\n/// }\n/// ```\n///\n/// and the digest-related instructions allow 4 rounds to be calculated as:\n///\n/// ```compile_fail\n/// use std::simd::u32x4;\n/// use self::crypto::sha2::{K32X4,\n///     sha256rnds2,\n///     sha256swap\n/// };\n///\n/// fn rounds4(state: &mut [u32; 8], work: &mut [u32x4], t: usize) {\n///     let [a, b, c, d, e, f, g, h]: [u32; 8] = *state;\n///\n///     // this is to illustrate the data order\n///     let mut abef = u32x4(a, b, e, f);\n///     let mut cdgh = u32x4(c, d, g, h);\n///     let temp = K32X4[t] + work[t];\n///\n///     // this is the core expression\n///     cdgh = sha256rnds2(cdgh, abef, temp);\n///     abef = sha256rnds2(abef, cdgh, sha256swap(temp));\n///\n///     *state = [abef.0, abef.1, cdgh.0, cdgh.1,\n///               abef.2, abef.3, cdgh.2, cdgh.3];\n/// }\n/// ```\n///\n/// instead of 4 rounds of:\n///\n/// ```compile_fail\n/// fn round(state: &mut [u32; 8], w: &mut [u32], t: usize) {\n///     let [a, b, c, mut d, e, f, g, mut h]: [u32; 8] = *state;\n///\n///     h += big_sigma1!(e) +   choose!(e, f, g) + K32[t] + w[t]; d += h;\n///     h += big_sigma0!(a) + majority!(a, b, c);\n///\n///     *state = [h, a, b, c, d, e, f, g];\n/// }\n/// ```\n///\n/// **NOTE**: It is important to note, however, that these instructions are not\n/// implemented by any CPU (at the time of this writing), and so they are\n/// emulated in this library until the instructions become more common, and gain\n///  support in LLVM (and GCC, etc.).\n#[inline]\npub fn compress256(state: &mut [u32; 8], blocks: &[&[u8]]) {\n    let mut block_u32 = [0u32; BLOCK_LEN];\n\n    for block in blocks.chunks(2) {\n        assert_eq!(block[0].len(), 32);\n        assert_eq!(block[1].len(), 32);\n        BE::read_u32_into(&block[0], &mut block_u32[..BLOCK_LEN / 2]);\n        BE::read_u32_into(&block[1], &mut block_u32[BLOCK_LEN / 2..]);\n\n        sha256_digest_block_u32(state, &block_u32);\n    }\n}\n"
  },
  {
    "path": "src/bin/bencher.rs",
    "content": "#[macro_use]\nextern crate serde_derive;\n#[macro_use]\nextern crate failure;\n#[macro_use]\nextern crate lazy_static;\nextern crate clap;\nextern crate glob;\nextern crate human_size;\nextern crate permutate;\nextern crate prettytable;\nextern crate serde;\nextern crate serde_json;\nextern crate toml;\n\nuse std::collections::HashMap;\nuse std::env;\nuse std::fmt;\nuse std::fs::{self, File};\nuse std::io::prelude::*;\nuse std::process::Command;\nuse std::string::ToString;\nuse std::time::{Duration, SystemTime};\n\nuse clap::{App, Arg};\nuse failure::Error;\nuse filecoin_proofs::error::ExpectWithBacktrace;\nuse glob::glob;\nuse human_size::{Byte, Kibibyte, SpecificSize};\nuse permutate::Permutator;\nuse prettytable::{format, Cell, Row, Table};\nuse serde::de::{self, Deserialize, Deserializer, Visitor};\nuse serde::ser::{Serialize, Serializer};\n\ntype Result<T> = ::std::result::Result<T, Error>;\n\n#[derive(Debug, Deserialize)]\nstruct Case {\n    command: Option<String>,\n    challenges: Vec<usize>,\n    size: Vec<Size>,\n    sloth: Vec<usize>,\n    m: Vec<usize>,\n    hasher: Option<Vec<String>>,\n}\n\n#[derive(Debug, Copy, Clone, PartialEq)]\nstruct Size(SpecificSize<Byte>);\n\nimpl Default for Size {\n    fn default() -> Self {\n        Size(SpecificSize::new(0, Byte).unwrap())\n    }\n}\n\nimpl ToString for Size {\n    fn to_string(&self) -> String {\n        // return as KiB as that is what the examples expect\n        let kb: SpecificSize<Kibibyte> = self.0.into();\n        kb.value().to_string()\n    }\n}\n\nimpl Serialize for Size {\n    fn serialize<S>(&self, serializer: S) -> ::std::result::Result<S::Ok, S::Error>\n    where\n        S: Serializer,\n    {\n        serializer.serialize_str(&self.0.to_string())\n    }\n}\n\nimpl<'de> Deserialize<'de> for Size {\n    fn deserialize<D>(deserializer: D) -> ::std::result::Result<Self, D::Error>\n    where\n        D: Deserializer<'de>,\n    {\n        struct SizeVisitor;\n\n        impl<'de> Visitor<'de> for SizeVisitor {\n            type Value = Size;\n\n            fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {\n                f.write_str(\"user ID as a number or string\")\n            }\n\n            fn visit_u64<E>(self, size: u64) -> ::std::result::Result<Self::Value, E>\n            where\n                E: de::Error,\n            {\n                SpecificSize::new(size as f64, Byte)\n                    .map(Size)\n                    .map_err(de::Error::custom)\n            }\n\n            fn visit_str<E>(self, size: &str) -> ::std::result::Result<Self::Value, E>\n            where\n                E: de::Error,\n            {\n                size.parse().map(Size).map_err(de::Error::custom)\n            }\n        }\n\n        deserializer.deserialize_any(SizeVisitor)\n    }\n}\n\nimpl Case {\n    pub fn params(&self) -> Vec<Vec<String>> {\n        let mut res = Vec::with_capacity(4);\n\n        res.push(self.challenges.iter().map(ToString::to_string).collect());\n        res.push(self.size.iter().map(ToString::to_string).collect());\n        res.push(self.sloth.iter().map(ToString::to_string).collect());\n        res.push(self.m.iter().map(ToString::to_string).collect());\n        if let Some(ref hasher) = self.hasher {\n            res.push(hasher.clone());\n        }\n\n        res\n    }\n\n    pub fn get_param_name(&self, i: usize) -> Result<String> {\n        let params = self.get_param_names();\n        if i > params.len() {\n            return Err(format_err!(\"invalid param index 
{}\", i));\n        }\n\n        Ok(params[i].to_string())\n    }\n\n    pub fn get_param_names(&self) -> Vec<String> {\n        let mut res = vec![\n            \"challenges\".to_owned(),\n            \"size\".to_owned(),\n            \"sloth\".to_owned(),\n            \"m\".to_owned(),\n        ];\n\n        if self.hasher.is_some() {\n            res.push(\"hasher\".to_owned());\n        }\n\n        res\n    }\n}\n\n#[cfg(not(target_os = \"macos\"))]\nconst TIME_CMD: &str = \"/usr/bin/time\";\n\n#[cfg(target_os = \"macos\")]\nconst TIME_CMD: &str = \"gtime\";\n\n/// The directory in which we expect the compiled binaries to be in.\nconst BINARY_DIR: &str = \"target/release/examples\";\n\n/// The glob of which files to clear out before starting the run.\nconst CACHE_DIR: &str = \"/tmp/filecoin-proofs-cache-*\";\n\n/// The directory in which the benchmark results will be stored.\nconst RESULT_DIR: &str = \".bencher\";\n\nlazy_static! {\n    static ref PRELUDE: Vec<(&'static str, Vec<&'static str>)> =\n        vec![(\"cargo\", vec![\"build\", \"--all\", \"--examples\", \"--release\"]),];\n    static ref MARKDOWN_TABLE_FORMAT: format::TableFormat = format::FormatBuilder::new()\n        .column_separator('|')\n        .borders('|')\n        .separators(\n            &[format::LinePosition::Title],\n            format::LineSeparator::new('-', '|', '|', '|'),\n        )\n        .padding(1, 1)\n        .build();\n}\n\nfn combine<'a, T: ?Sized>(options: &'a [&'a [&'a T]]) -> Vec<Vec<&'a T>> {\n    Permutator::new(options).collect()\n}\n\nfn run(config_path: &str, print_table: bool) -> Result<()> {\n    println!(\"reading config \\\"{}\\\"...\", config_path);\n\n    let mut f = File::open(config_path)?;\n    let mut contents = String::new();\n    f.read_to_string(&mut contents)?;\n\n    let config: HashMap<String, Case> = toml::from_str(&contents)?;\n\n    println!(\"preparing...\");\n\n    // make sure we are cleaning up the cache\n    for file in glob(CACHE_DIR)? {\n        fs::remove_file(file?)?;\n    }\n\n    for (cmd, args) in &PRELUDE[..] 
{\n        let output = Command::new(cmd).args(args).output()?;\n        if !output.status.success() {\n            return Err(format_err!(\n                \"failed to execute '{} {:?}': {} stdout: {}, stderr: {}\",\n                cmd,\n                args,\n                output.status,\n                String::from_utf8_lossy(&output.stdout),\n                String::from_utf8_lossy(&output.stderr),\n            ));\n        }\n    }\n\n    for (name, example) in config.iter() {\n        let results = run_benchmark(name, example)?;\n        if print_table {\n            print_result_table(name, example, &results);\n        }\n    }\n\n    Ok(())\n}\n\nfn print_result_table(name: &str, example: &Case, results: &[BenchmarkResult]) {\n    let params = example.get_param_names();\n\n    let mut table = Table::new();\n    table.set_format(*MARKDOWN_TABLE_FORMAT);\n\n    let mut titles: Vec<&str> = vec![\n        \"name\",\n        \"size\",\n        \"proving\",\n        \"verifying\",\n        \"params gen\",\n        \"replication\",\n        \"max resident set size\",\n    ];\n\n    titles.extend(params.iter().map(|v| v.as_str()));\n\n    table.set_titles(Row::new(titles.iter().map(|v| Cell::new(v)).collect()));\n\n    for res in results {\n        let timing = res.time_res.max_resident_set_size.to_string();\n        let mut values: Vec<&str> = vec![\n            name,\n            &res.log_res.config[\"data_size\"],\n            &res.log_res.stats[\"avg_proving_time\"],\n            &res.log_res.stats[\"avg_verifying_time\"],\n            res.log_res\n                .stats\n                .get(\"params_generation_time\")\n                .map(|v| v.as_str())\n                .unwrap_or_else(|| \"\"),\n            res.log_res\n                .stats\n                .get(\"replication_time\")\n                .map(|v| v.as_str())\n                .unwrap_or_else(|| \"\"),\n            &timing,\n        ];\n        values.extend(res.combination.iter().map(|v| v.as_str()));\n\n        table.add_row(Row::new(values.into_iter().map(Cell::new).collect()));\n    }\n\n    println!(\"\\n\");\n    table.printstd();\n    println!(\"\\n\");\n}\n\n#[derive(Default, Debug, Serialize)]\nstruct TimeResult {\n    // Command being timed: \"/Users/dignifiedquire/work/filecoin/rust-proofs/target/release/examples/drgporep-vanilla --challenges 1 --size 1 --sloth 0 --m 6 --hasher sha256\"\n    command: String,\n    // User time (seconds): 118.33\n    user_time: f64,\n    // System time (seconds): 1.07\n    system_time: f64,\n    // Percent of CPU this job got: 959%\n    cpu: usize,\n    // Elapsed (wall clock) time (h:mm:ss or m:ss): 0:12.44\n    elapsed_time: Duration,\n    // Average shared text size (kbytes): 0\n    avg_shared_text_size: usize,\n    // Average unshared data size (kbytes): 0\n    avg_unshared_data_size: usize,\n    // Average stack size (kbytes): 0\n    avg_stack_size: usize,\n    // Average total size (kbytes): 0\n    avg_total_size: usize,\n    // Maximum resident set size (kbytes): 117604\n    max_resident_set_size: usize,\n    // Average resident set size (kbytes): 0\n    avg_resident_set_size: usize,\n    // Major (requiring I/O) page faults: 0\n    major_page_faults: usize,\n    // Minor (reclaiming a frame) page faults: 69788\n    minor_page_faults: usize,\n    // Voluntary context switches: 7\n    voluntary_context_switches: usize,\n    // Involuntary context switches: 70063\n    involuntary_context_switches: usize,\n    // Swaps: 0\n    swaps: usize,\n    // File system inputs: 
0\n    file_system_inputs: usize,\n    // File system outputs: 0\n    file_system_outputs: usize,\n    // Socket messages sent: 0\n    socket_messages_sent: usize,\n    // Socket messages received: 0\n    socket_messages_received: usize,\n    // Signals delivered: 0\n    signals_delivered: usize,\n    // Page size (bytes): 4096\n    page_size: usize,\n    // Exit status: 0\n    exit_status: usize,\n}\n\nimpl TimeResult {\n    fn from_str(raw: &str) -> Result<Self> {\n        let mut res = TimeResult::default();\n\n        for line in raw.trim().split('\\n') {\n            let line = line.trim();\n            let kv = line.split(\": \").collect::<Vec<&str>>();\n            let key = kv[0].trim();\n            let value = kv[1].trim();\n\n            match key {\n                \"Command being timed\" => {\n                    res.command = value.trim_matches('\"').to_string();\n                }\n                \"User time (seconds)\" => {\n                    res.user_time = value.parse()?;\n                }\n                \"System time (seconds)\" => {\n                    res.system_time = value.parse()?;\n                }\n                \"Percent of CPU this job got\" => {\n                    res.cpu = value.replace('%', \"\").parse()?;\n                }\n                \"Elapsed (wall clock) time (h:mm:ss or m:ss)\" => {\n                    let parts = value.split(':').collect::<Vec<&str>>();\n                    match parts.len() {\n                        2 => {\n                            let minutes = Duration::from_secs(parts[0].parse::<u64>()? * 60);\n                            let seconds =\n                                Duration::from_millis((parts[1].parse::<f64>()? * 1000.0) as u64);\n                            res.elapsed_time = minutes + seconds;\n                        }\n                        3 => {\n                            let hours = Duration::from_secs(parts[0].parse::<u64>()? * 60 * 60);\n                            let minutes = Duration::from_secs(parts[1].parse::<u64>()? * 60);\n                            let seconds =\n                                Duration::from_millis((parts[2].parse::<f64>()? 
* 1000.0) as u64);\n                            res.elapsed_time = hours + minutes + seconds;\n                        }\n                        _ => return Err(format_err!(\"invalid time format: '{}'\", value)),\n                    }\n                }\n                \"Average shared text size (kbytes)\" => {\n                    res.avg_shared_text_size = value.parse()?;\n                }\n                \"Average unshared data size (kbytes)\" => {\n                    res.avg_unshared_data_size = value.parse()?;\n                }\n                \"Average stack size (kbytes)\" => {\n                    res.avg_stack_size = value.parse()?;\n                }\n                \"Average total size (kbytes)\" => {\n                    res.avg_total_size = value.parse()?;\n                }\n                \"Maximum resident set size (kbytes)\" => {\n                    res.max_resident_set_size = value.parse()?;\n                }\n                \"Average resident set size (kbytes)\" => {\n                    res.avg_resident_set_size = value.parse()?;\n                }\n                \"Major (requiring I/O) page faults\" => {\n                    res.major_page_faults = value.parse()?;\n                }\n                \"Minor (reclaiming a frame) page faults\" => {\n                    res.minor_page_faults = value.parse()?;\n                }\n                \"Voluntary context switches\" => {\n                    res.voluntary_context_switches = value.parse()?;\n                }\n                \"Involuntary context switches\" => {\n                    res.involuntary_context_switches = value.parse()?;\n                }\n                \"Swaps\" => {\n                    res.swaps = value.parse()?;\n                }\n                \"File system inputs\" => {\n                    res.file_system_inputs = value.parse()?;\n                }\n                \"File system outputs\" => {\n                    res.file_system_outputs = value.parse()?;\n                }\n                \"Socket messages sent\" => {\n                    res.socket_messages_sent = value.parse()?;\n                }\n                \"Socket messages received\" => {\n                    res.socket_messages_received = value.parse()?;\n                }\n                \"Signals delivered\" => {\n                    res.signals_delivered = value.parse()?;\n                }\n                \"Page size (bytes)\" => {\n                    res.page_size = value.parse()?;\n                }\n                \"Exit status\" => {\n                    res.exit_status = value.parse()?;\n                }\n                _ => {\n                    return Err(format_err!(\"unknown key: {}\", key));\n                }\n            }\n        }\n\n        Ok(res)\n    }\n}\n\n#[derive(Default, Debug, Serialize)]\nstruct BenchmarkResult {\n    combination: Vec<String>,\n    stdout: String,\n    stderr: String,\n    time_res: TimeResult,\n    log_res: LogResult,\n}\n\nimpl BenchmarkResult {\n    pub fn new(combination: &[&str], stdout: &str, stderr: &str) -> Result<Self> {\n        // removes the annoying progress bar\n        let stderr = \"Command being timed\".to_owned()\n            + stderr.split(\"Command being timed\").collect::<Vec<&str>>()[1];\n\n        let time_res = TimeResult::from_str(&stderr)?;\n        let log_res = LogResult::from_str(&stdout)?;\n\n        Ok(BenchmarkResult {\n            combination: combination.iter().map(|v| v.to_string()).collect(),\n            stdout: 
stdout.to_owned(),\n            stderr,\n            time_res,\n            log_res,\n        })\n    }\n}\n\n#[derive(Default, Debug, Serialize)]\nstruct LogResult {\n    config: HashMap<String, String>,\n    stats: HashMap<String, String>,\n}\n\nimpl LogResult {\n    fn from_str(raw: &str) -> Result<Self> {\n        let lines = raw.trim().split('\\n').map(|l| {\n            let parsed: serde_json::Result<HashMap<String, String>> = serde_json::from_str(l);\n            let parsed = parsed.expects(\"The bencher requires JSON log-output.\");\n\n            let raw = parsed.get(\"msg\").unwrap();\n            let system = parsed\n                .get(\"target\")\n                .map(|x| x.clone())\n                .unwrap_or(String::from(\"\"));\n            let kv = raw.trim().split(\": \").collect::<Vec<&str>>();\n            let key = kv[0].trim();\n            let value = if kv.len() > 1 { kv[1].trim() } else { \"\" };\n\n            (String::from(system), String::from(key), String::from(value))\n        });\n\n        let mut config = HashMap::new();\n        let mut stats = HashMap::new();\n\n        for (system, key, value) in lines {\n            match system.as_ref() {\n                \"config\" => {\n                    config.insert(key.to_owned(), value.to_owned());\n                }\n                \"stats\" => {\n                    stats.insert(key.to_owned(), value.to_owned());\n                }\n                // ignoring unknown subsystems for now\n                _ => {}\n            }\n        }\n\n        Ok(LogResult { config, stats })\n    }\n}\n\nfn run_benchmark(name: &str, config: &Case) -> Result<Vec<BenchmarkResult>> {\n    println!(\"benchmarking example: {}\", name);\n\n    // create dir to store results\n    let result_dir = env::current_dir()?.join(RESULT_DIR).join(name);\n    fs::create_dir_all(&result_dir)?;\n\n    // the dance below is to avoid copies\n    let params = config.params();\n    let tmp_1: Vec<Vec<&str>> = params\n        .iter()\n        .map(|list| list.iter().map(AsRef::as_ref).collect::<Vec<&str>>())\n        .collect();\n    let tmp_2: Vec<&[&str]> = tmp_1.iter().map(AsRef::as_ref).collect();\n\n    let combinations = combine(&tmp_2[..]);\n\n    let binary_path = fs::canonicalize(BINARY_DIR)?.join(name);\n\n    let mut results = Vec::with_capacity(combinations.len());\n\n    for combination in &combinations {\n        let mut cmd = Command::new(TIME_CMD);\n        cmd.arg(\"-v\").arg(&binary_path);\n\n        let mut print_comb = \"\\t\".to_owned();\n        for (i, param) in combination.iter().enumerate() {\n            let n = config.get_param_name(i)?;\n            cmd.arg(format!(\"--{}\", n)).arg(param);\n            print_comb += &format!(\"{}: {}\\t\", n, param);\n        }\n        println!(\"{}\", print_comb);\n\n        if let Some(ref command) = config.command {\n            cmd.arg(command);\n        }\n\n        let output = cmd.output()?;\n        let res = BenchmarkResult::new(\n            combination,\n            &String::from_utf8_lossy(&output.stdout),\n            &String::from_utf8_lossy(&output.stderr),\n        )?;\n\n        let mut data = serde_json::to_string(&res)?;\n        data.push('\\n');\n        results.push(res);\n\n        // store result on disk\n        let timestamp = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?;\n        let filename = result_dir.join(format!(\n            \"{}-{}.json\",\n            combination.join(\"-\"),\n            timestamp.as_secs(),\n        ));\n\n        
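// one timestamped JSON file per parameter combination keeps results\n        // from separate runs side by side\n        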
fs::write(filename, data)?;\n    }\n\n    Ok(results)\n}\n\nfn main() {\n    // the bencher output-parsing code requires JSON, and an environment\n    // variable is the mechanism for enabling JSON-log support\n    std::env::set_var(\"RUST_PROOFS_LOG_JSON\", \"true\");\n\n    let matches = App::new(\"Rust Proofs Bencher\")\n        .version(\"1.0\")\n        .about(\"Benchmark all the things\")\n        .arg(\n            Arg::with_name(\"config\")\n                .short(\"c\")\n                .long(\"config\")\n                .value_name(\"FILE\")\n                .default_value(\"bench.config.toml\")\n                .help(\"Sets a custom config file\")\n                .takes_value(true),\n        )\n        .arg(\n            Arg::with_name(\"table\")\n                .long(\"table\")\n                .takes_value(false)\n                .help(\"Print a summary as markdown table\"),\n        )\n        .get_matches();\n\n    let config = matches.value_of(\"config\").unwrap();\n    let print_table = matches.is_present(\"table\");\n\n    ::std::process::exit(match run(config, print_table) {\n        Ok(_) => 0,\n        Err(err) => {\n            eprintln!(\"error: {:?}\", err);\n            1\n        }\n    });\n}\n\n#[test]\nfn test_combine() {\n    let input = vec![vec![\"1\", \"2\", \"3\"], vec![\"4\", \"5\"]];\n    let refs: Vec<&[&str]> = input.iter().map(AsRef::as_ref).collect();\n    assert_eq!(\n        combine(&refs[..]),\n        vec![\n            vec![\"1\", \"4\"],\n            vec![\"1\", \"5\"],\n            vec![\"2\", \"4\"],\n            vec![\"2\", \"5\"],\n            vec![\"3\", \"4\"],\n            vec![\"3\", \"5\"]\n        ],\n    );\n}\n\n#[test]\nfn test_time_result_from_str() {\n    let res = TimeResult::from_str(\"\n\tCommand being timed: \\\"/Users/dignifiedquire/work/filecoin/rust-proofs/target/release/examples/drgporep-vanilla --challenges 1 --size 1 --sloth 0 --m 6 --hasher sha256\\\"\n\tUser time (seconds): 0.01\n\tSystem time (seconds): 0.01\n\tPercent of CPU this job got: 184%\n\tElapsed (wall clock) time (h:mm:ss or m:ss): 0:00.01\n\tAverage shared text size (kbytes): 0\n\tAverage unshared data size (kbytes): 0\n\tAverage stack size (kbytes): 0\n\tAverage total size (kbytes): 0\n\tMaximum resident set size (kbytes): 6932\n\tAverage resident set size (kbytes): 0\n\tMajor (requiring I/O) page faults: 0\n\tMinor (reclaiming a frame) page faults: 1932\n\tVoluntary context switches: 0\n\tInvoluntary context switches: 889\n\tSwaps: 0\n\tFile system inputs: 0\n\tFile system outputs: 0\n\tSocket messages sent: 0\n\tSocket messages received: 0\n\tSignals delivered: 0\n\tPage size (bytes): 4096\n\tExit status: 0\n\").unwrap();\n\n    assert_eq!(res.command, \"/Users/dignifiedquire/work/filecoin/rust-proofs/target/release/examples/drgporep-vanilla --challenges 1 --size 1 --sloth 0 --m 6 --hasher sha256\");\n    assert_eq!(res.user_time, 0.01);\n    assert_eq!(res.swaps, 0);\n    assert_eq!(res.involuntary_context_switches, 889);\n    assert_eq!(res.cpu, 184);\n    assert_eq!(res.elapsed_time, Duration::from_millis(10));\n}\n\n#[test]\nfn test_log_results_str_json() {\n    let res = LogResult::from_str(\"\n{\\\"msg\\\":\\\"constraint system: Groth\\\",\\\"level\\\":\\\"INFO\\\",\\\"ts\\\":\\\"2018-12-14T13:57:19.315918-08:00\\\",\\\"place\\\":\\\"storage-proofs/src/example_helper.rs:86 storage_proofs::example_helper\\\",\\\"root\\\":\\\"storage-proofs\\\",\\\"target\\\":\\\"config\\\"}\n{\\\"msg\\\":\\\"data_size:  1 
kB\\\",\\\"level\\\":\\\"INFO\\\",\\\"ts\\\":\\\"2018-12-14T13:57:19.316948-08:00\\\",\\\"place\\\":\\\"storage-proofs/src/example_helper.rs:87 storage_proofs::example_helper\\\",\\\"root\\\":\\\"storage-proofs\\\",\\\"target\\\":\\\"config\\\"}\n{\\\"msg\\\":\\\"challenge_count: 1\\\",\\\"level\\\":\\\"INFO\\\",\\\"ts\\\":\\\"2018-12-14T13:57:19.316961-08:00\\\",\\\"place\\\":\\\"storage-proofs/src/example_helper.rs:88 storage_proofs::example_helper\\\",\\\"root\\\":\\\"storage-proofs\\\",\\\"target\\\":\\\"config\\\"}\n{\\\"msg\\\":\\\"m: 6\\\",\\\"level\\\":\\\"INFO\\\",\\\"ts\\\":\\\"2018-12-14T13:57:19.316970-08:00\\\",\\\"place\\\":\\\"storage-proofs/src/example_helper.rs:89 storage_proofs::example_helper\\\",\\\"root\\\":\\\"storage-proofs\\\",\\\"target\\\":\\\"config\\\"}\n{\\\"msg\\\":\\\"sloth: 0\\\",\\\"level\\\":\\\"INFO\\\",\\\"ts\\\":\\\"2018-12-14T13:57:19.316978-08:00\\\",\\\"place\\\":\\\"storage-proofs/src/example_helper.rs:90 storage_proofs::example_helper\\\",\\\"root\\\":\\\"storage-proofs\\\",\\\"target\\\":\\\"config\\\"}\n{\\\"msg\\\":\\\"tree_depth: 5\\\",\\\"level\\\":\\\"INFO\\\",\\\"ts\\\":\\\"2018-12-14T13:57:19.317011-08:00\\\",\\\"place\\\":\\\"storage-proofs/src/example_helper.rs:91 storage_proofs::example_helper\\\",\\\"root\\\":\\\"storage-proofs\\\",\\\"target\\\":\\\"config\\\"}\n{\\\"msg\\\":\\\"reading groth params from cache: \\\\\\\"/tmp/filecoin-proofs-cache-multi-challenge merklepor-1024-1-6-0\\\\\\\"\\\",\\\"level\\\":\\\"INFO\\\",\\\"ts\\\":\\\"2018-12-14T13:57:19.317046-08:00\\\",\\\"place\\\":\\\"storage-proofs/src/example_helper.rs:102 storage_proofs::example_helper\\\",\\\"root\\\":\\\"storage-proofs\\\",\\\"target\\\":\\\"params\\\"}\n{\\\"msg\\\":\\\"generating verification key\\\",\\\"level\\\":\\\"INFO\\\",\\\"ts\\\":\\\"2018-12-14T13:57:19.388725-08:00\\\",\\\"place\\\":\\\"storage-proofs/src/example_helper.rs:123 storage_proofs::example_helper\\\",\\\"root\\\":\\\"storage-proofs\\\",\\\"target\\\":\\\"params\\\"}\n{\\\"msg\\\":\\\"avg_proving_time: 0.213533235 seconds\\\",\\\"level\\\":\\\"INFO\\\",\\\"ts\\\":\\\"2018-12-14T13:57:20.480250-08:00\\\",\\\"place\\\":\\\"storage-proofs/src/example_helper.rs:180 storage_proofs::example_helper\\\",\\\"root\\\":\\\"storage-proofs\\\",\\\"target\\\":\\\"stats\\\"}\n{\\\"msg\\\":\\\"avg_verifying_time: 0.003935171 seconds\\\",\\\"level\\\":\\\"INFO\\\",\\\"ts\\\":\\\"2018-12-14T13:57:20.480273-08:00\\\",\\\"place\\\":\\\"storage-proofs/src/example_helper.rs:181 storage_proofs::example_helper\\\",\\\"root\\\":\\\"storage-proofs\\\",\\\"target\\\":\\\"stats\\\"}\n{\\\"msg\\\":\\\"params_generation_time: 76.536768ms\\\",\\\"level\\\":\\\"INFO\\\",\\\"ts\\\":\\\"2018-12-14T13:57:20.480283-08:00\\\",\\\"place\\\":\\\"storage-proofs/src/example_helper.rs:182 storage_proofs::example_helper\\\",\\\"root\\\":\\\"storage-proofs\\\",\\\"target\\\":\\\"stats\\\"}\n\n\").unwrap();\n\n    assert_eq!(res.config.get(\"constraint system\").unwrap(), \"Groth\");\n    assert_eq!(res.config.get(\"data_size\").unwrap(), \"1 kB\",);\n    assert_eq!(\n        res.stats.get(\"avg_proving_time\").unwrap(),\n        \"0.213533235 seconds\"\n    );\n}\n"
  },
  {
    "path": "src/lib.rs",
    "content": "// Hello, I am a a toplevel placeholder. Do not put any code here.\n"
  },
  {
    "path": "storage-backend/.gitignore",
    "content": "/target\n**/*.rs.bk\nCargo.lock\n.criterion\n**/libproofs.h\nheaptrack*"
  },
  {
    "path": "storage-backend/Cargo.toml",
    "content": "[package]\nname = \"storage-backend\"\nversion = \"0.1.0\"\nauthors = [\"dignifiedquire <dignifiedquire@gmail.com>\"]\nlicense = \"MIT OR Apache-2.0\"\n\nedition = \"2018\"\n\n[dependencies]\n"
  },
  {
    "path": "storage-backend/LICENSE-APACHE",
    "content": "                              Apache License\n                        Version 2.0, January 2004\n                     http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n   \"License\" shall mean the terms and conditions for use, reproduction,\n   and distribution as defined by Sections 1 through 9 of this document.\n\n   \"Licensor\" shall mean the copyright owner or entity authorized by\n   the copyright owner that is granting the License.\n\n   \"Legal Entity\" shall mean the union of the acting entity and all\n   other entities that control, are controlled by, or are under common\n   control with that entity. For the purposes of this definition,\n   \"control\" means (i) the power, direct or indirect, to cause the\n   direction or management of such entity, whether by contract or\n   otherwise, or (ii) ownership of fifty percent (50%) or more of the\n   outstanding shares, or (iii) beneficial ownership of such entity.\n\n   \"You\" (or \"Your\") shall mean an individual or Legal Entity\n   exercising permissions granted by this License.\n\n   \"Source\" form shall mean the preferred form for making modifications,\n   including but not limited to software source code, documentation\n   source, and configuration files.\n\n   \"Object\" form shall mean any form resulting from mechanical\n   transformation or translation of a Source form, including but\n   not limited to compiled object code, generated documentation,\n   and conversions to other media types.\n\n   \"Work\" shall mean the work of authorship, whether in Source or\n   Object form, made available under the License, as indicated by a\n   copyright notice that is included in or attached to the work\n   (an example is provided in the Appendix below).\n\n   \"Derivative Works\" shall mean any work, whether in Source or Object\n   form, that is based on (or derived from) the Work and for which the\n   editorial revisions, annotations, elaborations, or other modifications\n   represent, as a whole, an original work of authorship. For the purposes\n   of this License, Derivative Works shall not include works that remain\n   separable from, or merely link (or bind by name) to the interfaces of,\n   the Work and Derivative Works thereof.\n\n   \"Contribution\" shall mean any work of authorship, including\n   the original version of the Work and any modifications or additions\n   to that Work or Derivative Works thereof, that is intentionally\n   submitted to Licensor for inclusion in the Work by the copyright owner\n   or by an individual or Legal Entity authorized to submit on behalf of\n   the copyright owner. For the purposes of this definition, \"submitted\"\n   means any form of electronic, verbal, or written communication sent\n   to the Licensor or its representatives, including but not limited to\n   communication on electronic mailing lists, source code control systems,\n   and issue tracking systems that are managed by, or on behalf of, the\n   Licensor for the purpose of discussing and improving the Work, but\n   excluding communication that is conspicuously marked or otherwise\n   designated in writing by the copyright owner as \"Not a Contribution.\"\n\n   \"Contributor\" shall mean Licensor and any individual or Legal Entity\n   on behalf of whom a Contribution has been received by Licensor and\n   subsequently incorporated within the Work.\n\n2. Grant of Copyright License. 
Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   copyright license to reproduce, prepare Derivative Works of,\n   publicly display, publicly perform, sublicense, and distribute the\n   Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   (except as stated in this section) patent license to make, have made,\n   use, offer to sell, sell, import, and otherwise transfer the Work,\n   where such license applies only to those patent claims licensable\n   by such Contributor that are necessarily infringed by their\n   Contribution(s) alone or by combination of their Contribution(s)\n   with the Work to which such Contribution(s) was submitted. If You\n   institute patent litigation against any entity (including a\n   cross-claim or counterclaim in a lawsuit) alleging that the Work\n   or a Contribution incorporated within the Work constitutes direct\n   or contributory patent infringement, then any patent licenses\n   granted to You under this License for that Work shall terminate\n   as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n   Work or Derivative Works thereof in any medium, with or without\n   modifications, and in Source or Object form, provided that You\n   meet the following conditions:\n\n   (a) You must give any other recipients of the Work or\n       Derivative Works a copy of this License; and\n\n   (b) You must cause any modified files to carry prominent notices\n       stating that You changed the files; and\n\n   (c) You must retain, in the Source form of any Derivative Works\n       that You distribute, all copyright, patent, trademark, and\n       attribution notices from the Source form of the Work,\n       excluding those notices that do not pertain to any part of\n       the Derivative Works; and\n\n   (d) If the Work includes a \"NOTICE\" text file as part of its\n       distribution, then any Derivative Works that You distribute must\n       include a readable copy of the attribution notices contained\n       within such NOTICE file, excluding those notices that do not\n       pertain to any part of the Derivative Works, in at least one\n       of the following places: within a NOTICE text file distributed\n       as part of the Derivative Works; within the Source form or\n       documentation, if provided along with the Derivative Works; or,\n       within a display generated by the Derivative Works, if and\n       wherever such third-party notices normally appear. The contents\n       of the NOTICE file are for informational purposes only and\n       do not modify the License. 
You may add Your own attribution\n       notices within Derivative Works that You distribute, alongside\n       or as an addendum to the NOTICE text from the Work, provided\n       that such additional attribution notices cannot be construed\n       as modifying the License.\n\n   You may add Your own copyright statement to Your modifications and\n   may provide additional or different license terms and conditions\n   for use, reproduction, or distribution of Your modifications, or\n   for any such Derivative Works as a whole, provided Your use,\n   reproduction, and distribution of the Work otherwise complies with\n   the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n   any Contribution intentionally submitted for inclusion in the Work\n   by You to the Licensor shall be under the terms and conditions of\n   this License, without any additional terms or conditions.\n   Notwithstanding the above, nothing herein shall supersede or modify\n   the terms of any separate license agreement you may have executed\n   with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n   names, trademarks, service marks, or product names of the Licensor,\n   except as required for reasonable and customary use in describing the\n   origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n   agreed to in writing, Licensor provides the Work (and each\n   Contributor provides its Contributions) on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n   implied, including, without limitation, any warranties or conditions\n   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n   PARTICULAR PURPOSE. You are solely responsible for determining the\n   appropriateness of using or redistributing the Work and assume any\n   risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n   whether in tort (including negligence), contract, or otherwise,\n   unless required by applicable law (such as deliberate and grossly\n   negligent acts) or agreed to in writing, shall any Contributor be\n   liable to You for damages, including any direct, indirect, special,\n   incidental, or consequential damages of any character arising as a\n   result of this License or out of the use or inability to use the\n   Work (including but not limited to damages for loss of goodwill,\n   work stoppage, computer failure or malfunction, or any and all\n   other commercial damages or losses), even if such Contributor\n   has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n   the Work or Derivative Works thereof, You may choose to offer,\n   and charge a fee for, acceptance of support, warranty, indemnity,\n   or other liability obligations and/or rights consistent with this\n   License. 
However, in accepting such obligations, You may act only\n   on Your own behalf and on Your sole responsibility, not on behalf\n   of any other Contributor, and only if You agree to indemnify,\n   defend, and hold each Contributor harmless for any liability\n   incurred by, or claims asserted against, such Contributor by reason\n   of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n   To apply the Apache License to your work, attach the following\n   boilerplate notice, with the fields enclosed by brackets \"[]\"\n   replaced with your own identifying information. (Don't include\n   the brackets!)  The text should be enclosed in the appropriate\n   comment syntax for the file format. We also recommend that a\n   file or class name and description of purpose be included on the\n   same \"printed page\" as the copyright notice for easier\n   identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License."
  },
  {
    "path": "storage-backend/LICENSE-MIT",
    "content": "Permission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE."
  },
  {
    "path": "storage-backend/README.md",
    "content": "# Storage Backend\n\nThis crate contains various trait based abstractions for storage. These are used in\n`sector-base` and `storage-proofs`.\n\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "storage-backend/src/lib.rs",
    "content": "#![deny(clippy::all, clippy::perf, clippy::correctness)]\n#![allow(clippy::unreadable_literal)]\n#![warn(clippy::type_complexity, clippy::too_many_arguments)]\n"
  },
  {
    "path": "storage-proofs/.gitignore",
    "content": "/target\n**/*.rs.bk\nCargo.lock\n.criterion\n**/libproofs.h\nheaptrack*"
  },
  {
    "path": "storage-proofs/Cargo.toml",
    "content": "[package]\nname = \"storage-proofs\"\nversion = \"0.1.0\"\nauthors = [\"dignifiedquire <dignifiedquire@gmail.com>\"]\nlicense = \"MIT OR Apache-2.0\"\n\nedition = \"2018\"\n\n[dependencies]\nlogging-toolkit = { path = \"../logging-toolkit\" }\nbitvec = \"0.5\"\nsapling-crypto = { git = \"https://github.com/zcash-hackworks/sapling-crypto\", branch = \"master\" }\nrand = \"0.4\"\nlibc = \"0.2\"\nmerkle_light = { git = \"https://github.com/filecoin-project/merkle_light\", branch = \"master\" }\nfailure = \"0.1\"\nbellman = \"0.1\"\nbyteorder = \"1\"\ncrossbeam-utils = \"0.6\"\nitertools = \"0.7.3\"\nlazy_static = \"1.2\"\nmemmap = \"0.6\"\nnum-bigint = \"0.2\"\nnum-traits = \"0.2\"\nclap = \"2\"\ncolored = \"1.6\"\naes = \"0.1\"\nblock-modes = \"0.1\"\nsha2 = \"0.8\"\nblake2 = \"0.8\"\npbr = \"1.0\"\ntempfile = \"3\"\nfs2 = \"0.4\"\nrayon = \"1.0.0\"\nslog = { version = \"2.4.1\", features = [\"max_level_trace\", \"release_max_level_trace\"] }\nserde = \"1.0\"\nserde_derive = \"1.0\"\nbase64 = \"0.10.0\"\n\n[dependencies.pairing]\nversion = \"0.14.2\"\nfeatures = [\"expose-arith\"]\n\n[features]\ndefault = [\"u128-support\"]\nu128-support = [\"pairing/u128-support\"]\nsimd = [\"blake2/simd_opt\"]\nasm = [\"sha2/sha2-asm\", \"blake2/simd_asm\"]\n\n[dev-dependencies]\nproptest = \"0.7\"\ncriterion = \"0.2\"\nsector-base = { path = \"../sector-base\" }\nserde_json = \"1.0\"\n\n[[bench]]\nname = \"pedersen\"\nharness = false\n\n[[bench]]\nname = \"sha256\"\nharness = false\n\n[[bench]]\nname = \"blake2s\"\nharness = false\n\n[[bench]]\nname = \"drgraph\"\nharness = false\n\n[[bench]]\nname = \"preprocessing\"\nharness = false\n\n[[bench]]\nname = \"sloth\"\nharness = false\n"
  },
  {
    "path": "storage-proofs/LICENSE-APACHE",
    "content": "                              Apache License\n                        Version 2.0, January 2004\n                     http://www.apache.org/licenses/\n\nTERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n1. Definitions.\n\n   \"License\" shall mean the terms and conditions for use, reproduction,\n   and distribution as defined by Sections 1 through 9 of this document.\n\n   \"Licensor\" shall mean the copyright owner or entity authorized by\n   the copyright owner that is granting the License.\n\n   \"Legal Entity\" shall mean the union of the acting entity and all\n   other entities that control, are controlled by, or are under common\n   control with that entity. For the purposes of this definition,\n   \"control\" means (i) the power, direct or indirect, to cause the\n   direction or management of such entity, whether by contract or\n   otherwise, or (ii) ownership of fifty percent (50%) or more of the\n   outstanding shares, or (iii) beneficial ownership of such entity.\n\n   \"You\" (or \"Your\") shall mean an individual or Legal Entity\n   exercising permissions granted by this License.\n\n   \"Source\" form shall mean the preferred form for making modifications,\n   including but not limited to software source code, documentation\n   source, and configuration files.\n\n   \"Object\" form shall mean any form resulting from mechanical\n   transformation or translation of a Source form, including but\n   not limited to compiled object code, generated documentation,\n   and conversions to other media types.\n\n   \"Work\" shall mean the work of authorship, whether in Source or\n   Object form, made available under the License, as indicated by a\n   copyright notice that is included in or attached to the work\n   (an example is provided in the Appendix below).\n\n   \"Derivative Works\" shall mean any work, whether in Source or Object\n   form, that is based on (or derived from) the Work and for which the\n   editorial revisions, annotations, elaborations, or other modifications\n   represent, as a whole, an original work of authorship. For the purposes\n   of this License, Derivative Works shall not include works that remain\n   separable from, or merely link (or bind by name) to the interfaces of,\n   the Work and Derivative Works thereof.\n\n   \"Contribution\" shall mean any work of authorship, including\n   the original version of the Work and any modifications or additions\n   to that Work or Derivative Works thereof, that is intentionally\n   submitted to Licensor for inclusion in the Work by the copyright owner\n   or by an individual or Legal Entity authorized to submit on behalf of\n   the copyright owner. For the purposes of this definition, \"submitted\"\n   means any form of electronic, verbal, or written communication sent\n   to the Licensor or its representatives, including but not limited to\n   communication on electronic mailing lists, source code control systems,\n   and issue tracking systems that are managed by, or on behalf of, the\n   Licensor for the purpose of discussing and improving the Work, but\n   excluding communication that is conspicuously marked or otherwise\n   designated in writing by the copyright owner as \"Not a Contribution.\"\n\n   \"Contributor\" shall mean Licensor and any individual or Legal Entity\n   on behalf of whom a Contribution has been received by Licensor and\n   subsequently incorporated within the Work.\n\n2. Grant of Copyright License. 
Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   copyright license to reproduce, prepare Derivative Works of,\n   publicly display, publicly perform, sublicense, and distribute the\n   Work and such Derivative Works in Source or Object form.\n\n3. Grant of Patent License. Subject to the terms and conditions of\n   this License, each Contributor hereby grants to You a perpetual,\n   worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n   (except as stated in this section) patent license to make, have made,\n   use, offer to sell, sell, import, and otherwise transfer the Work,\n   where such license applies only to those patent claims licensable\n   by such Contributor that are necessarily infringed by their\n   Contribution(s) alone or by combination of their Contribution(s)\n   with the Work to which such Contribution(s) was submitted. If You\n   institute patent litigation against any entity (including a\n   cross-claim or counterclaim in a lawsuit) alleging that the Work\n   or a Contribution incorporated within the Work constitutes direct\n   or contributory patent infringement, then any patent licenses\n   granted to You under this License for that Work shall terminate\n   as of the date such litigation is filed.\n\n4. Redistribution. You may reproduce and distribute copies of the\n   Work or Derivative Works thereof in any medium, with or without\n   modifications, and in Source or Object form, provided that You\n   meet the following conditions:\n\n   (a) You must give any other recipients of the Work or\n       Derivative Works a copy of this License; and\n\n   (b) You must cause any modified files to carry prominent notices\n       stating that You changed the files; and\n\n   (c) You must retain, in the Source form of any Derivative Works\n       that You distribute, all copyright, patent, trademark, and\n       attribution notices from the Source form of the Work,\n       excluding those notices that do not pertain to any part of\n       the Derivative Works; and\n\n   (d) If the Work includes a \"NOTICE\" text file as part of its\n       distribution, then any Derivative Works that You distribute must\n       include a readable copy of the attribution notices contained\n       within such NOTICE file, excluding those notices that do not\n       pertain to any part of the Derivative Works, in at least one\n       of the following places: within a NOTICE text file distributed\n       as part of the Derivative Works; within the Source form or\n       documentation, if provided along with the Derivative Works; or,\n       within a display generated by the Derivative Works, if and\n       wherever such third-party notices normally appear. The contents\n       of the NOTICE file are for informational purposes only and\n       do not modify the License. 
You may add Your own attribution\n       notices within Derivative Works that You distribute, alongside\n       or as an addendum to the NOTICE text from the Work, provided\n       that such additional attribution notices cannot be construed\n       as modifying the License.\n\n   You may add Your own copyright statement to Your modifications and\n   may provide additional or different license terms and conditions\n   for use, reproduction, or distribution of Your modifications, or\n   for any such Derivative Works as a whole, provided Your use,\n   reproduction, and distribution of the Work otherwise complies with\n   the conditions stated in this License.\n\n5. Submission of Contributions. Unless You explicitly state otherwise,\n   any Contribution intentionally submitted for inclusion in the Work\n   by You to the Licensor shall be under the terms and conditions of\n   this License, without any additional terms or conditions.\n   Notwithstanding the above, nothing herein shall supersede or modify\n   the terms of any separate license agreement you may have executed\n   with Licensor regarding such Contributions.\n\n6. Trademarks. This License does not grant permission to use the trade\n   names, trademarks, service marks, or product names of the Licensor,\n   except as required for reasonable and customary use in describing the\n   origin of the Work and reproducing the content of the NOTICE file.\n\n7. Disclaimer of Warranty. Unless required by applicable law or\n   agreed to in writing, Licensor provides the Work (and each\n   Contributor provides its Contributions) on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n   implied, including, without limitation, any warranties or conditions\n   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n   PARTICULAR PURPOSE. You are solely responsible for determining the\n   appropriateness of using or redistributing the Work and assume any\n   risks associated with Your exercise of permissions under this License.\n\n8. Limitation of Liability. In no event and under no legal theory,\n   whether in tort (including negligence), contract, or otherwise,\n   unless required by applicable law (such as deliberate and grossly\n   negligent acts) or agreed to in writing, shall any Contributor be\n   liable to You for damages, including any direct, indirect, special,\n   incidental, or consequential damages of any character arising as a\n   result of this License or out of the use or inability to use the\n   Work (including but not limited to damages for loss of goodwill,\n   work stoppage, computer failure or malfunction, or any and all\n   other commercial damages or losses), even if such Contributor\n   has been advised of the possibility of such damages.\n\n9. Accepting Warranty or Additional Liability. While redistributing\n   the Work or Derivative Works thereof, You may choose to offer,\n   and charge a fee for, acceptance of support, warranty, indemnity,\n   or other liability obligations and/or rights consistent with this\n   License. 
However, in accepting such obligations, You may act only\n   on Your own behalf and on Your sole responsibility, not on behalf\n   of any other Contributor, and only if You agree to indemnify,\n   defend, and hold each Contributor harmless for any liability\n   incurred by, or claims asserted against, such Contributor by reason\n   of your accepting any such warranty or additional liability.\n\nEND OF TERMS AND CONDITIONS\n\nAPPENDIX: How to apply the Apache License to your work.\n\n   To apply the Apache License to your work, attach the following\n   boilerplate notice, with the fields enclosed by brackets \"[]\"\n   replaced with your own identifying information. (Don't include\n   the brackets!)  The text should be enclosed in the appropriate\n   comment syntax for the file format. We also recommend that a\n   file or class name and description of purpose be included on the\n   same \"printed page\" as the copyright notice for easier\n   identification within third-party archives.\n\nCopyright [yyyy] [name of copyright owner]\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License."
  },
  {
    "path": "storage-proofs/LICENSE-MIT",
    "content": "Permission is hereby granted, free of charge, to any\nperson obtaining a copy of this software and associated\ndocumentation files (the \"Software\"), to deal in the\nSoftware without restriction, including without\nlimitation the rights to use, copy, modify, merge,\npublish, distribute, sublicense, and/or sell copies of\nthe Software, and to permit persons to whom the Software\nis furnished to do so, subject to the following\nconditions:\n\nThe above copyright notice and this permission notice\nshall be included in all copies or substantial portions\nof the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF\nANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED\nTO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\nSHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\nCLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR\nIN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE."
  },
  {
    "path": "storage-proofs/README.md",
    "content": "# Storage Proofs\n\nThis crate contains various implementations of proofs of storage.\n\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "storage-proofs/benches/blake2s.rs",
    "content": "#[macro_use]\nextern crate criterion;\nextern crate bellman;\nextern crate bitvec;\nextern crate pairing;\nextern crate rand;\nextern crate sapling_crypto;\nextern crate sha2;\nextern crate storage_proofs;\n\nuse bellman::groth16::*;\nuse bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse criterion::{black_box, Criterion, ParameterizedBenchmark};\nuse pairing::bls12_381::Bls12;\nuse rand::{thread_rng, Rng};\nuse sapling_crypto::circuit as scircuit;\nuse sapling_crypto::circuit::boolean::{self, Boolean};\nuse sapling_crypto::jubjub::JubjubEngine;\nuse storage_proofs::circuit::bench::BenchCS;\nuse storage_proofs::crypto;\n\nstruct Blake2sExample<'a> {\n    data: &'a [Option<bool>],\n}\n\nimpl<'a, E> Circuit<E> for Blake2sExample<'a>\nwhere\n    E: JubjubEngine,\n{\n    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let data: Vec<Boolean> = self\n            .data\n            .into_iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        let cs = cs.namespace(|| \"blake2s\");\n        let personalization = vec![0u8; 8];\n        let _res = scircuit::blake2s::blake2s(cs, &data, &personalization)?;\n        Ok(())\n    }\n}\n\nfn blake2s_benchmark(c: &mut Criterion) {\n    let mut rng1 = thread_rng();\n    let rng2 = thread_rng();\n\n    let groth_params = generate_random_parameters::<Bls12, _, _>(\n        Blake2sExample {\n            data: &vec![None; 256],\n        },\n        &mut rng1,\n    )\n    .unwrap();\n\n    let params = vec![32];\n\n    c.bench(\n        \"blake2s\",\n        ParameterizedBenchmark::new(\n            \"non-circuit-32bytes\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                b.iter(|| crypto::blake2s::blake2s(&data))\n            },\n            params,\n        )\n        .with_function(\"circuit-32bytes-create_proof\", move |b, bytes| {\n            b.iter(|| {\n                let mut rng = rng1.clone();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                let proof = create_random_proof(\n                    Blake2sExample {\n                        data: data.as_slice(),\n                    },\n                    &groth_params,\n                    &mut rng,\n                )\n                .unwrap();\n\n                black_box(proof)\n            });\n        })\n        .with_function(\"circuit-32bytes-synthesize_circuit\", move |b, bytes| {\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n\n                let mut rng = rng2.clone();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                Blake2sExample {\n                    data: data.as_slice(),\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(benches, blake2s_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/benches/drgraph.rs",
    "content": "#[macro_use]\nextern crate criterion;\nextern crate storage_proofs;\n\nuse criterion::{black_box, Criterion, ParameterizedBenchmark};\nuse storage_proofs::drgraph::*;\nuse storage_proofs::hasher::pedersen::*;\n\nfn drgraph(c: &mut Criterion) {\n    let params: Vec<_> = vec![12, 24, 128, 1024]\n        .iter()\n        .map(|n| (BucketGraph::<PedersenHasher>::new(*n, 6, 0, new_seed()), 2))\n        .collect();\n    c.bench(\n        \"sample\",\n        ParameterizedBenchmark::new(\n            \"bucket/m=6\",\n            |b, (graph, i)| {\n                b.iter(|| {\n                    black_box(graph.parents(*i));\n                })\n            },\n            params,\n        ),\n    );\n}\n\ncriterion_group!(benches, drgraph);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/benches/pedersen.rs",
    "content": "#[macro_use]\nextern crate criterion;\nextern crate bellman;\nextern crate bitvec;\nextern crate pairing;\nextern crate rand;\nextern crate sapling_crypto;\nextern crate storage_proofs;\n\nuse bellman::groth16::*;\nuse bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse criterion::{black_box, Criterion, ParameterizedBenchmark};\nuse pairing::bls12_381::Bls12;\nuse rand::{thread_rng, Rng};\nuse sapling_crypto::circuit::boolean::{self, Boolean};\nuse sapling_crypto::jubjub::{JubjubBls12, JubjubEngine};\nuse storage_proofs::circuit::bench::BenchCS;\n\nuse storage_proofs::circuit;\nuse storage_proofs::crypto::pedersen;\n\nstruct PedersenExample<'a, E: JubjubEngine> {\n    params: &'a E::Params,\n    data: &'a [Option<bool>],\n}\n\nimpl<'a, E: JubjubEngine> Circuit<E> for PedersenExample<'a, E> {\n    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let data: Vec<Boolean> = self\n            .data\n            .into_iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        let cs = cs.namespace(|| \"pedersen\");\n        let res = circuit::pedersen::pedersen_compression_num(cs, self.params, &data)?;\n        // please compiler don't optimize the result away\n        // only check if we actually have input data\n        if self.data[0].is_some() {\n            res.get_value().unwrap();\n        }\n\n        Ok(())\n    }\n}\n\nfn pedersen_benchmark(c: &mut Criterion) {\n    // FIXME: We're duplicating these params because of compiler errors, presumably related to\n    // the move closures. 
There must be a better way.\n    let jubjub_params = JubjubBls12::new();\n    let jubjub_params2 = JubjubBls12::new();\n    let mut rng1 = thread_rng();\n    let rng2 = thread_rng();\n    let groth_params = generate_random_parameters::<Bls12, _, _>(\n        PedersenExample {\n            params: &jubjub_params,\n            data: &vec![None; 256],\n        },\n        &mut rng1,\n    )\n    .unwrap();\n\n    let params = vec![32];\n\n    c.bench(\n        \"pedersen\",\n        ParameterizedBenchmark::new(\n            \"non-circuit-32bytes\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let mut data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                b.iter(|| black_box(pedersen::pedersen_compression(&mut data)))\n            },\n            params,\n        )\n        .with_function(\"circuit-32bytes-create_proof\", move |b, bytes| {\n            b.iter(|| {\n                let mut rng = rng1.clone();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                let proof = create_random_proof(\n                    PedersenExample {\n                        params: &jubjub_params,\n                        data: data.as_slice(),\n                    },\n                    &groth_params,\n                    &mut rng,\n                )\n                .unwrap();\n\n                black_box(proof)\n            });\n        })\n        .with_function(\"circuit-32bytes-synthesize_circuit\", move |b, bytes| {\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n\n                let mut rng = rng2.clone();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                PedersenExample {\n                    params: &jubjub_params2,\n                    data: data.as_slice(),\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(benches, pedersen_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/benches/preprocessing.rs",
    "content": "#[macro_use]\nextern crate criterion;\nextern crate rand;\nextern crate sector_base;\nextern crate storage_proofs;\nextern crate tempfile;\n\nuse criterion::{Criterion, ParameterizedBenchmark, Throughput};\nuse rand::{thread_rng, Rng};\nuse sector_base::io::fr32::{write_padded, write_unpadded};\nuse std::fs::File;\nuse std::io::{Read, Seek, SeekFrom};\nuse std::time::Duration;\n\nfn random_data(size: usize) -> Vec<u8> {\n    let mut rng = thread_rng();\n    let mut data = vec![0u8; size as usize];\n    for i in 0..data.len() {\n        data[i] = rng.gen();\n    }\n    data\n}\n\nfn preprocessing_benchmark(c: &mut Criterion) {\n    c.bench(\n        \"preprocessing\",\n        ParameterizedBenchmark::new(\n            \"write_padded\",\n            |b, size| {\n                let data = &random_data(*size);\n\n                b.iter(|| {\n                    let mut tmpfile: File = tempfile::tempfile().unwrap();\n\n                    write_padded_bench(&mut tmpfile, data);\n                })\n            },\n            vec![128, 256, 512, 256_000, 512_000, 1024_000, 2048_000],\n        )\n        .with_function(\"write_padded + unpadded\", |b, size| {\n            let data = &random_data(*size);\n            b.iter(|| {\n                let mut tmpfile: File = tempfile::tempfile().unwrap();\n\n                write_padded_unpadded_bench(&mut tmpfile, &data);\n            })\n        })\n        .sample_size(2)\n        .throughput(|s| Throughput::Bytes(*s as u32))\n        .warm_up_time(Duration::from_secs(1)),\n    );\n}\n\nfn write_padded_bench(file: &mut File, data: &[u8]) {\n    write_padded(&data, file).unwrap();\n\n    let padded_written = file.seek(SeekFrom::End(0)).unwrap() as usize;\n\n    assert!(padded_written > data.len());\n}\n\nfn write_padded_unpadded_bench(file: &mut File, data: &[u8]) {\n    write_padded(&data, file).unwrap();\n\n    let padded_written = file.seek(SeekFrom::End(0)).unwrap() as usize;\n\n    assert!(padded_written > data.len());\n\n    let mut buf = Vec::with_capacity(padded_written);\n    file.seek(SeekFrom::Start(0)).unwrap();\n    file.read_to_end(&mut buf).unwrap();\n\n    let mut unpadded_file: File = tempfile::tempfile().unwrap();\n\n    write_unpadded(&buf, &mut unpadded_file, 0, data.len()).unwrap();\n\n    let unpadded_written = unpadded_file.seek(SeekFrom::End(0)).unwrap() as usize;\n\n    assert!(unpadded_written == data.len());\n}\n\ncriterion_group!(benches, preprocessing_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/benches/sha256.rs",
    "content": "#[macro_use]\nextern crate criterion;\nextern crate bellman;\nextern crate bitvec;\nextern crate pairing;\nextern crate rand;\nextern crate sapling_crypto;\nextern crate sha2;\nextern crate storage_proofs;\n\nuse bellman::groth16::*;\nuse bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse criterion::{black_box, Criterion, ParameterizedBenchmark};\nuse pairing::bls12_381::Bls12;\nuse rand::{thread_rng, Rng};\nuse sapling_crypto::circuit as scircuit;\nuse sapling_crypto::circuit::boolean::{self, Boolean};\nuse sapling_crypto::jubjub::JubjubEngine;\nuse storage_proofs::circuit::bench::BenchCS;\n\nuse sha2::{Digest, Sha256};\n\nstruct Sha256Example<'a> {\n    data: &'a [Option<bool>],\n}\n\nimpl<'a, E> Circuit<E> for Sha256Example<'a>\nwhere\n    E: JubjubEngine,\n{\n    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let data: Vec<Boolean> = self\n            .data\n            .into_iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        let cs = cs.namespace(|| \"sha256\");\n\n        let _res = scircuit::sha256::sha256(cs, &data)?;\n        Ok(())\n    }\n}\n\nfn sha256_benchmark(c: &mut Criterion) {\n    let mut rng1 = thread_rng();\n    let rng2 = thread_rng();\n\n    let groth_params = generate_random_parameters::<Bls12, _, _>(\n        Sha256Example {\n            data: &vec![None; 256],\n        },\n        &mut rng1,\n    )\n    .unwrap();\n\n    let params = vec![32];\n\n    c.bench(\n        \"sha256\",\n        ParameterizedBenchmark::new(\n            \"non-circuit-32bytes\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                b.iter(|| black_box(Sha256::digest(&data)))\n            },\n            params,\n        )\n        .with_function(\"circuit-32bytes-create_proof\", move |b, bytes| {\n            b.iter(|| {\n                let mut rng = rng1.clone();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                let proof = create_random_proof(\n                    Sha256Example {\n                        data: data.as_slice(),\n                    },\n                    &groth_params,\n                    &mut rng,\n                )\n                .unwrap();\n\n                black_box(proof)\n            });\n        })\n        .with_function(\"circuit-32bytes-synthesize_circuit\", move |b, bytes| {\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n\n                let mut rng = rng2.clone();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                Sha256Example {\n                    data: data.as_slice(),\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(benches, sha256_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/benches/sloth.rs",
    "content": "#[macro_use]\nextern crate criterion;\nextern crate bellman;\nextern crate bitvec;\nextern crate pairing;\nextern crate rand;\nextern crate sapling_crypto;\nextern crate storage_proofs;\n\nuse bellman::groth16::*;\nuse bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse criterion::{black_box, Criterion, ParameterizedBenchmark};\nuse pairing::bls12_381::{Bls12, Fr};\nuse rand::{thread_rng, Rng};\nuse sapling_crypto::circuit::num;\nuse sapling_crypto::jubjub::JubjubEngine;\nuse storage_proofs::circuit::bench::BenchCS;\n\nuse storage_proofs::circuit;\nuse storage_proofs::crypto::sloth;\n\nstruct SlothExample<E: JubjubEngine> {\n    key: Option<E::Fr>,\n    ciphertext: Option<E::Fr>,\n    rounds: usize,\n}\n\nimpl<E: JubjubEngine> Circuit<E> for SlothExample<E> {\n    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let key_num = num::AllocatedNum::alloc(cs.namespace(|| \"sloth-key\"), || {\n            Ok(self.key.ok_or_else(|| SynthesisError::AssignmentMissing)?)\n        })?;\n        let res = circuit::sloth::decode(\n            cs.namespace(|| \"sloth\"),\n            &key_num,\n            self.ciphertext,\n            self.rounds,\n        )?;\n        // please compiler don't optimize the result away\n        // only check if we actually have input data\n        if self.ciphertext.is_some() {\n            res.get_value().unwrap();\n        }\n\n        Ok(())\n    }\n}\n\nfn sloth_benchmark(c: &mut Criterion) {\n    let params = vec![1, 4, 8];\n\n    c.bench(\n        \"sloth\",\n        ParameterizedBenchmark::new(\n            \"decode-non-circuit\",\n            |b, rounds| {\n                let mut rng = thread_rng();\n                let key: Fr = rng.gen();\n                let plaintext: Fr = rng.gen();\n                let ciphertext = sloth::encode::<Bls12>(&key, &plaintext, *rounds);\n\n                b.iter(|| black_box(sloth::decode::<Bls12>(&key, &ciphertext, *rounds)))\n            },\n            params,\n        )\n        .with_function(\"decode-circuit-create_proof\", move |b, rounds| {\n            let mut rng = thread_rng();\n            let groth_params = generate_random_parameters::<Bls12, _, _>(\n                SlothExample {\n                    key: None,\n                    ciphertext: None,\n                    rounds: *rounds,\n                },\n                &mut rng,\n            )\n            .unwrap();\n\n            let key: Fr = rng.gen();\n            let plaintext: Fr = rng.gen();\n            let ciphertext = sloth::encode::<Bls12>(&key, &plaintext, *rounds);\n\n            b.iter(|| {\n                let proof = create_random_proof(\n                    SlothExample {\n                        key: Some(key),\n                        ciphertext: Some(ciphertext),\n                        rounds: *rounds,\n                    },\n                    &groth_params,\n                    &mut rng,\n                )\n                .unwrap();\n\n                black_box(proof)\n            });\n        })\n        .with_function(\"decode-circuit-synthesize_circuit\", move |b, rounds| {\n            let mut rng = thread_rng();\n            let key: Fr = rng.gen();\n            let plaintext: Fr = rng.gen();\n            let ciphertext = sloth::encode::<Bls12>(&key, &plaintext, *rounds);\n\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n\n                SlothExample {\n                    key: Some(key),\n                    ciphertext: 
Some(ciphertext),\n                    rounds: *rounds,\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .with_function(\"encode-non-circuit\", move |b, rounds| {\n            let mut rng = thread_rng();\n            let key: Fr = rng.gen();\n            let plaintext: Fr = rng.gen();\n\n            b.iter(|| black_box(sloth::encode::<Bls12>(&key, &plaintext, *rounds)))\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(benches, sloth_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/build.rs",
    "content": "fn is_compiled_for_64_bit_arch() -> bool {\n    cfg!(target_pointer_width = \"64\")\n}\n\nfn main() {\n    assert!(\n        is_compiled_for_64_bit_arch(),\n        \"must be built for 64-bit architectures\"\n    );\n}\n"
  },
  {
    "path": "storage-proofs/core/Cargo.toml",
    "content": "[package]\nname = \"storage-proofs-core\"\nversion = \"1.0.0-alpha.0\"\nauthors = [\"dignifiedquire <me@dignifiedquire.com>\"]\nlicense = \"MIT OR Apache-2.0\"\nedition = \"2018\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\nreadme = \"README.md\"\n\n[lib]\nbench = false\n\n[dependencies]\nrand = \"0.7\"\nmerkletree = \"0.18.0\"\nbyteorder = \"1\"\nconfig = \"0.9.3\"\nitertools = \"0.9\"\nlazy_static = \"1.2\"\nmemmap = \"0.7\"\naes = \"0.3\"\nblock-modes = \"0.3\"\nsha2 = { version = \"0.8.3\", package = \"sha2ni\" }\ntempfile = \"3\"\nfs2 = \"0.4\"\nrayon = \"1.0.0\"\nserde = { version = \"1.0\", features = [\"derive\"]}\nblake2b_simd = \"0.5\"\nblake2s_simd = \"0.5\"\ntoml = \"0.5\"\nff = { version = \"0.2.1\", package = \"fff\" }\nbellperson = \"0.7.0\"\npaired = { version = \"0.19.0\", features = [\"serde\"] }\nfil-sapling-crypto = \"0.5.1\"\nserde_json = \"1.0\"\nlog = \"0.4.7\"\nrand_chacha = \"0.2.1\"\nhex = \"0.4.0\"\ngeneric-array = \"0.13.2\"\nanyhow = \"1.0.23\"\nthiserror = \"1.0.6\"\nneptune = { version = \"0.5.6\", features = [\"gpu\"] }\ncpu-time = { version = \"1.0\", optional = true }\n\n[dev-dependencies]\nproptest = \"0.7\"\ncriterion = \"0.3\"\nfemme = \"1.2.0\"\nbitvec = \"0.17\"\nrand_xorshift = \"0.2.0\"\npretty_assertions = \"0.6.1\"\nstorage-proofs-porep = { path = \"../porep\", version = \"1.0.0-alpha.0\" }\nsha2raw = { path = \"../../sha2raw\" }\n\n[features]\ndefault = [\"gpu\"]\nsimd = []\nasm = [\"sha2/sha2-asm\"]\nbig-sector-sizes-bench = []\ngpu = [\"bellperson/gpu\", \"fil-sapling-crypto/gpu\"]\nmeasurements = [\"cpu-time\"]\nprofile = [\"measurements\"]\n\n[[bench]]\nname = \"pedersen\"\nharness = false\n\n[[bench]]\nname = \"sha256\"\nharness = false\n\n[[bench]]\nname = \"blake2s\"\nharness = false\n\n[[bench]]\nname = \"drgraph\"\nharness = false\n\n[[bench]]\nname = \"encode\"\nharness = false\n\n[[bench]]\nname = \"parents\"\nharness = false\n\n[[bench]]\nname = \"xor\"\nharness = false\n\n[[bench]]\nname = \"fr\"\nharness = false\n\n[[bench]]\nname = \"merkle\"\nharness = false\n\n[[bench]]\nname = \"misc\"\nharness = false\n"
  },
  {
    "path": "storage-proofs/core/README.md",
    "content": "# Storage Proofs Core\n\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "storage-proofs/core/benches/blake2s.rs",
    "content": "use bellperson::gadgets::boolean::{self, Boolean};\nuse bellperson::groth16::*;\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse paired::bls12_381::Bls12;\nuse rand::{thread_rng, Rng};\nuse storage_proofs_core::gadgets::BenchCS;\n\nstruct Blake2sExample<'a> {\n    data: &'a [Option<bool>],\n}\n\nimpl<'a> Circuit<Bls12> for Blake2sExample<'a> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let data: Vec<Boolean> = self\n            .data\n            .into_iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        let cs = cs.namespace(|| \"blake2s\");\n        let personalization = vec![0u8; 8];\n        let _res = bellperson::gadgets::blake2s::blake2s(cs, &data, &personalization)?;\n        Ok(())\n    }\n}\n\nfn blake2s_benchmark(c: &mut Criterion) {\n    let params = vec![32, 64, 10 * 32];\n\n    c.bench(\n        \"hash-blake2s\",\n        ParameterizedBenchmark::new(\n            \"non-circuit\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                b.iter(|| black_box(blake2s_simd::blake2s(&data)))\n            },\n            params,\n        ),\n    );\n}\n\nfn blake2s_circuit_benchmark(c: &mut Criterion) {\n    let mut rng1 = thread_rng();\n    let groth_params = generate_random_parameters::<Bls12, _, _>(\n        Blake2sExample {\n            data: &vec![None; 256],\n        },\n        &mut rng1,\n    )\n    .unwrap();\n\n    let params = vec![32];\n\n    c.bench(\n        \"hash-blake2s-circuit\",\n        ParameterizedBenchmark::new(\n            \"create-proof\",\n            move |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                b.iter(|| {\n                    let proof = create_random_proof(\n                        Blake2sExample {\n                            data: data.as_slice(),\n                        },\n                        &groth_params,\n                        &mut rng,\n                    )\n                    .unwrap();\n\n                    black_box(proof)\n                });\n            },\n            params,\n        )\n        .with_function(\"synthesize\", move |b, bytes| {\n            let mut rng = thread_rng();\n            let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n\n                Blake2sExample {\n                    data: data.as_slice(),\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(benches, blake2s_benchmark, blake2s_circuit_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/core/benches/drgraph.rs",
    "content": "use criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse storage_proofs_core::drgraph::*;\nuse storage_proofs_core::hasher::pedersen::*;\n\nfn drgraph(c: &mut Criterion) {\n    let params = vec![12, 24, 128, 1024];\n\n    c.bench(\n        \"sample\",\n        ParameterizedBenchmark::new(\n            \"bucket/m=6\",\n            |b, n| {\n                let graph =\n                    BucketGraph::<PedersenHasher>::new(*n, BASE_DEGREE, 0, [32; 32]).unwrap();\n\n                b.iter(|| {\n                    let mut parents = vec![0; 6];\n                    black_box(graph.parents(2, &mut parents).unwrap());\n                })\n            },\n            params,\n        ),\n    );\n}\n\ncriterion_group!(benches, drgraph);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/core/benches/encode.rs",
    "content": "use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput};\nuse ff::Field;\nuse paired::bls12_381::Fr;\nuse rand::thread_rng;\nuse storage_proofs_core::drgraph::new_seed;\nuse storage_proofs_core::fr32::fr_into_bytes;\nuse storage_proofs_core::hasher::sha256::Sha256Hasher;\nuse storage_proofs_core::hasher::{Domain, Hasher};\nuse storage_proofs_porep::stacked::{create_label, create_label_exp, StackedBucketGraph};\n\nstruct Pregenerated<H: 'static + Hasher> {\n    data: Vec<u8>,\n    replica_id: H::Domain,\n    graph: StackedBucketGraph<H>,\n}\n\nfn pregenerate_data<H: Hasher>(degree: usize) -> Pregenerated<H> {\n    assert_eq!(degree, 6 + 8);\n    let mut rng = thread_rng();\n    let size = degree * 4 * 1024 * 1024;\n    let data: Vec<u8> = (0..size)\n        .flat_map(|_| fr_into_bytes(&Fr::random(&mut rng)))\n        .collect();\n    let replica_id: H::Domain = H::Domain::random(&mut rng);\n\n    let graph = StackedBucketGraph::<H>::new_stacked(size, 6, 8, new_seed()).unwrap();\n\n    Pregenerated {\n        data,\n        replica_id,\n        graph,\n    }\n}\n\nfn kdf_benchmark(c: &mut Criterion) {\n    let degree = 14;\n    let Pregenerated {\n        data,\n        replica_id,\n        graph,\n    } = pregenerate_data::<Sha256Hasher>(degree);\n\n    let mut group = c.benchmark_group(\"kdf\");\n    group.sample_size(10);\n    group.throughput(Throughput::Bytes(\n        /* replica id + 37 parents + node id */ 39 * 32,\n    ));\n\n    group.bench_function(\"exp\", |b| {\n        let mut raw_data = data.clone();\n        raw_data.extend_from_slice(&data);\n        let (data, exp_data) = raw_data.split_at_mut(data.len());\n\n        let graph = &graph;\n        let replica_id = replica_id.clone();\n\n        b.iter(|| black_box(create_label_exp(graph, &replica_id, &*exp_data, data, 1)))\n    });\n\n    group.bench_function(\"non-exp\", |b| {\n        let mut data = data.clone();\n        let graph = &graph;\n        let replica_id = replica_id.clone();\n\n        b.iter(|| black_box(create_label(graph, &replica_id, &mut data, 1)))\n    });\n\n    group.finish();\n}\n\ncriterion_group!(benches, kdf_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/core/benches/fr.rs",
    "content": "use criterion::{black_box, criterion_group, criterion_main, Criterion};\nuse ff::Field;\nuse paired::bls12_381::Fr;\nuse rand::thread_rng;\nuse storage_proofs_core::fr32::{bytes_into_fr, fr_into_bytes};\n\nfn fr_benchmark(c: &mut Criterion) {\n    c.bench_function(\"fr-to-bytes-32\", move |b| {\n        let mut rng = thread_rng();\n        let fr = Fr::random(&mut rng);\n\n        b.iter(|| black_box(fr_into_bytes(&fr)))\n    });\n\n    c.bench_function(\"bytes-32-to-fr\", move |b| {\n        let mut rng = thread_rng();\n        let fr = Fr::random(&mut rng);\n        let bytes = fr_into_bytes(&fr);\n\n        b.iter(|| black_box(bytes_into_fr(&bytes).unwrap()))\n    });\n}\n\ncriterion_group!(benches, fr_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/core/benches/merkle.rs",
    "content": "use criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse rand::{thread_rng, Rng};\nuse storage_proofs_core::hasher::{PoseidonHasher, Sha256Hasher};\nuse storage_proofs_core::merkle::{create_base_merkle_tree, BinaryMerkleTree};\n\nfn merkle_benchmark(c: &mut Criterion) {\n    #[cfg(feature = \"big-sector-sizes-bench\")]\n    let params = vec![128, 1024, 1048576];\n    #[cfg(not(feature = \"big-sector-sizes-bench\"))]\n    let params = vec![128, 1024];\n\n    c.bench(\n        \"merkletree-binary\",\n        ParameterizedBenchmark::new(\n            \"sha256\",\n            move |b, n_nodes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..32 * *n_nodes).map(|_| rng.gen()).collect();\n                b.iter(|| {\n                    black_box(\n                        create_base_merkle_tree::<BinaryMerkleTree<Sha256Hasher>>(\n                            None, *n_nodes, &data,\n                        )\n                        .unwrap(),\n                    )\n                })\n            },\n            params,\n        )\n        .with_function(\"poseidon\", move |b, n_nodes| {\n            let mut rng = thread_rng();\n            let data: Vec<u8> = (0..32 * *n_nodes).map(|_| rng.gen()).collect();\n\n            b.iter(|| {\n                black_box(\n                    create_base_merkle_tree::<BinaryMerkleTree<PoseidonHasher>>(\n                        None, *n_nodes, &data,\n                    )\n                    .unwrap(),\n                )\n            })\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(benches, merkle_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/core/benches/misc.rs",
    "content": "use std::io::{Read, Seek, Write};\n\nuse criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse rand::{thread_rng, Rng};\nuse tempfile::tempfile;\n\nfn read_bytes_benchmark(c: &mut Criterion) {\n    let params = vec![32, 64, 512, 1024, 64 * 1024];\n\n    c.bench(\n        \"read\",\n        ParameterizedBenchmark::new(\n            \"from_disk\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                let mut f = tempfile().unwrap();\n                f.write_all(&data).unwrap();\n                f.sync_all().unwrap();\n\n                b.iter(|| {\n                    let mut res = vec![0u8; *bytes];\n                    f.seek(std::io::SeekFrom::Start(0)).unwrap();\n                    f.read_exact(&mut res).unwrap();\n\n                    black_box(res)\n                })\n            },\n            params,\n        ),\n    );\n}\n\ncriterion_group!(benches, read_bytes_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/core/benches/pedersen.rs",
    "content": "use bellperson::gadgets::boolean::{self, Boolean};\nuse bellperson::groth16::*;\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse paired::bls12_381::Bls12;\nuse rand::{thread_rng, Rng};\nuse storage_proofs_core::crypto::pedersen;\nuse storage_proofs_core::gadgets::{self, BenchCS};\n\nstruct PedersenExample<'a> {\n    data: &'a [Option<bool>],\n}\n\nimpl<'a> Circuit<Bls12> for PedersenExample<'a> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let data: Vec<Boolean> = self\n            .data\n            .into_iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        let cs = cs.namespace(|| \"pedersen\");\n        let res = gadgets::pedersen::pedersen_compression_num(cs, &data)?;\n        // please compiler don't optimize the result away\n        // only check if we actually have input data\n        if self.data[0].is_some() {\n            res.get_value().unwrap();\n        }\n\n        Ok(())\n    }\n}\n\nstruct PedersenMdExample<'a> {\n    data: &'a [Option<bool>],\n}\n\nimpl<'a> Circuit<Bls12> for PedersenMdExample<'a> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let data: Vec<Boolean> = self\n            .data\n            .into_iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        let cs = cs.namespace(|| \"pedersen\");\n        let res = gadgets::pedersen::pedersen_md_no_padding(cs, &data)?;\n        // please compiler don't optimize the result away\n        // only check if we actually have input data\n        if self.data[0].is_some() {\n            res.get_value().unwrap();\n        }\n\n        Ok(())\n    }\n}\n\nfn pedersen_benchmark(c: &mut Criterion) {\n    let params = vec![32];\n\n    c.bench(\n        \"hash-pedersen\",\n        ParameterizedBenchmark::new(\n            \"non-circuit\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                b.iter(|| black_box(pedersen::pedersen(&data)))\n            },\n            params,\n        ),\n    );\n}\n\nfn pedersen_md_benchmark(c: &mut Criterion) {\n    let params = vec![32, 2 * 32, 4 * 32, 8 * 32, 11 * 32];\n\n    c.bench(\n        \"hash-pedersen-md\",\n        ParameterizedBenchmark::new(\n            \"non-circuit\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                b.iter(|| black_box(pedersen::pedersen_md_no_padding(&data)))\n            },\n            params,\n        ),\n    );\n}\n\nfn pedersen_circuit_benchmark(c: &mut Criterion) {\n    let mut rng1 = thread_rng();\n    let groth_params = generate_random_parameters::<Bls12, _, _>(\n        PedersenExample {\n            data: &vec![None; 256],\n        },\n        
&mut rng1,\n    )\n    .unwrap();\n\n    let params = vec![32];\n\n    c.bench(\n        \"hash-pedersen-circuit\",\n        ParameterizedBenchmark::new(\n            \"create-proof\",\n            move |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                b.iter(|| {\n                    let proof = create_random_proof(\n                        PedersenExample {\n                            data: data.as_slice(),\n                        },\n                        &groth_params,\n                        &mut rng,\n                    )\n                    .unwrap();\n\n                    black_box(proof)\n                });\n            },\n            params,\n        )\n        .with_function(\"synthesize\", move |b, bytes| {\n            let mut rng = thread_rng();\n            let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n                PedersenExample {\n                    data: data.as_slice(),\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .sample_size(20),\n    );\n}\n\nfn pedersen_md_circuit_benchmark(c: &mut Criterion) {\n    let mut rng1 = thread_rng();\n    let groth_params = generate_random_parameters::<Bls12, _, _>(\n        PedersenMdExample {\n            data: &vec![None; 256],\n        },\n        &mut rng1,\n    )\n    .unwrap();\n\n    let params = vec![64];\n\n    c.bench(\n        \"hash-pedersen-md-circuit\",\n        ParameterizedBenchmark::new(\n            \"create-proof\",\n            move |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                b.iter(|| {\n                    let proof = create_random_proof(\n                        PedersenMdExample {\n                            data: data.as_slice(),\n                        },\n                        &groth_params,\n                        &mut rng,\n                    )\n                    .unwrap();\n\n                    black_box(proof)\n                });\n            },\n            params,\n        )\n        .with_function(\"synthesize\", move |b, bytes| {\n            let mut rng = thread_rng();\n            let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n                PedersenMdExample {\n                    data: data.as_slice(),\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(\n    benches,\n    pedersen_benchmark,\n    pedersen_md_benchmark,\n    pedersen_circuit_benchmark,\n    pedersen_md_circuit_benchmark,\n);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/core/benches/sha256.rs",
    "content": "use bellperson::gadgets::boolean::{self, Boolean};\nuse bellperson::groth16::*;\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse criterion::{\n    black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark, Throughput,\n};\nuse paired::bls12_381::Bls12;\nuse rand::{thread_rng, Rng};\nuse sha2::{Digest, Sha256};\nuse storage_proofs_core::gadgets::BenchCS;\n\nstruct Sha256Example<'a> {\n    data: &'a [Option<bool>],\n}\n\nimpl<'a> Circuit<Bls12> for Sha256Example<'a> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let data: Vec<Boolean> = self\n            .data\n            .into_iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        let cs = cs.namespace(|| \"sha256\");\n\n        let _res = bellperson::gadgets::sha256::sha256(cs, &data)?;\n        Ok(())\n    }\n}\n\nfn sha256_benchmark(c: &mut Criterion) {\n    let params = vec![32, 64, 10 * 32, 37 * 32];\n\n    c.bench(\n        \"hash-sha256-base\",\n        ParameterizedBenchmark::new(\n            \"non-circuit\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                b.iter(|| black_box(Sha256::digest(&data)))\n            },\n            params,\n        )\n        .throughput(|bytes| Throughput::Bytes(*bytes as u64)),\n    );\n}\n\nfn sha256_raw_benchmark(c: &mut Criterion) {\n    let params = vec![64, 10 * 32, 38 * 32];\n\n    c.bench(\n        \"hash-sha256-raw\",\n        ParameterizedBenchmark::new(\n            \"non-circuit\",\n            |b, bytes| {\n                use sha2raw::Sha256;\n\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n                let chunks = data.chunks(32).collect::<Vec<_>>();\n\n                b.iter(|| black_box(Sha256::digest(&chunks)))\n            },\n            params,\n        )\n        .throughput(|bytes| Throughput::Bytes(*bytes as u64)),\n    );\n}\n\nfn sha256_circuit_benchmark(c: &mut Criterion) {\n    let mut rng1 = thread_rng();\n\n    let params = vec![32, 64];\n\n    c.bench(\n        \"hash-sha256-circuit\",\n        ParameterizedBenchmark::new(\n            \"create-proof\",\n            move |b, bytes| {\n                let groth_params = generate_random_parameters::<Bls12, _, _>(\n                    Sha256Example {\n                        data: &vec![None; *bytes as usize * 8],\n                    },\n                    &mut rng1,\n                )\n                .unwrap();\n\n                let mut rng = thread_rng();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                b.iter(|| {\n                    let proof = create_random_proof(\n                        Sha256Example {\n                            data: data.as_slice(),\n                        },\n                        &groth_params,\n                        &mut rng,\n                    )\n                    .unwrap();\n\n                    black_box(proof)\n                });\n            },\n            params,\n        )\n        .with_function(\"synthesize\", move |b, 
bytes| {\n            let mut rng = thread_rng();\n            let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n                Sha256Example {\n                    data: data.as_slice(),\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(\n    benches,\n    sha256_benchmark,\n    sha256_raw_benchmark,\n    sha256_circuit_benchmark\n);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/core/benches/xor.rs",
    "content": "use bellperson::gadgets::boolean::{self, Boolean};\nuse bellperson::groth16::*;\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse paired::bls12_381::Bls12;\nuse rand::{thread_rng, Rng};\nuse storage_proofs_core::crypto::xor;\nuse storage_proofs_core::gadgets;\nuse storage_proofs_core::gadgets::BenchCS;\n\nstruct XorExample<'a> {\n    key: &'a [Option<bool>],\n    data: &'a [Option<bool>],\n}\n\nimpl<'a> Circuit<Bls12> for XorExample<'a> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let key: Vec<Boolean> = self\n            .key\n            .into_iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"key_bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n        let data: Vec<Boolean> = self\n            .data\n            .into_iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"data_bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        let mut cs = cs.namespace(|| \"xor\");\n        let _res = gadgets::xor::xor(&mut cs, &key, &data)?;\n\n        Ok(())\n    }\n}\n\nfn xor_benchmark(c: &mut Criterion) {\n    let params = vec![32, 64, 10 * 32];\n\n    c.bench(\n        \"xor\",\n        ParameterizedBenchmark::new(\n            \"non-circuit\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                b.iter(|| black_box(xor::encode(&key, &data)))\n            },\n            params,\n        ),\n    );\n}\n\nfn xor_circuit_benchmark(c: &mut Criterion) {\n    let mut rng1 = thread_rng();\n    let groth_params = generate_random_parameters::<Bls12, _, _>(\n        XorExample {\n            key: &vec![None; 8 * 32],\n            data: &vec![None; 256],\n        },\n        &mut rng1,\n    )\n    .unwrap();\n\n    let params = vec![32];\n\n    c.bench(\n        \"xor-circuit\",\n        ParameterizedBenchmark::new(\n            \"create-proof\",\n            move |b, bytes| {\n                let mut rng = thread_rng();\n                let key: Vec<Option<bool>> = (0..32 * 8).map(|_| Some(rng.gen())).collect();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                b.iter(|| {\n                    let proof = create_random_proof(\n                        XorExample {\n                            key: key.as_slice(),\n                            data: data.as_slice(),\n                        },\n                        &groth_params,\n                        &mut rng,\n                    )\n                    .unwrap();\n\n                    black_box(proof)\n                });\n            },\n            params,\n        )\n        .with_function(\"synthesize\", move |b, bytes| {\n            let mut rng = thread_rng();\n            let key: Vec<Option<bool>> = (0..32 * 8).map(|_| Some(rng.gen())).collect();\n            let data: 
Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n                XorExample {\n                    key: key.as_slice(),\n                    data: data.as_slice(),\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(benches, xor_benchmark, xor_circuit_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/core/src/cache_key.rs",
    "content": "use std::fmt;\n\n#[derive(Debug, Copy, Clone)]\npub enum CacheKey {\n    PAux,\n    TAux,\n    CommDTree,\n    CommCTree,\n    CommRLastTree,\n}\n\nimpl fmt::Display for CacheKey {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        match *self {\n            CacheKey::PAux => write!(f, \"p_aux\"),\n            CacheKey::TAux => write!(f, \"t_aux\"),\n            CacheKey::CommDTree => write!(f, \"tree-d\"),\n            CacheKey::CommCTree => write!(f, \"tree-c\"),\n            CacheKey::CommRLastTree => write!(f, \"tree-r-last\"),\n        }\n    }\n}\n\nimpl CacheKey {\n    pub fn label_layer(layer: usize) -> String {\n        format!(\"layer-{}\", layer)\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/compound_proof.rs",
    "content": "use anyhow::{ensure, Context};\nuse bellperson::{groth16, Circuit};\nuse log::info;\nuse paired::bls12_381::{Bls12, Fr};\nuse rand::{rngs::OsRng, RngCore};\nuse rayon::prelude::*;\n\nuse crate::error::Result;\nuse crate::multi_proof::MultiProof;\nuse crate::parameter_cache::{CacheableParameters, ParameterSetMetadata};\nuse crate::partitions;\nuse crate::proof::ProofScheme;\n\n#[derive(Clone)]\npub struct SetupParams<'a, S: ProofScheme<'a>> {\n    pub vanilla_params: <S as ProofScheme<'a>>::SetupParams,\n    pub partitions: Option<usize>,\n    /// High priority (always runs on GPU) == true\n    pub priority: bool,\n}\n\n#[derive(Clone)]\npub struct PublicParams<'a, S: ProofScheme<'a>> {\n    pub vanilla_params: S::PublicParams,\n    pub partitions: Option<usize>,\n    pub priority: bool,\n}\n\n/// CircuitComponent exists so parent components can pass private inputs to their subcomponents\n/// when calling CompoundProof::circuit directly. In general, there are no internal private inputs,\n/// and a default value will be passed. CompoundProof::circuit implementations should exhibit\n/// default behavior when passed a default ComponentPrivateinputs.\npub trait CircuitComponent {\n    type ComponentPrivateInputs: Default + Clone;\n}\n\n/// The CompoundProof trait bundles a proof::ProofScheme and a bellperson::Circuit together.\n/// It provides methods equivalent to those provided by proof::ProofScheme (setup, prove, verify).\n/// See documentation at proof::ProofScheme for details.\n/// Implementations should generally only need to supply circuit and generate_public_inputs.\n/// The remaining trait methods are used internally and implement the necessary plumbing.\npub trait CompoundProof<'a, S: ProofScheme<'a>, C: Circuit<Bls12> + CircuitComponent + Send>\nwhere\n    S::Proof: Sync + Send,\n    S::PublicParams: ParameterSetMetadata + Sync + Send,\n    S::PublicInputs: Clone + Sync,\n    Self: CacheableParameters<C, S::PublicParams>,\n{\n    // setup is equivalent to ProofScheme::setup.\n    fn setup(sp: &SetupParams<'a, S>) -> Result<PublicParams<'a, S>> {\n        Ok(PublicParams {\n            vanilla_params: S::setup(&sp.vanilla_params)?,\n            partitions: sp.partitions,\n            priority: sp.priority,\n        })\n    }\n\n    fn partition_count(public_params: &PublicParams<'a, S>) -> usize {\n        match public_params.partitions {\n            None => 1,\n            Some(0) => panic!(\"cannot specify zero partitions\"),\n            Some(k) => k,\n        }\n    }\n\n    /// prove is equivalent to ProofScheme::prove.\n    fn prove<'b>(\n        pub_params: &PublicParams<'a, S>,\n        pub_in: &S::PublicInputs,\n        priv_in: &S::PrivateInputs,\n        groth_params: &'b groth16::MappedParameters<Bls12>,\n    ) -> Result<MultiProof<'b>> {\n        let partition_count = Self::partition_count(pub_params);\n\n        // This will always run at least once, since there cannot be zero partitions.\n        ensure!(partition_count > 0, \"There must be partitions\");\n\n        info!(\"vanilla_proof:start\");\n        let vanilla_proofs = S::prove_all_partitions(\n            &pub_params.vanilla_params,\n            &pub_in,\n            priv_in,\n            partition_count,\n        )?;\n\n        info!(\"vanilla_proof:finish\");\n\n        let sanity_check =\n            S::verify_all_partitions(&pub_params.vanilla_params, &pub_in, &vanilla_proofs)?;\n        ensure!(sanity_check, \"sanity check failed\");\n\n        info!(\"snark_proof:start\");\n        let 
groth_proofs = Self::circuit_proofs(\n            pub_in,\n            vanilla_proofs,\n            &pub_params.vanilla_params,\n            groth_params,\n            pub_params.priority,\n        )?;\n        info!(\"snark_proof:finish\");\n\n        Ok(MultiProof::new(groth_proofs, &groth_params.vk))\n    }\n\n    /// verify is equivalent to ProofScheme::verify.\n    fn verify<'b>(\n        public_params: &PublicParams<'a, S>,\n        public_inputs: &S::PublicInputs,\n        multi_proof: &MultiProof<'b>,\n        requirements: &S::Requirements,\n    ) -> Result<bool> {\n        ensure!(\n            multi_proof.circuit_proofs.len() == Self::partition_count(public_params),\n            \"Inconsistent inputs\"\n        );\n\n        let vanilla_public_params = &public_params.vanilla_params;\n        let pvk = groth16::prepare_batch_verifying_key(&multi_proof.verifying_key);\n\n        if !<S as ProofScheme>::satisfies_requirements(\n            &public_params.vanilla_params,\n            requirements,\n            multi_proof.circuit_proofs.len(),\n        ) {\n            return Ok(false);\n        }\n\n        let inputs: Vec<_> = (0..multi_proof.circuit_proofs.len())\n            .into_par_iter()\n            .map(|k| Self::generate_public_inputs(public_inputs, vanilla_public_params, Some(k)))\n            .collect::<Result<_>>()?;\n        let proofs: Vec<_> = multi_proof.circuit_proofs.iter().collect();\n\n        let res = groth16::verify_proofs_batch(&pvk, &mut rand::rngs::OsRng, &proofs, &inputs)?;\n        Ok(res)\n    }\n\n    /// Efficiently verify multiple proofs.\n    fn batch_verify<'b>(\n        public_params: &PublicParams<'a, S>,\n        public_inputs: &[S::PublicInputs],\n        multi_proofs: &[MultiProof<'b>],\n        requirements: &S::Requirements,\n    ) -> Result<bool> {\n        ensure!(\n            public_inputs.len() == multi_proofs.len(),\n            \"Inconsistent inputs\"\n        );\n        for proof in multi_proofs {\n            ensure!(\n                proof.circuit_proofs.len() == Self::partition_count(public_params),\n                \"Inconsistent inputs\"\n            );\n        }\n        ensure!(!public_inputs.is_empty(), \"Cannot verify empty proofs\");\n\n        let vanilla_public_params = &public_params.vanilla_params;\n        // Just use the first verifying key; they must all be equal anyway.\n        let pvk = groth16::prepare_batch_verifying_key(&multi_proofs[0].verifying_key);\n\n        for multi_proof in multi_proofs.iter() {\n            if !<S as ProofScheme>::satisfies_requirements(\n                &public_params.vanilla_params,\n                requirements,\n                multi_proof.circuit_proofs.len(),\n            ) {\n                return Ok(false);\n            }\n        }\n\n        let inputs: Vec<_> = multi_proofs\n            .par_iter()\n            .zip(public_inputs.par_iter())\n            .flat_map(|(multi_proof, pub_inputs)| {\n                (0..multi_proof.circuit_proofs.len())\n                    .into_par_iter()\n                    .map(|k| {\n                        Self::generate_public_inputs(pub_inputs, vanilla_public_params, Some(k))\n                    })\n                    .collect::<Result<Vec<_>>>()\n                    .expect(\"Invalid public inputs\") // TODO: improve error handling\n            })\n            .collect::<Vec<_>>();\n        let circuit_proofs: Vec<_> = multi_proofs\n            .iter()\n            .flat_map(|m| m.circuit_proofs.iter())\n            .collect();\n\n        let res 
= groth16::verify_proofs_batch(\n            &pvk,\n            &mut rand::rngs::OsRng,\n            &circuit_proofs[..],\n            &inputs,\n        )?;\n\n        Ok(res)\n    }\n\n    /// circuit_proofs creates and synthesizes a circuit from the concrete params/inputs of each\n    /// vanilla proof, then generates and returns a groth proof for each circuit.\n    /// circuit_proofs is used internally and should neither be called nor implemented outside of\n    /// default trait methods.\n    fn circuit_proofs(\n        pub_in: &S::PublicInputs,\n        vanilla_proof: Vec<S::Proof>,\n        pub_params: &S::PublicParams,\n        groth_params: &groth16::MappedParameters<Bls12>,\n        priority: bool,\n    ) -> Result<Vec<groth16::Proof<Bls12>>> {\n        let mut rng = OsRng;\n        ensure!(\n            !vanilla_proof.is_empty(),\n            \"cannot create a circuit proof over missing vanilla proofs\"\n        );\n\n        let circuits = vanilla_proof\n            .into_par_iter()\n            .enumerate()\n            .map(|(k, vanilla_proof)| {\n                Self::circuit(\n                    &pub_in,\n                    C::ComponentPrivateInputs::default(),\n                    &vanilla_proof,\n                    &pub_params,\n                    Some(k),\n                )\n            })\n            .collect::<Result<Vec<_>>>()?;\n\n        let groth_proofs = if priority {\n            groth16::create_random_proof_batch_in_priority(circuits, groth_params, &mut rng)?\n        } else {\n            groth16::create_random_proof_batch(circuits, groth_params, &mut rng)?\n        };\n\n        groth_proofs\n            .into_iter()\n            .map(|groth_proof| {\n                let mut proof_vec = vec![];\n                groth_proof.write(&mut proof_vec)?;\n                let gp = groth16::Proof::<Bls12>::read(&proof_vec[..])?;\n                Ok(gp)\n            })\n            .collect()\n    }\n\n    /// generate_public_inputs generates public inputs suitable for use as input during verification\n    /// of a proof generated from this CompoundProof's bellperson::Circuit (C). These inputs correspond\n    /// to those allocated when C is synthesized.\n    fn generate_public_inputs(\n        pub_in: &S::PublicInputs,\n        pub_params: &S::PublicParams,\n        partition_k: Option<usize>,\n    ) -> Result<Vec<Fr>>;\n\n    /// circuit constructs an instance of this CompoundProof's bellperson::Circuit.\n    /// circuit takes PublicInputs, PublicParams, and Proof from this CompoundProof's proof::ProofScheme (S)\n    /// and uses them to initialize Circuit fields which will be used to construct public and private\n    /// inputs during circuit synthesis.\n    fn circuit(\n        public_inputs: &S::PublicInputs,\n        component_private_inputs: C::ComponentPrivateInputs,\n        vanilla_proof: &S::Proof,\n        public_param: &S::PublicParams,\n        partition_k: Option<usize>,\n    ) -> Result<C>;\n\n    fn blank_circuit(public_params: &S::PublicParams) -> C;\n\n    /// If the rng option argument is set, parameters will be\n    /// generated using it.  This is used for testing only, or where\n    /// parameters are otherwise unavailable (e.g. benches).  
If rng\n    /// is not set, an error will result if parameters are not\n    /// present.\n    fn groth_params<R: RngCore>(\n        rng: Option<&mut R>,\n        public_params: &S::PublicParams,\n    ) -> Result<groth16::MappedParameters<Bls12>> {\n        Self::get_groth_params(rng, Self::blank_circuit(public_params), public_params)\n    }\n\n    /// If the rng option argument is set, parameters will be\n    /// generated using it.  This is used for testing only, or where\n    /// parameters are otherwise unavailable (e.g. benches).  If rng\n    /// is not set, an error will result if parameters are not\n    /// present.\n    fn verifying_key<R: RngCore>(\n        rng: Option<&mut R>,\n        public_params: &S::PublicParams,\n    ) -> Result<groth16::VerifyingKey<Bls12>> {\n        Self::get_verifying_key(rng, Self::blank_circuit(public_params), public_params)\n    }\n\n    fn circuit_for_test(\n        public_parameters: &PublicParams<'a, S>,\n        public_inputs: &S::PublicInputs,\n        private_inputs: &S::PrivateInputs,\n    ) -> Result<(C, Vec<Fr>)> {\n        let vanilla_params = &public_parameters.vanilla_params;\n        let partition_count = partitions::partition_count(public_parameters.partitions);\n        let vanilla_proofs = S::prove_all_partitions(\n            vanilla_params,\n            public_inputs,\n            private_inputs,\n            partition_count,\n        )\n        .context(\"failed to generate partition proofs\")?;\n\n        ensure!(\n            vanilla_proofs.len() == partition_count,\n            \"Vanilla proofs didn't match number of partitions.\"\n        );\n\n        let partitions_are_verified =\n            S::verify_all_partitions(vanilla_params, &public_inputs, &vanilla_proofs)\n                .context(\"failed to verify partition proofs\")?;\n\n        ensure!(partitions_are_verified, \"Vanilla proof didn't verify.\");\n\n        // Some(0) because we only return a circuit and inputs for the first partition.\n        // It would be more thorough to return all, though just checking one is probably\n        // fine for verifying circuit construction.\n        let partition_pub_in = S::with_partition(public_inputs.clone(), Some(0));\n        let inputs = Self::generate_public_inputs(&partition_pub_in, vanilla_params, Some(0))?;\n\n        let circuit = Self::circuit(\n            &partition_pub_in,\n            C::ComponentPrivateInputs::default(),\n            &vanilla_proofs[0],\n            vanilla_params,\n            Some(0),\n        )?;\n\n        Ok((circuit, inputs))\n    }\n\n    /// Like circuit_for_test but returns values for all partitions.\n    fn circuit_for_test_all(\n        public_parameters: &PublicParams<'a, S>,\n        public_inputs: &S::PublicInputs,\n        private_inputs: &S::PrivateInputs,\n    ) -> Result<Vec<(C, Vec<Fr>)>> {\n        let vanilla_params = &public_parameters.vanilla_params;\n        let partition_count = partitions::partition_count(public_parameters.partitions);\n        let vanilla_proofs = S::prove_all_partitions(\n            vanilla_params,\n            public_inputs,\n            private_inputs,\n            partition_count,\n        )\n        .context(\"failed to generate partition proofs\")?;\n\n        ensure!(\n            vanilla_proofs.len() == partition_count,\n            \"Vanilla proofs didn't match number of partitions.\"\n        );\n\n        let partitions_are_verified =\n            S::verify_all_partitions(vanilla_params, &public_inputs, &vanilla_proofs)\n                
.context(\"failed to verify partition proofs\")?;\n\n        ensure!(partitions_are_verified, \"Vanilla proof didn't verify.\");\n\n        let mut res = Vec::with_capacity(partition_count);\n        for (partition, vanilla_proof) in vanilla_proofs.iter().enumerate() {\n            let partition_pub_in = S::with_partition(public_inputs.clone(), Some(partition));\n            let inputs =\n                Self::generate_public_inputs(&partition_pub_in, vanilla_params, Some(partition))?;\n\n            let circuit = Self::circuit(\n                &partition_pub_in,\n                C::ComponentPrivateInputs::default(),\n                vanilla_proof,\n                vanilla_params,\n                Some(partition),\n            )?;\n            res.push((circuit, inputs));\n        }\n        Ok(res)\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/crypto/aes.rs",
    "content": "use aes::Aes256;\nuse anyhow::{ensure, Context};\nuse block_modes::block_padding::ZeroPadding;\nuse block_modes::{BlockMode, Cbc};\n\nuse crate::error::Result;\n\nconst IV: [u8; 16] = [0u8; 16];\n\npub fn encode(key: &[u8], plaintext: &[u8]) -> Result<Vec<u8>> {\n    ensure!(key.len() == 32, \"invalid key length\");\n\n    let mode = Cbc::<Aes256, ZeroPadding>::new_var(key, &IV).context(\"invalid key\")?;\n\n    Ok(mode.encrypt_vec(plaintext))\n}\n\npub fn decode(key: &[u8], ciphertext: &[u8]) -> Result<Vec<u8>> {\n    ensure!(key.len() == 32, \"invalid key length\");\n\n    let mode = Cbc::<Aes256, ZeroPadding>::new_var(key, &IV).context(\"invalid key\")?;\n\n    let res = mode.decrypt_vec(ciphertext).context(\"failed to decrypt\")?;\n    Ok(res)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn test_aes() {\n        let mut rng = XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for i in 0..10 {\n            let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect();\n            let plaintext: Vec<u8> = (0..(i + 1) * 32).map(|_| rng.gen()).collect();\n\n            let ciphertext = encode(key.as_slice(), plaintext.as_slice()).unwrap();\n\n            assert_ne!(\n                plaintext, ciphertext,\n                \"plaintext and ciphertext are identical\"\n            );\n            assert_eq!(plaintext.len(), ciphertext.len());\n\n            let roundtrip = decode(key.as_slice(), ciphertext.as_slice()).unwrap();\n            assert_eq!(plaintext, roundtrip, \"failed to roundtrip\");\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/crypto/feistel.rs",
    "content": "use blake2b_simd::blake2b;\nuse std::mem;\n\npub const FEISTEL_ROUNDS: usize = 3;\n// 3 rounds is an acceptable value for a pseudo-random permutation,\n// see https://github.com/filecoin-project/rust-proofs/issues/425\n// (and also https://en.wikipedia.org/wiki/Feistel_cipher#Theoretical_work).\n\npub type Index = u64;\n\npub type FeistelPrecomputed = (Index, Index, Index);\n\n// Find the minimum number of even bits to represent `num_elements`\n// within a `u32` maximum. Returns the left and right masks evenly\n// distributed that together add up to that minimum number of bits.\npub fn precompute(num_elements: Index) -> FeistelPrecomputed {\n    let mut next_pow4: Index = 4;\n    let mut log4 = 1;\n    while next_pow4 < num_elements {\n        next_pow4 *= 4;\n        log4 += 1;\n    }\n\n    let left_mask = ((1 << log4) - 1) << log4;\n    let right_mask = (1 << log4) - 1;\n    let half_bits = log4;\n\n    (left_mask, right_mask, half_bits)\n}\n\n// Pseudo-randomly shuffle an input from a starting position to another\n// one within the `[0, num_elements)` range using a `key` that will allow\n// the reverse operation to take place.\npub fn permute(\n    num_elements: Index,\n    index: Index,\n    keys: &[Index],\n    precomputed: FeistelPrecomputed,\n) -> Index {\n    let mut u = encode(index, keys, precomputed);\n\n    while u >= num_elements {\n        u = encode(u, keys, precomputed)\n    }\n    // Since we are representing `num_elements` using an even number of bits,\n    // that can encode many values above it, so keep repeating the operation\n    // until we land in the permitted range.\n\n    u\n}\n\n// Inverts the `permute` result to its starting value for the same `key`.\npub fn invert_permute(\n    num_elements: Index,\n    index: Index,\n    keys: &[Index],\n    precomputed: FeistelPrecomputed,\n) -> Index {\n    let mut u = decode(index, keys, precomputed);\n\n    while u >= num_elements {\n        u = decode(u, keys, precomputed);\n    }\n    u\n}\n\n/// common_setup performs common calculations on inputs shared by encode and decode.\n/// Decompress the `precomputed` part of the algorithm into the initial `left` and\n/// `right` pieces `(L_0, R_0)` with the `right_mask` and `half_bits` to manipulate\n/// them.\nfn common_setup(index: Index, precomputed: FeistelPrecomputed) -> (Index, Index, Index, Index) {\n    let (left_mask, right_mask, half_bits) = precomputed;\n\n    let left = (index & left_mask) >> half_bits;\n    let right = index & right_mask;\n\n    (left, right, right_mask, half_bits)\n}\n\nfn encode(index: Index, keys: &[Index], precomputed: FeistelPrecomputed) -> Index {\n    let (mut left, mut right, right_mask, half_bits) = common_setup(index, precomputed);\n\n    for key in keys.iter().take(FEISTEL_ROUNDS) {\n        let (l, r) = (right, left ^ feistel(right, *key, right_mask));\n        left = l;\n        right = r;\n    }\n\n    (left << half_bits) | right\n}\n\nfn decode(index: Index, keys: &[Index], precomputed: FeistelPrecomputed) -> Index {\n    let (mut left, mut right, right_mask, half_bits) = common_setup(index, precomputed);\n\n    for i in (0..FEISTEL_ROUNDS).rev() {\n        let (l, r) = ((right ^ feistel(left, keys[i], right_mask)), left);\n        left = l;\n        right = r;\n    }\n\n    (left << half_bits) | right\n}\n\nconst HALF_FEISTEL_BYTES: usize = mem::size_of::<Index>();\nconst FEISTEL_BYTES: usize = 2 * HALF_FEISTEL_BYTES;\n\n// Round function of the Feistel network: `F(Ri, Ki)`. 
Joins the `right`\n// piece and the `key`, hashes them and returns the lower half of\n// the hash, filtered through the `right_mask`.\nfn feistel(right: Index, key: Index, right_mask: Index) -> Index {\n    let mut data: [u8; FEISTEL_BYTES] = [0; FEISTEL_BYTES];\n\n    // So ugly, but the price of (relative) speed.\n    let r = if FEISTEL_BYTES <= 8 {\n        data[0] = (right >> 24) as u8;\n        data[1] = (right >> 16) as u8;\n        data[2] = (right >> 8) as u8;\n        data[3] = right as u8;\n\n        data[4] = (key >> 24) as u8;\n        data[5] = (key >> 16) as u8;\n        data[6] = (key >> 8) as u8;\n        data[7] = key as u8;\n\n        let raw = blake2b(&data);\n        let hash = raw.as_bytes();\n\n        Index::from(hash[0]) << 24\n            | Index::from(hash[1]) << 16\n            | Index::from(hash[2]) << 8\n            | Index::from(hash[3])\n    } else {\n        data[0] = (right >> 56) as u8;\n        data[1] = (right >> 48) as u8;\n        data[2] = (right >> 40) as u8;\n        data[3] = (right >> 32) as u8;\n        data[4] = (right >> 24) as u8;\n        data[5] = (right >> 16) as u8;\n        data[6] = (right >> 8) as u8;\n        data[7] = right as u8;\n\n        data[8] = (key >> 56) as u8;\n        data[9] = (key >> 48) as u8;\n        data[10] = (key >> 40) as u8;\n        data[11] = (key >> 32) as u8;\n        data[12] = (key >> 24) as u8;\n        data[13] = (key >> 16) as u8;\n        data[14] = (key >> 8) as u8;\n        data[15] = key as u8;\n\n        let raw = blake2b(&data);\n        let hash = raw.as_bytes();\n\n        Index::from(hash[0]) << 56\n            | Index::from(hash[1]) << 48\n            | Index::from(hash[2]) << 40\n            | Index::from(hash[3]) << 32\n            | Index::from(hash[4]) << 24\n            | Index::from(hash[5]) << 16\n            | Index::from(hash[6]) << 8\n            | Index::from(hash[7])\n    };\n\n    r & right_mask\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    // Some sample n-values which are not powers of four and also don't coincidentally happen to\n    // encode/decode correctly.\n    const BAD_NS: &[Index] = &[5, 6, 8, 12, 17];\n\n    fn encode_decode(n: Index, expect_success: bool) {\n        let mut failed = false;\n        let precomputed = precompute(n);\n        for i in 0..n {\n            let p = encode(i, &[1, 2, 3, 4], precomputed);\n            let v = decode(p, &[1, 2, 3, 4], precomputed);\n            let equal = i == v;\n            let in_range = p <= n;\n            if expect_success {\n                assert!(equal, \"failed to permute (n = {})\", n);\n                assert!(in_range, \"output number is too big (n = {})\", n);\n            } else {\n                if !equal || !in_range {\n                    failed = true;\n                }\n            }\n        }\n        if !expect_success {\n            assert!(failed, \"expected failure (n = {})\", n);\n        }\n    }\n\n    #[test]\n    fn test_feistel_power_of_4() {\n        // Our implementation is guaranteed to produce a permutation when the input size\n        // (number of elements) is a power of four.\n        let mut n = 1;\n\n        // Powers of 4 always succeed.\n        for _ in 0..4 {\n            n *= 4;\n            encode_decode(n, true);\n        }\n\n        // Some non-power-of-4 values also succeed, but here is a selection of example values\n        // showing that this is not guaranteed.\n        for i in BAD_NS.iter() {\n            
encode_decode(*i, false);\n        }\n    }\n\n    #[test]\n    fn test_feistel_on_arbitrary_set() {\n        for n in BAD_NS.iter() {\n            let precomputed = precompute(*n as Index);\n            for i in 0..*n {\n                let p = permute(*n, i, &[1, 2, 3, 4], precomputed);\n                let v = invert_permute(*n, p, &[1, 2, 3, 4], precomputed);\n                // Since every element in the set is reversibly mapped to another element also in the set,\n                // this is indeed a permutation.\n                assert_eq!(i, v, \"failed to permute\");\n                assert!(p <= *n, \"output number is too big\");\n            }\n        }\n    }\n}\n"
  },
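  {
    "path": "storage-proofs/core/examples/feistel_permutation.rs",
    "content": "//! Editor's illustrative sketch, not a file from the original source tree: the path, the\n//! binary name, and the assumption that this crate is importable as `storage_proofs_core`\n//! are all hypothetical.\n//!\n//! Demonstrates that `permute`/`invert_permute` form a bijection on `[0, num_elements)`\n//! even when `num_elements` is not a power of four, thanks to the cycle-walking loop in\n//! `permute` that re-encodes any out-of-range output until it lands back in the domain.\n\nuse storage_proofs_core::crypto::feistel::{invert_permute, permute, precompute, Index};\n\nfn main() {\n    let n: Index = 12; // deliberately not a power of four\n    let keys = [1, 2, 3, 4];\n    let precomputed = precompute(n);\n\n    let mut seen = vec![false; n as usize];\n    for i in 0..n {\n        let p = permute(n, i, &keys, precomputed);\n        assert!(p < n, \"output must stay within the domain\");\n        assert!(!seen[p as usize], \"outputs must not collide\");\n        seen[p as usize] = true;\n        assert_eq!(invert_permute(n, p, &keys, precomputed), i);\n    }\n    println!(\"permute/invert_permute is a bijection on [0, {})\", n);\n}\n"
  },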
  {
    "path": "storage-proofs/core/src/crypto/mod.rs",
    "content": "pub mod aes;\npub mod feistel;\npub mod pedersen;\npub mod sloth;\npub mod xor;\n"
  },
  {
    "path": "storage-proofs/core/src/crypto/pedersen.rs",
    "content": "use anyhow::{ensure, Context};\nuse ff::PrimeFieldRepr;\nuse fil_sapling_crypto::jubjub::JubjubBls12;\nuse fil_sapling_crypto::pedersen_hash::Personalization;\nuse lazy_static::lazy_static;\nuse paired::bls12_381::{Bls12, Fr, FrRepr};\n\nuse crate::error::Result;\nuse crate::fr32::bytes_into_frs;\nuse crate::settings;\n\nlazy_static! {\n    pub static ref JJ_PARAMS: JubjubBls12 = JubjubBls12::new_with_window_size(\n        settings::SETTINGS\n            .lock()\n            .unwrap()\n            .pedersen_hash_exp_window_size\n    );\n}\n\npub const PEDERSEN_BLOCK_SIZE: usize = 256;\npub const PEDERSEN_BLOCK_BYTES: usize = PEDERSEN_BLOCK_SIZE / 8;\n\npub fn pedersen(data: &[u8]) -> Fr {\n    pedersen_bits(Bits::new(data))\n}\n\npub fn pedersen_bits<'a, S: Iterator<Item = &'a [u8]>>(data: Bits<&'a [u8], S>) -> Fr {\n    let digest = if cfg!(target_arch = \"x86_64\") {\n        use fil_sapling_crypto::pedersen_hash::pedersen_hash_bls12_381_with_precomp;\n        pedersen_hash_bls12_381_with_precomp::<_>(Personalization::None, data, &JJ_PARAMS)\n    } else {\n        use fil_sapling_crypto::pedersen_hash::pedersen_hash;\n        pedersen_hash::<Bls12, _>(Personalization::None, data, &JJ_PARAMS)\n    };\n\n    digest.into_xy().0\n}\n\n/// Pedersen hashing for inputs that have length mulitple of the block size `256`. Based on pedersen hashes and a Merkle-Damgard construction.\npub fn pedersen_md_no_padding(data: &[u8]) -> Fr {\n    pedersen_md_no_padding_bits(Bits::new(data))\n}\n\npub fn pedersen_md_no_padding_bits<T: AsRef<[u8]>, S: Iterator<Item = T>>(\n    mut data: Bits<T, S>,\n) -> Fr {\n    let mut cur = Vec::with_capacity(PEDERSEN_BLOCK_SIZE);\n\n    // hash the first two blocks\n    let first = pedersen_compression_bits(data.ref_take(2 * PEDERSEN_BLOCK_SIZE));\n    first\n        .write_le(&mut cur)\n        .expect(\"failed to write result hash\");\n\n    while !data.is_done() {\n        let r = data.ref_take(PEDERSEN_BLOCK_SIZE);\n        let x = pedersen_compression_bits(Bits::new(&cur).chain(r));\n\n        cur.truncate(0);\n        x.write_le(&mut cur).expect(\"failed to write result hash\");\n    }\n\n    let frs = bytes_into_frs(&cur).expect(\"pedersen must generate valid fr elements\");\n    assert_eq!(frs.len(), 1);\n    frs[0]\n}\n\nfn pedersen_compression_bits<T>(bits: T) -> FrRepr\nwhere\n    T: IntoIterator<Item = bool>,\n{\n    let digest = if cfg!(target_arch = \"x86_64\") {\n        use fil_sapling_crypto::pedersen_hash::pedersen_hash_bls12_381_with_precomp;\n        pedersen_hash_bls12_381_with_precomp::<_>(Personalization::None, bits, &JJ_PARAMS)\n    } else {\n        use fil_sapling_crypto::pedersen_hash::pedersen_hash;\n        pedersen_hash::<Bls12, _>(Personalization::None, bits, &JJ_PARAMS)\n    };\n\n    digest.into_xy().0.into()\n}\n\n#[derive(Debug, Clone)]\npub struct Hasher {\n    curr: Option<[u8; 32]>,\n}\n\nimpl Hasher {\n    pub fn new(data: &[u8]) -> Result<Self> {\n        ensure!(data.len() == 32, \"Data must be 32 bytes.\");\n        let mut curr = [0u8; 32];\n        curr.copy_from_slice(data);\n\n        Ok(Hasher { curr: Some(curr) })\n    }\n\n    pub fn new_empty() -> Self {\n        Hasher { curr: None }\n    }\n\n    pub fn update(&mut self, data: &[u8]) -> Result<()> {\n        ensure!(data.len() == 32, \"Data must be 32 bytes.\");\n\n        if let Some(ref mut curr) = self.curr {\n            let source = [curr, data];\n            let data = Bits::new_many(source.iter());\n            let x = 
pedersen_compression_bits(data);\n            x.write_le(std::io::Cursor::new(&mut curr[..]))\n                .context(\"failed to write result\")?;\n        } else {\n            let data = Bits::new(data);\n            let x = pedersen_compression_bits(data);\n            let mut curr = [0u8; 32];\n            x.write_le(std::io::Cursor::new(&mut curr[..]))\n                .context(\"failed to write result\")?;\n            self.curr = Some(curr);\n        }\n\n        Ok(())\n    }\n\n    pub fn finalize_bytes(self) -> [u8; 32] {\n        let Hasher { curr } = self;\n        curr.expect(\"missed init\")\n    }\n\n    pub fn finalize(self) -> Result<Fr> {\n        let frs = bytes_into_frs(&self.curr.expect(\"missed init\"))\n            .context(\"pedersen must generate valid fr elements\")?;\n        ensure!(frs.len() == 1, \"There must be a single fr element.\");\n        Ok(frs[0])\n    }\n}\n\n/// Creates an iterator over the byte slices in little endian format.\n#[derive(Debug, Clone)]\npub struct Bits<K: AsRef<[u8]>, S: Iterator<Item = K>> {\n    /// The individual parts that make up the data that is being iterated over.\n    parts: ManyOrSingle<K, S>,\n    /// How many bytes we are into the `current_part`\n    position_byte: usize,\n    /// How many bits we are into the `current_byte`.\n    position_bit: u8,\n    /// The current part we are reading from.\n    current_part: Option<K>,\n    /// Track the first iteration.\n    first: bool,\n    /// Are we done yet?\n    done: bool,\n}\n\n/// Abstraction over either an iterator or a single element.\n#[derive(Debug, Clone)]\nenum ManyOrSingle<T, S = <Vec<T> as IntoIterator>::IntoIter>\nwhere\n    S: Iterator<Item = T>,\n{\n    Many(S),\n    Single(Option<T>),\n}\n\nimpl<T: AsRef<[u8]>> Bits<T, <Vec<T> as IntoIterator>::IntoIter> {\n    pub fn new(parts: T) -> Self {\n        Bits {\n            parts: ManyOrSingle::<T, <Vec<T> as IntoIterator>::IntoIter>::Single(Some(parts)),\n            position_byte: 0,\n            position_bit: 0,\n            current_part: None,\n            first: true,\n            done: false,\n        }\n    }\n}\n\nimpl<T: AsRef<[u8]>, S: Iterator<Item = T>> Bits<T, S> {\n    pub fn new_many(parts: S) -> Self {\n        Bits {\n            parts: ManyOrSingle::Many(parts),\n            position_byte: 0,\n            position_bit: 0,\n            current_part: None,\n            first: true,\n            done: false,\n        }\n    }\n\n    pub fn is_done(&self) -> bool {\n        self.done\n    }\n\n    fn inc_part(&mut self) {\n        self.current_part = match self.parts {\n            ManyOrSingle::Many(ref mut parts) => {\n                if self.first {\n                    self.first = false;\n                }\n                parts.next()\n            }\n            ManyOrSingle::Single(ref mut part) => {\n                if self.first {\n                    self.first = false;\n                    part.take()\n                } else {\n                    None\n                }\n            }\n        }\n    }\n\n    /// Increments the inner positions by 1 bit.\n    fn inc(&mut self) {\n        if self.position_bit < 7 {\n            self.position_bit += 1;\n            return;\n        }\n\n        self.position_bit = 0;\n        if let Some(ref part) = self.current_part {\n            if self.position_byte + 1 < part.as_ref().len() {\n                self.position_byte += 1;\n                return;\n            }\n        }\n\n        self.inc_part();\n        self.position_byte = 0;\n        
self.done = self.current_part.is_none();\n    }\n\n    fn ref_take(&mut self, take: usize) -> BitsTake<'_, T, S> {\n        BitsTake::new(self, take)\n    }\n}\n\n#[derive(Debug)]\nstruct BitsTake<'a, T: AsRef<[u8]>, S: Iterator<Item = T>> {\n    iter: &'a mut Bits<T, S>,\n    take: usize,\n}\n\nimpl<'a, T: AsRef<[u8]>, S: Iterator<Item = T>> BitsTake<'a, T, S> {\n    pub fn new(iter: &'a mut Bits<T, S>, take: usize) -> Self {\n        BitsTake { iter, take }\n    }\n}\n\nimpl<'a, T: AsRef<[u8]>, S: Iterator<Item = T> + std::iter::FusedIterator> std::iter::FusedIterator\n    for BitsTake<'a, T, S>\n{\n}\n\nimpl<'a, T: AsRef<[u8]>, S: Iterator<Item = T>> Iterator for BitsTake<'a, T, S> {\n    type Item = bool;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.take == 0 {\n            return None;\n        }\n\n        self.take -= 1;\n        self.iter.next()\n    }\n}\n\nimpl<T: AsRef<[u8]>, S: Iterator<Item = T> + std::iter::FusedIterator> std::iter::FusedIterator\n    for Bits<T, S>\n{\n}\n\nimpl<T: AsRef<[u8]>, S: Iterator<Item = T>> Iterator for Bits<T, S> {\n    type Item = bool;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.done {\n            return None;\n        }\n\n        if self.first {\n            // first time\n            self.inc_part();\n        }\n\n        let byte = match self.current_part {\n            Some(ref part) => part.as_ref()[self.position_byte],\n            None => {\n                self.done = true;\n                return None;\n            }\n        };\n\n        let res = (byte >> self.position_bit) & 1u8 == 1u8;\n        self.inc();\n\n        Some(res)\n    }\n\n    // optimized nth method so we can use it to skip forward easily\n    fn nth(&mut self, n: usize) -> Option<Self::Item> {\n        for _ in 0..n {\n            // TODO: implement optimized inc for n bits.\n            self.inc();\n        }\n        self.next()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::util::bytes_into_bits;\n    use bitvec::{bitvec, order::Lsb0};\n    use ff::Field;\n    use paired::bls12_381::Fr;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn test_bit_vec_le() {\n        let bytes = b\"ABC\";\n        let bits = bytes_into_bits(bytes);\n\n        let mut bits2 = bitvec![Lsb0, u8; 0; bits.len()];\n        bits2.as_mut_slice()[0..bytes.len()].copy_from_slice(&bytes[..]);\n\n        assert_eq!(bits, bits2.iter().copied().collect::<Vec<bool>>());\n    }\n\n    #[test]\n    fn test_pedersen_compression() {\n        let bytes = Bits::new(b\"some bytes\");\n\n        let x = pedersen_compression_bits(bytes);\n        let mut data = Vec::new();\n        x.write_le(&mut data).unwrap();\n\n        let expected = vec![\n            237, 70, 41, 231, 39, 180, 131, 120, 36, 36, 119, 199, 200, 225, 153, 242, 106, 116,\n            70, 9, 12, 249, 169, 84, 105, 38, 225, 115, 165, 188, 98, 25,\n        ];\n        assert_eq!(expected, data);\n    }\n\n    #[test]\n    fn test_pedersen_md_no_padding() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for i in 2..5 {\n            let x: Vec<u8> = (0..i * 32).map(|_| rng.gen()).collect();\n            let hashed = pedersen_md_no_padding(x.as_slice());\n            assert_ne!(hashed, Fr::zero());\n        }\n    }\n\n    #[test]\n    fn test_bits_collect() {\n        let bytes = b\"hello\";\n        let bits = bytes_into_bits(bytes);\n\n        let bits_iter = Bits::new(bytes);\n        let 
bits_iter_collected: Vec<bool> = bits_iter.collect();\n\n        assert_eq!(bits, bits_iter_collected);\n\n        let bytes = b\"hello world these are some bytes\";\n        let bits = bytes_into_bits(bytes);\n\n        let parts: Vec<&[u8]> = vec![b\"hello \", b\"world\", b\" these are some bytes\"];\n        let bits_iter = Bits::new_many(parts.into_iter());\n\n        let bits_iter_collected: Vec<bool> = bits_iter.collect();\n\n        assert_eq!(bits, bits_iter_collected);\n    }\n\n    #[test]\n    fn test_bits_take() {\n        let bytes = b\"hello world these are some bytes\";\n        let bits = bytes_into_bits(bytes);\n\n        let parts: Vec<&[u8]> = vec![b\"hello \", b\"world\", b\" these are some bytes\"];\n        let mut bits_iter = Bits::new_many(parts.into_iter());\n\n        let bits_collected: Vec<bool> = vec![\n            bits_iter.ref_take(8).collect::<Vec<bool>>(),\n            bits_iter.ref_take(8).collect::<Vec<bool>>(),\n            bits_iter.ref_take(bits.len() - 16).collect::<Vec<bool>>(),\n        ]\n        .into_iter()\n        .flatten()\n        .collect();\n\n        assert_eq!(bits, bits_collected);\n    }\n\n    #[test]\n    fn test_pedersen_hasher_update() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for _ in 2..5 {\n            let x: Vec<Vec<u8>> = (0..5)\n                .map(|_| (0..32).map(|_| rng.gen()).collect())\n                .collect();\n            let flat: Vec<u8> = x.iter().flatten().copied().collect();\n            let hashed = pedersen_md_no_padding(&flat);\n\n            let mut hasher = Hasher::new(&x[0]).unwrap();\n            for k in 1..5 {\n                hasher.update(&x[k]).unwrap();\n            }\n\n            let hasher_final = hasher.finalize().unwrap();\n\n            assert_eq!(hashed, hasher_final);\n        }\n    }\n}\n"
  },
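  {
    "path": "storage-proofs/core/examples/pedersen_incremental.rs",
    "content": "//! Editor's illustrative sketch, not a file from the original source tree; the path and the\n//! `storage_proofs_core` crate name are assumptions.\n//!\n//! Shows that the incremental `Hasher` agrees with the one-shot `pedersen_md_no_padding`\n//! on block-aligned input, as the unit tests also verify: both chain 256-bit blocks in\n//! Merkle-Damgard fashion.\n\nuse storage_proofs_core::crypto::pedersen::{pedersen_md_no_padding, Hasher};\n\nfn main() {\n    let a = [1u8; 32];\n    let b = [2u8; 32];\n\n    // One-shot hash over both blocks.\n    let flat: Vec<u8> = a.iter().chain(b.iter()).copied().collect();\n    let one_shot = pedersen_md_no_padding(&flat);\n\n    // Incremental hash, one 32-byte block at a time.\n    let mut hasher = Hasher::new(&a).expect(\"blocks must be 32 bytes\");\n    hasher.update(&b).expect(\"blocks must be 32 bytes\");\n    let incremental = hasher.finalize().expect(\"must yield a valid Fr\");\n\n    assert_eq!(one_shot, incremental);\n}\n"
  },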
  {
    "path": "storage-proofs/core/src/crypto/sloth.rs",
    "content": "use ff::Field;\nuse paired::bls12_381::Fr;\n\n/// Sloth based encoding.\n#[inline]\npub fn encode(key: &Fr, plaintext: &Fr) -> Fr {\n    let mut ciphertext = *plaintext;\n\n    ciphertext.add_assign(key); // c + k\n    ciphertext\n}\n\n/// Sloth based decoding.\n#[inline]\npub fn decode(key: &Fr, ciphertext: &Fr) -> Fr {\n    let mut plaintext = *ciphertext;\n\n    plaintext.sub_assign(key); // c - k\n\n    plaintext\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use ff::PrimeField;\n    use paired::bls12_381::{Fr, FrRepr};\n    use proptest::{prop_compose, proptest, proptest_helper};\n\n    // the modulus from `bls12_381::Fr`\n    // The definition of MODULUS and comment defining r come from paired/src/bls_12_381/fr.rs.\n    // r = 52435875175126190479447740508185965837690552500527637822603658699938581184513\n    const MODULUS: [u64; 4] = [\n        0xffffffff00000001,\n        0x53bda402fffe5bfe,\n        0x3339d80809a1d805,\n        0x73eda753299d7d48,\n    ];\n\n    #[test]\n    fn sloth_bls_12() {\n        let key = Fr::from_str(\"11111111\").unwrap();\n        let plaintext = Fr::from_str(\"123456789\").unwrap();\n        let ciphertext = encode(&key, &plaintext);\n        let decrypted = decode(&key, &ciphertext);\n        assert_eq!(plaintext, decrypted);\n        assert_ne!(plaintext, ciphertext);\n    }\n\n    #[test]\n    fn sloth_bls_12_fake() {\n        let key = Fr::from_str(\"11111111\").unwrap();\n        let key_fake = Fr::from_str(\"11111112\").unwrap();\n        let plaintext = Fr::from_str(\"123456789\").unwrap();\n        let ciphertext = encode(&key, &plaintext);\n        let decrypted = decode(&key_fake, &ciphertext);\n        assert_ne!(plaintext, decrypted);\n    }\n\n    prop_compose! {\n        fn arb_fr()(a in 0..MODULUS[0], b in 0..MODULUS[1], c in 0..MODULUS[2], d in 0..MODULUS[3]) -> Fr {\n            Fr::from_repr(FrRepr([a, b, c, d])).unwrap()\n        }\n    }\n    proptest! {\n        #[test]\n        fn sloth_bls_roundtrip(key in arb_fr(), plaintext in arb_fr()) {\n            let ciphertext = encode(&key, &plaintext);\n            assert_eq!(decode(&key, &ciphertext), plaintext);\n        }\n    }\n}\n"
  },
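  {
    "path": "storage-proofs/core/examples/sloth_roundtrip.rs",
    "content": "//! Editor's illustrative sketch, not a file from the original source tree; the path and the\n//! `storage_proofs_core` crate name are assumptions.\n//!\n//! The simplified sloth in `crypto::sloth` is plain key addition in Fr, so decoding is just\n//! the matching subtraction.\n\nuse ff::PrimeField;\nuse paired::bls12_381::Fr;\nuse storage_proofs_core::crypto::sloth;\n\nfn main() {\n    let key = Fr::from_str(\"11111111\").expect(\"valid decimal Fr\");\n    let plaintext = Fr::from_str(\"123456789\").expect(\"valid decimal Fr\");\n\n    let ciphertext = sloth::encode(&key, &plaintext); // plaintext + key\n    assert_eq!(sloth::decode(&key, &ciphertext), plaintext); // ciphertext - key\n}\n"
  },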
  {
    "path": "storage-proofs/core/src/crypto/xor.rs",
    "content": "use crate::error::Result;\nuse anyhow::ensure;\n\n/// Encodes plaintext by elementwise xoring with the passed in key.\npub fn encode(key: &[u8], plaintext: &[u8]) -> Result<Vec<u8>> {\n    xor(key, plaintext)\n}\n\n/// Decodes ciphertext by elementwise xoring with the passed in key.\npub fn decode(key: &[u8], ciphertext: &[u8]) -> Result<Vec<u8>> {\n    xor(key, ciphertext)\n}\n\nfn xor(key: &[u8], input: &[u8]) -> Result<Vec<u8>> {\n    let key_len = key.len();\n    ensure!(key_len == 32, \"Key must be 32 bytes.\");\n\n    Ok(input\n        .iter()\n        .enumerate()\n        .map(|(i, byte)| byte ^ key[i % key_len])\n        .collect())\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn test_xor() {\n        let mut rng = XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for i in 0..10 {\n            let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect();\n            let plaintext: Vec<u8> = (0..(i + 1) * 32).map(|_| rng.gen()).collect();\n\n            let ciphertext = encode(key.as_slice(), plaintext.as_slice()).unwrap();\n\n            assert_ne!(\n                plaintext, ciphertext,\n                \"plaintext and ciphertext are identical\"\n            );\n            assert_eq!(plaintext.len(), ciphertext.len());\n\n            let roundtrip = decode(key.as_slice(), ciphertext.as_slice()).unwrap();\n            assert_eq!(plaintext, roundtrip, \"failed to roundtrip\");\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/data.rs",
    "content": "use std::path::PathBuf;\n\nuse anyhow::{ensure, Context, Result};\nuse log::info;\n\n/// A wrapper around data either on disk or a slice in memory, that can be dropped and read back into memory,\n/// to allow for better control of memory consumption.\n#[derive(Debug)]\npub struct Data<'a> {\n    raw: Option<RawData<'a>>,\n    path: Option<PathBuf>,\n    len: usize,\n}\n\n#[derive(Debug)]\nenum RawData<'a> {\n    Slice(&'a mut [u8]),\n    Mmap(memmap::MmapMut),\n}\n\nuse std::ops::{Deref, DerefMut};\n\nimpl<'a> Deref for RawData<'a> {\n    type Target = [u8];\n\n    fn deref(&self) -> &Self::Target {\n        match self {\n            RawData::Slice(ref raw) => raw,\n            RawData::Mmap(ref raw) => raw,\n        }\n    }\n}\n\nimpl<'a> DerefMut for RawData<'a> {\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        match self {\n            RawData::Slice(ref mut raw) => raw,\n            RawData::Mmap(ref mut raw) => raw,\n        }\n    }\n}\n\nimpl<'a> From<&'a mut [u8]> for Data<'a> {\n    fn from(raw: &'a mut [u8]) -> Self {\n        let len = raw.len();\n        Data {\n            raw: Some(RawData::Slice(raw)),\n            path: None,\n            len,\n        }\n    }\n}\n\nimpl<'a> From<(memmap::MmapMut, PathBuf)> for Data<'a> {\n    fn from(raw: (memmap::MmapMut, PathBuf)) -> Self {\n        let len = raw.0.len();\n        Data {\n            raw: Some(RawData::Mmap(raw.0)),\n            path: Some(raw.1),\n            len,\n        }\n    }\n}\n\nimpl<'a> AsRef<[u8]> for Data<'a> {\n    fn as_ref(&self) -> &[u8] {\n        match self.raw {\n            Some(ref raw) => raw,\n            None => panic!(\"figure it out\"),\n        }\n    }\n}\n\nimpl<'a> AsMut<[u8]> for Data<'a> {\n    fn as_mut(&mut self) -> &mut [u8] {\n        match self.raw {\n            Some(ref mut raw) => raw,\n            None => panic!(\"figure it out\"),\n        }\n    }\n}\n\nimpl<'a> Data<'a> {\n    pub fn from_path(path: PathBuf) -> Self {\n        Data {\n            raw: None,\n            path: Some(path),\n            len: 0,\n        }\n    }\n\n    pub fn new(raw: &'a mut [u8], path: PathBuf) -> Self {\n        let len = raw.len();\n\n        Data {\n            raw: Some(RawData::Slice(raw)),\n            path: Some(path),\n            len,\n        }\n    }\n\n    pub fn len(&self) -> usize {\n        self.len\n    }\n\n    pub fn is_empty(&self) -> bool {\n        self.len == 0\n    }\n\n    /// Recover the data.\n    pub fn ensure_data(&mut self) -> Result<()> {\n        match self.raw {\n            Some(..) 
=> {}\n            None => {\n                ensure!(self.path.is_some(), \"Missing path\");\n                let path = self.path.as_ref().unwrap();\n\n                info!(\"restoring {}\", path.display());\n\n                let f_data = std::fs::OpenOptions::new()\n                    .read(true)\n                    .write(true)\n                    .open(path)\n                    .with_context(|| format!(\"could not open path={:?}\", path))?;\n                let data = unsafe {\n                    memmap::MmapOptions::new()\n                        .map_mut(&f_data)\n                        .with_context(|| format!(\"could not mmap path={:?}\", path))?\n                };\n\n                self.len = data.len();\n                self.raw = Some(RawData::Mmap(data));\n            }\n        }\n\n        Ok(())\n    }\n\n    /// Drops the actual data, if we can recover it.\n    pub fn drop_data(&mut self) {\n        if let Some(ref p) = self.path {\n            info!(\"dropping data {}\", p.display());\n            self.raw.take();\n        }\n    }\n}\n"
  },
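  {
    "path": "storage-proofs/core/examples/data_lifecycle.rs",
    "content": "//! Editor's illustrative sketch, not a file from the original source tree; the path, the\n//! scratch-file name, and the `storage_proofs_core` crate name are assumptions.\n//!\n//! Walks through the `Data` lifecycle: drop the in-memory copy to free RAM, then restore\n//! it from disk on demand via `ensure_data`.\n\nuse storage_proofs_core::data::Data;\n\nfn main() -> anyhow::Result<()> {\n    // `ensure_data` opens the file read/write, so create a real file first.\n    let path = std::env::temp_dir().join(\"data_lifecycle_example.bin\");\n    std::fs::write(&path, vec![7u8; 64])?;\n\n    let mut data = Data::from_path(path.clone());\n    data.ensure_data()?; // mmaps the file\n    assert_eq!(data.len(), 64);\n\n    data.drop_data(); // releases the mapping; recoverable because we kept the path\n    data.ensure_data()?; // maps it back in\n    assert_eq!(data.as_ref()[0], 7);\n\n    std::fs::remove_file(&path)?;\n    Ok(())\n}\n"
  },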
  {
    "path": "storage-proofs/core/src/drgraph.rs",
    "content": "use std::cmp::{max, min};\nuse std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse generic_array::typenum;\nuse rand::{rngs::OsRng, Rng, SeedableRng};\nuse rand_chacha::ChaCha8Rng;\nuse sha2::{Digest, Sha256};\n\nuse crate::error::*;\nuse crate::fr32::bytes_into_fr_repr_safe;\nuse crate::hasher::{Hasher, PoseidonArity};\nuse crate::parameter_cache::ParameterSetMetadata;\nuse crate::util::{data_at_node_offset, NODE_SIZE};\n\npub const PARALLEL_MERKLE: bool = true;\n\n/// The base degree used for all DRG graphs. One degree from this value is used to ensure that a\n/// given node always has its immediate predecessor as a parent, thus ensuring unique topological\n/// ordering of the graph nodes.\npub const BASE_DEGREE: usize = 6;\n\n/// A depth robust graph.\npub trait Graph<H: Hasher>: ::std::fmt::Debug + Clone + PartialEq + Eq {\n    type Key: std::fmt::Debug;\n\n    /// Returns the expected size of all nodes in the graph.\n    fn expected_size(&self) -> usize {\n        self.size() * NODE_SIZE\n    }\n\n    /// Returns the merkle tree depth.\n    fn merkle_tree_depth<U: 'static + PoseidonArity>(&self) -> u64 {\n        graph_height::<U>(self.size()) as u64\n    }\n\n    /// Returns a sorted list of all parents of this node. The parents may be repeated.\n    ///\n    /// If a node doesn't have any parents, then this vector needs to return a vector where\n    /// the first element is the requested node. This will be used as indicator for nodes\n    /// without parents.\n    ///\n    /// The `parents` parameter is used to store the result. This is done fore performance\n    /// reasons, so that the vector can be allocated outside this call.\n    fn parents(&self, node: usize, parents: &mut [u32]) -> Result<()>;\n\n    /// Returns the size of the graph (number of nodes).\n    fn size(&self) -> usize;\n\n    /// Returns the number of parents of each node in the graph.\n    fn degree(&self) -> usize;\n\n    fn new(\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        seed: [u8; 28],\n    ) -> Result<Self>;\n    fn seed(&self) -> [u8; 28];\n\n    /// Creates the encoding key.\n    /// The algorithm for that is `Sha256(id | encodedParentNode1 | encodedParentNode1 | ...)`.\n    fn create_key(\n        &self,\n        id: &H::Domain,\n        node: usize,\n        parents: &[u32],\n        parents_data: &[u8],\n        exp_parents_data: Option<&[u8]>,\n    ) -> Result<Self::Key>;\n}\n\npub fn graph_height<U: typenum::Unsigned>(number_of_leafs: usize) -> usize {\n    merkletree::merkle::get_merkle_tree_height(number_of_leafs, U::to_usize())\n}\n\n/// Bucket sampling algorithm.\n#[derive(Clone, Debug, PartialEq, Eq, Copy)]\npub struct BucketGraph<H: Hasher> {\n    nodes: usize,\n    base_degree: usize,\n    seed: [u8; 28],\n    _h: PhantomData<H>,\n}\n\nimpl<H: Hasher> ParameterSetMetadata for BucketGraph<H> {\n    fn identifier(&self) -> String {\n        // NOTE: Seed is not included because it does not influence parameter generation.\n        format!(\n            \"drgraph::BucketGraph{{size: {}; degree: {}; hasher: {}}}\",\n            self.nodes,\n            self.degree(),\n            H::name(),\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        (self.nodes * NODE_SIZE) as u64\n    }\n}\n\nimpl<H: Hasher> Graph<H> for BucketGraph<H> {\n    type Key = H::Domain;\n\n    fn create_key(\n        &self,\n        id: &H::Domain,\n        node: usize,\n        parents: &[u32],\n        base_parents_data: &[u8],\n        
_exp_parents_data: Option<&[u8]>,\n    ) -> Result<Self::Key> {\n        let mut hasher = Sha256::new();\n        hasher.input(AsRef::<[u8]>::as_ref(id));\n\n        // The hash is over the parents, hence skip it if a node doesn't have any parents.\n        if node != parents[0] as usize {\n            for parent in parents.iter() {\n                let offset = data_at_node_offset(*parent as usize);\n                hasher.input(&base_parents_data[offset..offset + NODE_SIZE]);\n            }\n        }\n\n        let hash = hasher.result();\n        Ok(bytes_into_fr_repr_safe(hash.as_ref()).into())\n    }\n\n    #[inline]\n    fn parents(&self, node: usize, parents: &mut [u32]) -> Result<()> {\n        let m = self.degree();\n\n        match node {\n            // There are special cases for the first and second node: the first node self\n            // references, the second node only references the first node.\n            0 | 1 => {\n                // Use the degree of the current graph (`m`) as `parents.len()` might be bigger than\n                // that (that's the case for Stacked Graph).\n                for parent in parents.iter_mut().take(m) {\n                    *parent = 0;\n                }\n                Ok(())\n            }\n            _ => {\n                // DRG node indexes are guaranteed to fit within a `u32`.\n                let node = node as u32;\n\n                let mut seed = [0u8; 32];\n                seed[..28].copy_from_slice(&self.seed);\n                seed[28..].copy_from_slice(&node.to_le_bytes());\n                let mut rng = ChaCha8Rng::from_seed(seed);\n\n                let m_prime = m - 1;\n                // Large sector sizes require that metagraph node indexes are `u64`.\n                let metagraph_node = node as u64 * m_prime as u64;\n                let n_buckets = (metagraph_node as f64).log2().ceil() as u64;\n\n                for parent in parents.iter_mut().take(m_prime) {\n                    let bucket_index = (rng.gen::<u64>() % n_buckets) + 1;\n                    let largest_distance_in_bucket = min(metagraph_node, 1 << bucket_index);\n                    let smallest_distance_in_bucket = max(2, largest_distance_in_bucket >> 1);\n\n                    // Add 1 because the range of distances in the bucket is inclusive.\n                    let n_distances_in_bucket =\n                        largest_distance_in_bucket - smallest_distance_in_bucket + 1;\n\n                    let distance =\n                        smallest_distance_in_bucket + (rng.gen::<u64>() % n_distances_in_bucket);\n\n                    let metagraph_parent = metagraph_node - distance;\n\n                    // Any metagraph node mapped onto the DRG can be safely cast back to `u32`.\n                    let mapped_parent = (metagraph_parent / m_prime as u64) as u32;\n\n                    *parent = if mapped_parent == node {\n                        node - 1\n                    } else {\n                        mapped_parent\n                    };\n                }\n\n                parents[m_prime] = node - 1;\n                Ok(())\n            }\n        }\n    }\n\n    #[inline]\n    fn size(&self) -> usize {\n        self.nodes\n    }\n\n    /// Returns the degree of the graph.\n    #[inline]\n    fn degree(&self) -> usize {\n        self.base_degree\n    }\n\n    fn seed(&self) -> [u8; 28] {\n        self.seed\n    }\n\n    fn new(\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        seed: [u8; 
28],\n    ) -> Result<Self> {\n        ensure!(expansion_degree == 0, \"Expansion degree must be zero.\");\n\n        // The number of metagraph nodes must not exceed `2^54` so as not to incur rounding errors\n        // when casting metagraph node indexes from `u64` to `f64` during parent generation.\n        let m_prime = base_degree - 1;\n        let n_metagraph_nodes = nodes as u64 * m_prime as u64;\n        ensure!(\n            n_metagraph_nodes <= 1u64 << 54,\n            \"The number of metagraph nodes must be precisely castable to `f64`\"\n        );\n\n        Ok(BucketGraph {\n            nodes,\n            base_degree,\n            seed,\n            _h: PhantomData,\n        })\n    }\n}\n\npub fn new_seed() -> [u8; 28] {\n    OsRng.gen()\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use memmap::MmapMut;\n    use memmap::MmapOptions;\n    use merkletree::store::StoreConfig;\n\n    use crate::drgraph::new_seed;\n    use crate::hasher::{\n        Blake2sHasher, PedersenHasher, PoseidonArity, PoseidonHasher, Sha256Hasher,\n    };\n    use crate::merkle::{\n        create_base_merkle_tree, DiskStore, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper,\n    };\n\n    // Create and return an object of MmapMut backed by in-memory copy of data.\n    pub fn mmap_from(data: &[u8]) -> MmapMut {\n        let mut mm = MmapOptions::new()\n            .len(data.len())\n            .map_anon()\n            .expect(\"Failed to create memory map\");\n        mm.copy_from_slice(data);\n        mm\n    }\n\n    fn graph_bucket<H: Hasher>() {\n        let degree = BASE_DEGREE;\n\n        for size in vec![4, 16, 256, 2048] {\n            let g = BucketGraph::<H>::new(size, degree, 0, new_seed()).unwrap();\n\n            assert_eq!(g.size(), size, \"wrong nodes count\");\n\n            let mut parents = vec![0; degree];\n            g.parents(0, &mut parents).unwrap();\n            assert_eq!(parents, vec![0; degree as usize]);\n            parents = vec![0; degree];\n            g.parents(1, &mut parents).unwrap();\n            assert_eq!(parents, vec![0; degree as usize]);\n\n            for i in 2..size {\n                let mut pa1 = vec![0; degree];\n                g.parents(i, &mut pa1).unwrap();\n                let mut pa2 = vec![0; degree];\n                g.parents(i, &mut pa2).unwrap();\n\n                assert_eq!(pa1.len(), degree);\n                assert_eq!(pa1, pa2, \"different parents on the same node\");\n\n                let mut p1 = vec![0; degree];\n                g.parents(i, &mut p1).unwrap();\n                let mut p2 = vec![0; degree];\n                g.parents(i, &mut p2).unwrap();\n\n                for parent in p1 {\n                    // TODO: fix me\n                    assert_ne!(i, parent as usize, \"self reference found\");\n                }\n            }\n        }\n    }\n\n    #[test]\n    fn graph_bucket_sha256() {\n        graph_bucket::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn graph_bucket_blake2s() {\n        graph_bucket::<Blake2sHasher>();\n    }\n\n    #[test]\n    fn graph_bucket_pedersen() {\n        graph_bucket::<PedersenHasher>();\n    }\n\n    fn gen_proof<H: 'static + Hasher, U: 'static + PoseidonArity>(config: Option<StoreConfig>) {\n        let leafs = 64;\n        let g = BucketGraph::<H>::new(leafs, BASE_DEGREE, 0, new_seed()).unwrap();\n        let data = vec![2u8; NODE_SIZE * leafs];\n\n        let mmapped = &mmap_from(&data);\n        let tree = create_base_merkle_tree::<\n            MerkleTreeWrapper<H, 
DiskStore<H::Domain>, U, typenum::U0, typenum::U0>,\n        >(config, g.size(), mmapped)\n        .unwrap();\n        let proof = tree.gen_proof(2).unwrap();\n\n        assert!(proof.verify());\n    }\n\n    #[test]\n    fn gen_proof_pedersen_binary() {\n        gen_proof::<PedersenHasher, typenum::U2>(None);\n    }\n\n    #[test]\n    fn gen_proof_poseidon_binary() {\n        gen_proof::<PoseidonHasher, typenum::U2>(None);\n    }\n\n    #[test]\n    fn gen_proof_sha256_binary() {\n        gen_proof::<Sha256Hasher, typenum::U2>(None);\n    }\n\n    #[test]\n    fn gen_proof_blake2s_binary() {\n        gen_proof::<Blake2sHasher, typenum::U2>(None);\n    }\n\n    #[test]\n    fn gen_proof_pedersen_quad() {\n        gen_proof::<PedersenHasher, typenum::U4>(None);\n    }\n\n    #[test]\n    fn gen_proof_poseidon_quad() {\n        gen_proof::<PoseidonHasher, typenum::U4>(None);\n    }\n\n    #[test]\n    fn gen_proof_sha256_quad() {\n        gen_proof::<Sha256Hasher, typenum::U4>(None);\n    }\n\n    #[test]\n    fn gen_proof_blake2s_quad() {\n        gen_proof::<Blake2sHasher, typenum::U4>(None);\n    }\n\n    #[test]\n    fn gen_proof_pedersen_oct() {\n        gen_proof::<PedersenHasher, typenum::U8>(None);\n    }\n\n    #[test]\n    fn gen_proof_poseidon_oct() {\n        gen_proof::<PoseidonHasher, typenum::U8>(None);\n    }\n}\n"
  },
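  {
    "path": "storage-proofs/core/examples/bucket_graph_parents.rs",
    "content": "//! Editor's illustrative sketch, not a file from the original source tree; the path and the\n//! `storage_proofs_core` crate name are assumptions.\n//!\n//! Samples the parents of one node in a small `BucketGraph`. The final parent slot always\n//! holds the immediate predecessor, which is what guarantees a unique topological ordering\n//! of the DRG.\n\nuse storage_proofs_core::drgraph::{new_seed, BucketGraph, Graph, BASE_DEGREE};\nuse storage_proofs_core::hasher::Sha256Hasher;\n\nfn main() {\n    let graph = BucketGraph::<Sha256Hasher>::new(16, BASE_DEGREE, 0, new_seed())\n        .expect(\"valid bucket graph parameters\");\n\n    let mut parents = vec![0u32; graph.degree()];\n    graph.parents(5, &mut parents).expect(\"node is in bounds\");\n\n    // All parents precede the node, and the last slot is node - 1.\n    assert!(parents.iter().all(|&p| (p as usize) < 5));\n    assert_eq!(parents[BASE_DEGREE - 1], 4);\n    println!(\"parents of node 5: {:?}\", parents);\n}\n"
  },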
  {
    "path": "storage-proofs/core/src/error.rs",
    "content": "use std::any::Any;\n\nuse bellperson::SynthesisError;\n\npub use anyhow::Result;\n\n/// Custom error types\n#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(\"Bytes could not be converted to Fr\")]\n    BadFrBytes,\n    #[error(\"Could not create PieceInclusionProof (probably bad piece commitment: comm_p)\")]\n    BadPieceCommitment,\n    #[error(\"Out of bounds access {} > {}\", _0, _1)]\n    OutOfBounds(usize, usize),\n    #[error(\"mismatch of data, node_size and nodes {} != {} * {}\", _0, _1, _2)]\n    InvalidMerkleTreeArgs(usize, usize, usize),\n    #[error(\"{}\", _0)]\n    Synthesis(#[from] SynthesisError),\n    #[error(\"{}\", _0)]\n    Io(#[from] ::std::io::Error),\n    #[error(\"tree root and commitment do not match\")]\n    InvalidCommitment,\n    #[error(\"malformed input\")]\n    MalformedInput,\n    #[error(\"malformed merkle tree\")]\n    MalformedMerkleTree,\n    #[error(\"invalid input size\")]\n    InvalidInputSize,\n    #[error(\"merkle tree generation error: {}\", _0)]\n    MerkleTreeGenerationError(String),\n    #[error(\"Cannot (yet) generate inclusion proof for unaligned piece.\")]\n    UnalignedPiece,\n    #[error(\"{}\", _0)]\n    Serde(#[from] serde_json::error::Error),\n    #[error(\"unclassified error: {}\", _0)]\n    Unclassified(String),\n    #[error(\"Missing Private Input {0} for sector {1}\")]\n    MissingPrivateInput(&'static str, u64),\n}\n\nimpl From<Box<dyn Any + Send>> for Error {\n    fn from(inner: Box<dyn Any + Send>) -> Error {\n        Error::Unclassified(format!(\"{:?}\", dbg!(inner)))\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/fr32.rs",
    "content": "use crate::error::*;\n\nuse anyhow::{ensure, Context};\nuse byteorder::{ByteOrder, LittleEndian, WriteBytesExt};\nuse ff::{PrimeField, PrimeFieldRepr};\nuse paired::bls12_381::{Fr, FrRepr};\n\n// Contains 32 bytes whose little-endian value represents an Fr.\n// Invariants:\n// - Value MUST represent a valid Fr.\n// - Length must be 32.\npub type Fr32 = [u8];\n\n// Contains one or more 32-byte chunks whose little-endian values represent Frs.\n// Invariants:\n// - Value of each 32-byte chunks MUST represent valid Frs.\n// - Total length must be a multiple of 32.\n// That is to say: each 32-byte chunk taken alone must be a valid Fr32.\npub type Fr32Vec = Vec<u8>;\n\n// Array whose little-endian value represents an Fr.\n// Invariants:\n// - Value MUST represent a valid Fr.\npub type Fr32Ary = [u8; 32];\n\n// Takes a slice of bytes and returns an Fr if byte slice is exactly 32 bytes and does not overflow.\n// Otherwise, returns a BadFrBytesError.\npub fn bytes_into_fr(bytes: &[u8]) -> Result<Fr> {\n    ensure!(bytes.len() == 32, Error::BadFrBytes);\n\n    let mut fr_repr = <<Fr as PrimeField>::Repr as Default>::default();\n    fr_repr.read_le(bytes).context(Error::BadFrBytes)?;\n\n    Fr::from_repr(fr_repr).map_err(|_| Error::BadFrBytes.into())\n}\n\n#[inline]\npub fn trim_bytes_to_fr_safe(r: &[u8]) -> Result<Vec<u8>> {\n    ensure!(r.len() == 32, Error::BadFrBytes);\n    let mut res = r[..32].to_vec();\n    // strip last two bits, to ensure result is in Fr.\n    res[31] &= 0b0011_1111;\n    Ok(res)\n}\n\n#[inline]\npub fn bytes_into_fr_repr_safe(r: &[u8]) -> FrRepr {\n    debug_assert!(r.len() == 32);\n\n    let repr: [u64; 4] = [\n        LittleEndian::read_u64(&r[0..8]),\n        LittleEndian::read_u64(&r[8..16]),\n        LittleEndian::read_u64(&r[16..24]),\n        u64::from(r[31] & 0b0011_1111) << 56\n            | u64::from(r[30]) << 48\n            | u64::from(r[29]) << 40\n            | u64::from(r[28]) << 32\n            | u64::from(r[27]) << 24\n            | u64::from(r[26]) << 16\n            | u64::from(r[25]) << 8\n            | u64::from(r[24]),\n    ];\n\n    FrRepr(repr)\n}\n\n// Takes an Fr and returns a vector of exactly 32 bytes guaranteed to contain a valid Fr.\npub fn fr_into_bytes(fr: &Fr) -> Fr32Vec {\n    let mut out = Vec::with_capacity(32);\n    fr.into_repr().write_le(&mut out).unwrap();\n    out\n}\n\n// Takes a slice of bytes and returns a vector of Fr -- or an error if either bytes is not a multiple of 32 bytes\n// or any 32-byte chunk overflows and does not contain a valid Fr.\npub fn bytes_into_frs(bytes: &[u8]) -> Result<Vec<Fr>> {\n    bytes\n        .chunks(32)\n        .map(|ref chunk| bytes_into_fr(chunk))\n        .collect()\n}\n\n// Takes a slice of Frs and returns a vector of bytes, guaranteed to have a size which is a multiple of 32,\n// with every 32-byte chunk representing a valid Fr.\npub fn frs_into_bytes(frs: &[Fr]) -> Fr32Vec {\n    frs.iter().flat_map(|fr| fr_into_bytes(fr)).collect()\n}\n\n// Takes a u32 and returns an Fr.\npub fn u32_into_fr(n: u32) -> Fr {\n    let mut buf: Fr32Vec = vec![0u8; 32];\n    let mut w = &mut buf[0..4];\n    w.write_u32::<LittleEndian>(n).unwrap();\n\n    bytes_into_fr(&buf).expect(\"should never fail since u32 is in the field\")\n}\n\n// Takes a u64 and returns an Fr.\npub fn u64_into_fr(n: u64) -> Fr {\n    let mut buf: Fr32Vec = vec![0u8; 32];\n    let mut w = &mut buf[0..8];\n    w.write_u64::<LittleEndian>(n).unwrap();\n\n    bytes_into_fr(&buf).expect(\"should never fail since u64 is in the 
field\")\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    fn bytes_fr_test(bytes: Fr32Ary, expect_success: bool) {\n        let mut b = &bytes[..];\n        let fr_result = bytes_into_fr(&mut b);\n        if expect_success {\n            let f = fr_result.expect(\"Failed to convert bytes to `Fr`\");\n            let b2 = fr_into_bytes(&f);\n\n            assert_eq!(bytes.to_vec(), b2);\n        } else {\n            assert!(fr_result.is_err(), \"expected a decoding error\")\n        }\n    }\n    #[test]\n    fn test_bytes_into_fr_into_bytes() {\n        bytes_fr_test(\n            [\n                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,\n                23, 24, 25, 26, 27, 28, 29, 30, 31,\n            ],\n            true,\n        );\n        bytes_fr_test(\n            // Some bytes fail because they are not in the field.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 115,\n            ],\n            false,\n        );\n        bytes_fr_test(\n            // This is okay.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 114,\n            ],\n            true,\n        );\n        bytes_fr_test(\n            // So is this.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 236, 115,\n            ],\n            true,\n        );\n        bytes_fr_test(\n            // But not this.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 237, 115,\n            ],\n            false,\n        );\n    }\n\n    fn bytes_into_frs_into_bytes_test(bytes: &Fr32) {\n        let mut bytes = bytes.clone();\n        let frs = bytes_into_frs(&mut bytes).expect(\"Failed to convert bytes into a `Vec<Fr>`\");\n        assert!(frs.len() == 3);\n        let bytes_back = frs_into_bytes(&frs);\n        assert!(bytes.to_vec() == bytes_back);\n    }\n\n    #[test]\n    fn test_bytes_into_frs_into_bytes() {\n        let bytes = b\"012345678901234567890123456789--012345678901234567890123456789--012345678901234567890123456789--\";\n        bytes_into_frs_into_bytes_test(&bytes[..]);\n\n        let _short_bytes = b\"012345678901234567890123456789--01234567890123456789\";\n        // This will panic because _short_bytes is not a multiple of 32 bytes.\n        // bytes_into_frs_into_bytes_test(&_short_bytes[..]);\n    }\n}\n"
  },
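  {
    "path": "storage-proofs/core/examples/fr32_roundtrip.rs",
    "content": "//! Editor's illustrative sketch, not a file from the original source tree; the path and the\n//! `storage_proofs_core` crate name are assumptions.\n//!\n//! Round-trips a small field element through its 32-byte little-endian form. Every u64 is\n//! far below the BLS12-381 scalar modulus, so `u64_into_fr` cannot overflow the field.\n\nuse storage_proofs_core::fr32::{bytes_into_fr, fr_into_bytes, u64_into_fr};\n\nfn main() {\n    let fr = u64_into_fr(12345);\n\n    let bytes = fr_into_bytes(&fr);\n    assert_eq!(bytes.len(), 32);\n\n    let fr2 = bytes_into_fr(&bytes).expect(\"canonical bytes are a valid Fr\");\n    assert_eq!(fr, fr2);\n}\n"
  },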
  {
    "path": "storage-proofs/core/src/gadgets/bench/mod.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};\nuse paired::Engine;\n\n#[derive(Debug)]\npub struct BenchCS<E: Engine> {\n    inputs: usize,\n    aux: usize,\n    a: usize,\n    b: usize,\n    c: usize,\n    _e: PhantomData<E>,\n}\n\nimpl<E: Engine> BenchCS<E> {\n    pub fn new() -> Self {\n        BenchCS::default()\n    }\n\n    pub fn num_constraints(&self) -> usize {\n        self.a\n    }\n\n    pub fn num_inputs(&self) -> usize {\n        self.inputs\n    }\n}\n\nimpl<E: Engine> Default for BenchCS<E> {\n    fn default() -> Self {\n        BenchCS {\n            inputs: 1,\n            aux: 0,\n            a: 0,\n            b: 0,\n            c: 0,\n            _e: PhantomData,\n        }\n    }\n}\n\nimpl<E: Engine> ConstraintSystem<E> for BenchCS<E> {\n    type Root = Self;\n\n    fn alloc<F, A, AR>(&mut self, _: A, _f: F) -> Result<Variable, SynthesisError>\n    where\n        F: FnOnce() -> Result<E::Fr, SynthesisError>,\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n    {\n        // don't invoke f, we just count\n        self.aux += 1;\n\n        Ok(Variable::new_unchecked(Index::Aux(self.aux - 1)))\n    }\n\n    fn alloc_input<F, A, AR>(&mut self, _: A, _f: F) -> Result<Variable, SynthesisError>\n    where\n        F: FnOnce() -> Result<E::Fr, SynthesisError>,\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n    {\n        // don't invoke f, we just count\n        self.inputs += 1;\n\n        Ok(Variable::new_unchecked(Index::Input(self.inputs - 1)))\n    }\n\n    fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, _a: LA, _b: LB, _c: LC)\n    where\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n        LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n        LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n        LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n    {\n        self.a += 1;\n        self.b += 1;\n        self.c += 1;\n    }\n\n    fn push_namespace<NR, N>(&mut self, _: N)\n    where\n        NR: Into<String>,\n        N: FnOnce() -> NR,\n    {\n    }\n\n    fn pop_namespace(&mut self) {}\n\n    fn get_root(&mut self) -> &mut Self::Root {\n        self\n    }\n}\n"
  },
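  {
    "path": "storage-proofs/core/examples/bench_cs_count.rs",
    "content": "//! Editor's illustrative sketch, not a file from the original source tree; the path and the\n//! `storage_proofs_core` crate name are assumptions.\n//!\n//! `BenchCS` only counts allocations and constraints; it never invokes the assignment\n//! closures, which makes it cheap for sizing circuits.\n\nuse bellperson::gadgets::num::AllocatedNum;\nuse bellperson::ConstraintSystem;\nuse ff::Field;\nuse paired::bls12_381::{Bls12, Fr};\nuse storage_proofs_core::gadgets::bench::BenchCS;\n\nfn main() {\n    let mut cs = BenchCS::<Bls12>::new();\n\n    let a = AllocatedNum::alloc(cs.namespace(|| \"a\"), || Ok(Fr::one())).unwrap();\n    let b = AllocatedNum::alloc(cs.namespace(|| \"b\"), || Ok(Fr::one())).unwrap();\n\n    // `mul` allocates the product and enforces a * b = product: one constraint.\n    a.mul(cs.namespace(|| \"a * b\"), &b).unwrap();\n\n    assert_eq!(cs.num_constraints(), 1);\n    println!(\"constraints: {}\", cs.num_constraints());\n}\n"
  },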
  {
    "path": "storage-proofs/core/src/gadgets/constraint.rs",
    "content": "use bellperson::{gadgets::num, ConstraintSystem, SynthesisError};\nuse ff::Field;\nuse paired::Engine;\n\n/// Adds a constraint to CS, enforcing an equality relationship between the allocated numbers a and b.\n///\n/// a == b\npub fn equal<E: Engine, A, AR, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    annotation: A,\n    a: &num::AllocatedNum<E>,\n    b: &num::AllocatedNum<E>,\n) where\n    A: FnOnce() -> AR,\n    AR: Into<String>,\n{\n    // a * 1 = b\n    cs.enforce(\n        annotation,\n        |lc| lc + a.get_variable(),\n        |lc| lc + CS::one(),\n        |lc| lc + b.get_variable(),\n    );\n}\n\n/// Adds a constraint to CS, enforcing a add relationship between the allocated numbers a, b, and sum.\n///\n/// a + b = sum\npub fn sum<E: Engine, A, AR, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    annotation: A,\n    a: &num::AllocatedNum<E>,\n    b: &num::AllocatedNum<E>,\n    sum: &num::AllocatedNum<E>,\n) where\n    A: FnOnce() -> AR,\n    AR: Into<String>,\n{\n    // (a + b) * 1 = sum\n    cs.enforce(\n        annotation,\n        |lc| lc + a.get_variable() + b.get_variable(),\n        |lc| lc + CS::one(),\n        |lc| lc + sum.get_variable(),\n    );\n}\n\npub fn add<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    a: &num::AllocatedNum<E>,\n    b: &num::AllocatedNum<E>,\n) -> Result<num::AllocatedNum<E>, SynthesisError> {\n    let res = num::AllocatedNum::alloc(cs.namespace(|| \"add_num\"), || {\n        let mut tmp = a\n            .get_value()\n            .ok_or_else(|| SynthesisError::AssignmentMissing)?;\n        tmp.add_assign(\n            &b.get_value()\n                .ok_or_else(|| SynthesisError::AssignmentMissing)?,\n        );\n\n        Ok(tmp)\n    })?;\n\n    // a + b = res\n    sum(&mut cs, || \"sum constraint\", &a, &b, &res);\n\n    Ok(res)\n}\n\npub fn sub<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    a: &num::AllocatedNum<E>,\n    b: &num::AllocatedNum<E>,\n) -> Result<num::AllocatedNum<E>, SynthesisError> {\n    let res = num::AllocatedNum::alloc(cs.namespace(|| \"sub_num\"), || {\n        let mut tmp = a\n            .get_value()\n            .ok_or_else(|| SynthesisError::AssignmentMissing)?;\n        tmp.sub_assign(\n            &b.get_value()\n                .ok_or_else(|| SynthesisError::AssignmentMissing)?,\n        );\n\n        Ok(tmp)\n    })?;\n\n    // a - b = res\n    difference(&mut cs, || \"subtraction constraint\", &a, &b, &res);\n\n    Ok(res)\n}\n\n/// Adds a constraint to CS, enforcing a difference relationship between the allocated numbers a, b, and difference.\n///\n/// a - b = difference\npub fn difference<E: Engine, A, AR, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    annotation: A,\n    a: &num::AllocatedNum<E>,\n    b: &num::AllocatedNum<E>,\n    difference: &num::AllocatedNum<E>,\n) where\n    A: FnOnce() -> AR,\n    AR: Into<String>,\n{\n    //    difference = a-b\n    // => difference + b = a\n    // => (difference + b) * 1 = a\n    cs.enforce(\n        annotation,\n        |lc| lc + difference.get_variable() + b.get_variable(),\n        |lc| lc + CS::one(),\n        |lc| lc + a.get_variable(),\n    );\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use crate::gadgets::TestConstraintSystem;\n    use paired::bls12_381::{Bls12, Fr};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn add_constraint() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for _ in 0..100 {\n            let mut cs = 
TestConstraintSystem::<Bls12>::new();\n\n            let a = num::AllocatedNum::alloc(cs.namespace(|| \"a\"), || Ok(Fr::random(rng))).unwrap();\n            let b = num::AllocatedNum::alloc(cs.namespace(|| \"b\"), || Ok(Fr::random(rng))).unwrap();\n\n            let res = add(cs.namespace(|| \"a+b\"), &a, &b).expect(\"add failed\");\n\n            let mut tmp = a.get_value().unwrap().clone();\n            tmp.add_assign(&b.get_value().unwrap());\n\n            assert_eq!(res.get_value().unwrap(), tmp);\n            assert!(cs.is_satisfied());\n        }\n    }\n\n    #[test]\n    fn sub_constraint() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for _ in 0..100 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let a = num::AllocatedNum::alloc(cs.namespace(|| \"a\"), || Ok(Fr::random(rng))).unwrap();\n            let b = num::AllocatedNum::alloc(cs.namespace(|| \"b\"), || Ok(Fr::random(rng))).unwrap();\n\n            let res = sub(cs.namespace(|| \"a-b\"), &a, &b).expect(\"subtraction failed\");\n\n            let mut tmp = a.get_value().unwrap().clone();\n            tmp.sub_assign(&b.get_value().unwrap());\n\n            assert_eq!(res.get_value().unwrap(), tmp);\n            assert!(cs.is_satisfied());\n        }\n    }\n}\n"
  },
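  {
    "path": "storage-proofs/core/src/gadgets/constraint_example.rs",
    "content": "//! Hypothetical usage sketch for the `equal` gadget in `constraint.rs`. This file is an\n//! editorial addition: it is not part of the original crate and is not registered in\n//! `gadgets/mod.rs`. It shows that the single `a * 1 = b` constraint is satisfiable exactly\n//! when the two allocated numbers agree.\n\n#[cfg(test)]\nmod tests {\n    use crate::gadgets::constraint::equal;\n    use crate::gadgets::TestConstraintSystem;\n    use bellperson::gadgets::num;\n    use bellperson::ConstraintSystem;\n    use ff::Field;\n    use paired::bls12_381::{Bls12, Fr};\n\n    #[test]\n    fn equal_gadget_satisfiability() {\n        // Equal assignments satisfy the single enforced constraint.\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let a = num::AllocatedNum::alloc(cs.namespace(|| \"a\"), || Ok(Fr::one())).unwrap();\n        let b = num::AllocatedNum::alloc(cs.namespace(|| \"b\"), || Ok(Fr::one())).unwrap();\n        equal(&mut cs, || \"a == b\", &a, &b);\n        assert_eq!(cs.num_constraints(), 1);\n        assert!(cs.is_satisfied());\n\n        // Unequal assignments violate it.\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let a = num::AllocatedNum::alloc(cs.namespace(|| \"a\"), || Ok(Fr::one())).unwrap();\n        let b = num::AllocatedNum::alloc(cs.namespace(|| \"b\"), || Ok(Fr::zero())).unwrap();\n        equal(&mut cs, || \"a == b\", &a, &b);\n        assert!(!cs.is_satisfied());\n    }\n}\n"
  },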
  {
    "path": "storage-proofs/core/src/gadgets/encode.rs",
    "content": "use bellperson::gadgets::num;\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse paired::Engine;\n\nuse crate::gadgets::constraint;\n\npub fn encode<E, CS>(\n    mut cs: CS,\n    key: &num::AllocatedNum<E>,\n    value: &num::AllocatedNum<E>,\n) -> Result<num::AllocatedNum<E>, SynthesisError>\nwhere\n    E: Engine,\n    CS: ConstraintSystem<E>,\n{\n    constraint::add(cs.namespace(|| \"encode_add\"), key, value)\n}\n\npub fn decode<E, CS>(\n    mut cs: CS,\n    key: &num::AllocatedNum<E>,\n    value: &num::AllocatedNum<E>,\n) -> Result<num::AllocatedNum<E>, SynthesisError>\nwhere\n    E: Engine,\n    CS: ConstraintSystem<E>,\n{\n    constraint::sub(cs.namespace(|| \"decode_sub\"), value, key)\n}\n"
  },
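  {
    "path": "storage-proofs/core/src/gadgets/encode_example.rs",
    "content": "//! Hypothetical usage sketch for the `encode`/`decode` gadgets. This file is an editorial\n//! addition: it is not part of the original crate and is not registered in `gadgets/mod.rs`.\n//! Since `encode` constrains `key + value` and `decode` constrains `value - key`, decoding an\n//! encoded value with the same key must return the original value.\n\n#[cfg(test)]\nmod tests {\n    use crate::gadgets::encode::{decode, encode};\n    use crate::gadgets::TestConstraintSystem;\n    use bellperson::gadgets::num;\n    use bellperson::ConstraintSystem;\n    use ff::Field;\n    use paired::bls12_381::{Bls12, Fr};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn encode_decode_roundtrip() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        let key = num::AllocatedNum::alloc(cs.namespace(|| \"key\"), || Ok(Fr::random(rng))).unwrap();\n        let value =\n            num::AllocatedNum::alloc(cs.namespace(|| \"value\"), || Ok(Fr::random(rng))).unwrap();\n\n        let encoded = encode(cs.namespace(|| \"encode\"), &key, &value).expect(\"encode failed\");\n        let decoded = decode(cs.namespace(|| \"decode\"), &key, &encoded).expect(\"decode failed\");\n\n        assert_eq!(decoded.get_value(), value.get_value());\n        assert!(cs.is_satisfied());\n    }\n}\n"
  },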
  {
    "path": "storage-proofs/core/src/gadgets/insertion.rs",
    "content": "//! Insertion Permutation\n//!\n//! Insert an `AllocatedNum` into a sequence of `AllocatedNums` at an arbitrary position.\n//! This can be thought of as a generalization of `AllocatedNum::conditionally_reverse` and reduces to it in the binary case.\n\nuse bellperson::gadgets::boolean::{AllocatedBit, Boolean};\nuse bellperson::gadgets::num::AllocatedNum;\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse ff::Field;\nuse paired::Engine;\n\n/// Insert `element` after the nth 1-indexed element of `elements`, where `path_bits` represents n, least-significant bit first.\n/// The returned result contains a new vector of `AllocatedNum`s with `element` inserted, and constraints are enforced.\n/// `elements.len() + 1` must be a power of two.\npub fn insert<E: Engine, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    element: &AllocatedNum<E>,\n    bits: &[Boolean],\n    elements: &[AllocatedNum<E>],\n) -> Result<Vec<AllocatedNum<E>>, SynthesisError> {\n    let size = elements.len() + 1;\n    assert_eq!(1 << bits.len(), size);\n\n    // For the sizes we know we need, we can take advantage of redundancy in the candidate selection at each position.\n    // This allows us to accomplish insertion with fewer constraints, if we hand-optimize.\n    // We don't need a special case for size 2 because the general algorithm\n    // collapses to `conditionally_reverse` when size = 2.\n    //\n    // If no special cases have been hand-coded, use the general algorithm.\n    // This costs size * (size - 1) constraints.\n    //\n    // Future work: In theory, we could compile arbitrary lookup tables to minimize constraints and avoid\n    // the most general case except when actually required — which it never is for simple insertion.\n    if size == 2 {\n        return insert_2(cs, element, bits, elements);\n    } else if size == 4 {\n        return insert_4(cs, element, bits, elements);\n    } else if size == 8 {\n        return insert_8(cs, element, bits, elements);\n    };\n\n    // Running example choices, represent inserting x into [1, 2, 3].\n\n    // An indexed sequence of correct results, one of which (the indexed one) will be selected.\n    let mut potential_results = Vec::new();\n    for index in 0..size {\n        // These are the results when bits corresponds to index.\n        //\n        // index | result\n        //-------+-------\n        // 0     | x 1 2 3\n        // 1     | 1 x 2 3\n        // 2     | 1 2 x 3\n        // 3     | 1 2 3 x\n        let mut result = Vec::new();\n        (0..index).for_each(|i| result.push(elements[i].clone()));\n        result.push(element.clone());\n        (index..elements.len()).for_each(|i| result.push(elements[i].clone()));\n\n        potential_results.push(result);\n    }\n\n    let mut result = Vec::new();\n    for pos in 0..size {\n        // These are the choices needed such that for each position in the selected result,\n        // the value is column-for-pos[index].\n        //\n        // This table is constructed by reading columns from the index-result table above.\n        // Reading columns from this table yields the result table.\n\n        // pos   column\n        // 0     x 1 1 1\n        // 1     1 x 2 2\n        // 2     2 2 x 3\n        // 3     3 3 3 x\n        let choices = (0..size)\n            .map(|index| potential_results[index][pos].clone())\n            .collect::<Vec<_>>();\n\n        result.push(select(\n            cs.namespace(|| format!(\"choice at {}\", pos)),\n            &choices,\n            bits,\n        
)?);\n    }\n\n    Ok(result)\n}\n\npub fn insert_2<E: Engine, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    element: &AllocatedNum<E>,\n    bits: &[Boolean],\n    elements: &[AllocatedNum<E>],\n) -> Result<Vec<AllocatedNum<E>>, SynthesisError> {\n    assert_eq!(elements.len() + 1, 2);\n    assert_eq!(bits.len(), 1);\n\n    Ok(vec![\n        pick(\n            cs.namespace(|| \"binary insert 0\"),\n            &bits[0],\n            &elements[0],\n            &element,\n        )?,\n        pick(\n            cs.namespace(|| \"binary insert 1\"),\n            &bits[0],\n            &element,\n            &elements[0],\n        )?,\n    ])\n}\n\npub fn insert_4<E: Engine, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    element: &AllocatedNum<E>,\n    bits: &[Boolean],\n    elements: &[AllocatedNum<E>],\n) -> Result<Vec<AllocatedNum<E>>, SynthesisError> {\n    assert_eq!(elements.len() + 1, 4);\n    assert_eq!(bits.len(), 2);\n\n    /*\n    To insert A into [b, c, d] at position n of bits, represented by booleans [b0, b1].\n    n [b0, b1] pos 0 1 2 3\n    0 [0, 0]       A b c d\n    1 [1, 0]       b A c d\n    2 [0, 1]       b c A d\n    3 [1, 1]       b c d A\n\n    A = element\n    b = elements[0]\n    c = elements[1]\n    d = elements[2]\n     */\n    let (b0, b1) = (&bits[0], &bits[1]);\n    let (a, b, c, d) = (&element, &elements[0], &elements[1], &elements[2]);\n\n    /// Define witness macro to allow legible definition of positional constraints.\n    /// See example expansions in comment to first usages below.\n    macro_rules! witness {\n        ( $var:ident <== if $cond:ident { $a:expr } else { $b:expr }) => {\n            let $var = pick(cs.namespace(|| stringify!($var)), $cond, $a, $b)?;\n        };\n    }\n\n    // Witness naming convention:\n    // `p0_x0` means \"Output position 0 when b0 is unknown (x) and b1 is 0.\"\n\n    // Declaration:\n    witness!(p0_x0 <== if b0 { b } else { a });\n    witness!(p0 <== if b1 { b } else { &p0_x0 });\n    // Expansion:\n    // let p0_x0 = pick(cs.namespace(|| \"p0_x0\"), b0, b, a)?;\n    // let p0 = pick(cs.namespace(|| \"p0\"), b1, b, &p0_x0)?;\n\n    witness!(p1_x0 <== if b0 { a } else { b });\n    witness!(p1 <== if b1 { c } else { &p1_x0 });\n\n    witness!(p2_x1 <== if b0 { d } else { a });\n    witness!(p2 <== if b1 { &p2_x1 } else { c });\n\n    witness!(p3_x1 <== if b0 { a } else { d });\n    witness!(p3 <== if b1 { &p3_x1 } else { d });\n\n    Ok(vec![p0, p1, p2, p3])\n}\n\npub fn insert_8<E: Engine, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    element: &AllocatedNum<E>,\n    bits: &[Boolean],\n    elements: &[AllocatedNum<E>],\n) -> Result<Vec<AllocatedNum<E>>, SynthesisError> {\n    assert_eq!(elements.len() + 1, 8);\n    assert_eq!(bits.len(), 3);\n    /*\n    To insert A into [b, c, d, e, f, g, h] at position n of bits, represented by booleans [b0, b1, b2].\n    n [b0, b1, b2] pos 0 1 2 3 4 5 6 7\n    0 [0, 0, 0]        A b c d e f g h\n    1 [1, 0, 0]        b A c d e f g h\n    2 [0, 1, 0]        b c A d e f g h\n    3 [1, 1, 0]        b c d A e f g h\n    4 [0, 0, 1]        b c d e A f g h\n    5 [1, 0, 1]        b c d e f A g h\n    6 [0, 1, 1]        b c d e f g A h\n    7 [1, 1, 1]        b c d e f g h A\n\n\n    A = element\n    b = elements[0]\n    c = elements[1]\n    d = elements[2]\n    e = elements[3]\n    f = elements[4]\n    g = elements[5]\n    h = elements[6]\n     */\n\n    let (b0, b1, b2) = (&bits[0], &bits[1], &bits[2]);\n    let (a, b, c, d, e, f, g, h) = (\n        &element,\n        
&elements[0],\n        &elements[1],\n        &elements[2],\n        &elements[3],\n        &elements[4],\n        &elements[5],\n        &elements[6],\n    );\n\n    // true if booleans b0 and b1 are both false: `(not b0) and (not b1)`\n    // (1 - b0) * (1 - b1) = 1\n    let b0_nor_b1 = match (b0, b1) {\n        (Boolean::Is(ref b0), Boolean::Is(ref b1)) => {\n            Boolean::Is(AllocatedBit::nor(cs.namespace(|| \"b0 nor b1\"), b0, b1)?)\n        }\n        _ => panic!(\"bits must be allocated and unnegated\"),\n    };\n\n    // true if booleans b0 and b1 are both true: `b0 and b1`\n    // b0 * b1 = 1\n    let b0_and_b1 = match (&bits[0], &bits[1]) {\n        (Boolean::Is(ref b0), Boolean::Is(ref b1)) => {\n            Boolean::Is(AllocatedBit::and(cs.namespace(|| \"b0 and b1\"), b0, b1)?)\n        }\n        _ => panic!(\"bits must be allocated and unnegated\"),\n    };\n\n    /// Define witness macro to allow legible definition of positional constraints.\n    /// See example expansions in comment to first usages below.\n    macro_rules! witness {\n        ( $var:ident <== if $cond:ident { $a:expr } else { $b:expr }) => {\n            let $var = pick(cs.namespace(|| stringify!($var)), $cond, $a, $b)?;\n        };\n\n        // Match condition terms which are explicit syntactic references.\n        ( $var:ident <== if &$cond:ident { $a:expr } else { $b:expr }) => {\n            let $var = pick(cs.namespace(|| stringify!($var)), &$cond, $a, $b)?;\n        };\n    }\n\n    // Declaration:\n    witness!(p0_xx0 <== if &b0_nor_b1 { a } else { b });\n    witness!(p0 <== if b2 { b } else { &p0_xx0 });\n    // Expansion:\n    // let p0_xx0 = pick(cs.namespace(|| \"p0_xx0\"), &b0_nor_b1, a, b)?;\n    // let p0 = pick(cs.namespace(|| \"p0\"), b2, b, &p0_xx0)?;\n\n    witness!(p1_x00 <== if b0 { a } else { b });\n    witness!(p1_xx0 <== if b1 { c } else { &p1_x00 });\n    witness!(p1 <== if b2 { c } else { &p1_xx0 });\n\n    witness!(p2_x10 <== if b0 { d } else { a });\n    witness!(p2_xx0 <== if b1 { &p2_x10 } else { c });\n    witness!(p2 <== if b2 { d } else { &p2_xx0 });\n\n    witness!(p3_xx0 <== if &b0_and_b1 { a } else { d });\n    witness!(p3 <== if b2 { e } else { &p3_xx0 });\n\n    witness!(p4_xx1 <== if &b0_nor_b1 { a } else { f });\n    witness!(p4 <== if b2 { &p4_xx1 } else { e });\n\n    witness!(p5_x01 <== if b0 { a } else { f });\n    witness!(p5_xx1 <== if b1 { g } else { &p5_x01 });\n    witness!(p5 <== if b2 { &p5_xx1 } else { f });\n\n    witness!(p6_x11 <== if b0 { h } else { a });\n    witness!(p6_xx1 <== if b1 { &p6_x11 } else { g });\n    witness!(p6 <== if b2 { &p6_xx1 } else { g });\n\n    witness!(p7_xx1 <== if &b0_and_b1 { a } else { h });\n    witness!(p7 <== if b2 { &p7_xx1 } else { h });\n\n    Ok(vec![p0, p1, p2, p3, p4, p5, p6, p7])\n}\n\n/// Select the nth element of `from`, where `path_bits` represents n, least-significant bit first.\n/// The returned result contains the selected element, and constraints are enforced.\n/// `from.len()` must be a power of two.\npub fn select<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    from: &[AllocatedNum<E>],\n    path_bits: &[Boolean],\n) -> Result<AllocatedNum<E>, SynthesisError> {\n    let pathlen = path_bits.len();\n    assert_eq!(1 << pathlen, from.len());\n\n    let mut state = Vec::new();\n    for elt in from {\n        state.push(elt.clone())\n    }\n    let mut half_size = from.len() / 2;\n\n    // We reverse the path bits because the contained algorithm consumes most significant bit first.\n    for (i, 
bit) in path_bits.iter().rev().enumerate() {\n        let mut new_state = Vec::new();\n        for j in 0..half_size {\n            new_state.push(pick(\n                cs.namespace(|| format!(\"pick {}, {}\", i, j)),\n                bit,\n                &state[half_size + j],\n                &state[j],\n            )?);\n        }\n        state = new_state;\n        half_size /= 2;\n    }\n\n    Ok(state.remove(0))\n}\n\n/// Takes two allocated numbers (`a`, `b`) and returns `a` if the condition is true, and `b` otherwise.\npub fn pick<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    condition: &Boolean,\n    a: &AllocatedNum<E>,\n    b: &AllocatedNum<E>,\n) -> Result<AllocatedNum<E>, SynthesisError>\nwhere\n    CS: ConstraintSystem<E>,\n{\n    let c = AllocatedNum::alloc(cs.namespace(|| \"pick result\"), || {\n        if condition\n            .get_value()\n            .ok_or(SynthesisError::AssignmentMissing)?\n        {\n            Ok(a.get_value().ok_or(SynthesisError::AssignmentMissing)?)\n        } else {\n            Ok(b.get_value().ok_or(SynthesisError::AssignmentMissing)?)\n        }\n    })?;\n\n    // Constrain (b - a) * condition = (b - c), ensuring c = a iff\n    // condition is true, otherwise c = b.\n    cs.enforce(\n        || \"pick\",\n        |lc| lc + b.get_variable() - a.get_variable(),\n        |_| condition.lc(CS::one(), E::Fr::one()),\n        |lc| lc + b.get_variable() - c.get_variable(),\n    );\n\n    Ok(c)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use crate::gadgets::TestConstraintSystem;\n    use bellperson::gadgets::boolean::AllocatedBit;\n    use ff::Field;\n    use paired::bls12_381::{Bls12, Fr};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn test_select() {\n        for log_size in 1..5 {\n            let size = 1 << log_size;\n            for index in 0..size {\n                // Initialize rng in loop to simplify debugging with consistent elements.\n                let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n                let mut cs = TestConstraintSystem::new();\n\n                let elements: Vec<_> = (0..size)\n                    .map(|i| {\n                        AllocatedNum::<Bls12>::alloc(\n                            &mut cs.namespace(|| format!(\"element {}\", i)),\n                            || {\n                                let elt = <Fr as Field>::random(rng);\n                                Ok(elt)\n                            },\n                        )\n                        .unwrap()\n                    })\n                    .collect();\n\n                let path_bits = (0..log_size)\n                    .map(|i| {\n                        <Boolean as std::convert::From<AllocatedBit>>::from(\n                            AllocatedBit::alloc(cs.namespace(|| format!(\"index bit {}\", i)), {\n                                let bit = ((index >> i) & 1) == 1;\n                                Some(bit)\n                            })\n                            .unwrap(),\n                        )\n                    })\n                    .collect::<Vec<_>>();\n\n                let test_constraints = cs.num_constraints();\n                assert_eq!(log_size, test_constraints);\n\n                let selected = select(cs.namespace(|| \"select\"), &elements, &path_bits).unwrap();\n\n                assert!(cs.is_satisfied());\n                assert_eq!(elements[index].get_value(), selected.get_value());\n\n                // One 
constraint per non-leaf node of a binary tree with `size` leaves.\n                let expected_constraints = size - 1;\n\n                let actual_constraints = cs.num_constraints() - test_constraints;\n                assert_eq!(expected_constraints, actual_constraints);\n            }\n        }\n    }\n\n    #[test]\n    fn test_insert() {\n        for log_size in 1..=4 {\n            let size = 1 << log_size;\n            for index in 0..size {\n                // Initialize rng in loop to simplify debugging with consistent elements.\n                let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n                let mut cs = TestConstraintSystem::new();\n\n                let elements: Vec<_> = (0..size - 1)\n                    .map(|i| {\n                        AllocatedNum::<Bls12>::alloc(\n                            &mut cs.namespace(|| format!(\"element {}\", i)),\n                            || {\n                                let elt = <Fr as Field>::random(rng);\n                                Ok(elt)\n                            },\n                        )\n                        .unwrap()\n                    })\n                    .collect();\n\n                let to_insert =\n                    AllocatedNum::<Bls12>::alloc(&mut cs.namespace(|| \"insert\"), || {\n                        let elt_to_insert = <Fr as Field>::random(rng);\n                        Ok(elt_to_insert)\n                    })\n                    .unwrap();\n\n                let index_bits = (0..log_size)\n                    .map(|i| {\n                        <Boolean as std::convert::From<AllocatedBit>>::from(\n                            AllocatedBit::alloc(cs.namespace(|| format!(\"index bit {}\", i)), {\n                                let bit = ((index >> i) & 1) == 1;\n                                Some(bit)\n                            })\n                            .unwrap(),\n                        )\n                    })\n                    .collect::<Vec<_>>();\n\n                let test_constraints = cs.num_constraints();\n                assert_eq!(log_size, test_constraints);\n\n                let mut inserted = insert(\n                    &mut cs,\n                    &to_insert.clone(),\n                    index_bits.as_slice(),\n                    &elements.as_slice(),\n                )\n                .unwrap();\n\n                assert!(cs.is_satisfied());\n\n                let extracted = inserted.remove(index);\n                assert_eq!(to_insert.get_value(), extracted.get_value(),);\n\n                for i in 0..size - 1 {\n                    let a = elements[i].get_value();\n                    let b = inserted[i].get_value();\n                    assert_eq!(a, b)\n                }\n\n                // One selection for each element of the result.\n                let expected_constraints = match size {\n                    8 => 22, // unoptimized, would be 56\n                    4 => 8,  // unoptimized, would be 12\n                    _ => size * (size - 1),\n                };\n\n                let actual_constraints = cs.num_constraints() - test_constraints;\n                assert_eq!(expected_constraints, actual_constraints);\n            }\n        }\n    }\n}\n"
  },
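  {
    "path": "storage-proofs/core/src/gadgets/insertion_example.rs",
    "content": "//! Hypothetical usage sketch for the `pick` gadget in `insertion.rs`. This file is an\n//! editorial addition: it is not part of the original crate and is not registered in\n//! `gadgets/mod.rs`. The enforced constraint `(b - a) * condition = (b - c)` makes the\n//! result equal `a` when the condition is true and `b` otherwise.\n\n#[cfg(test)]\nmod tests {\n    use crate::gadgets::insertion::pick;\n    use crate::gadgets::TestConstraintSystem;\n    use bellperson::gadgets::boolean::{AllocatedBit, Boolean};\n    use bellperson::gadgets::num::AllocatedNum;\n    use bellperson::ConstraintSystem;\n    use ff::Field;\n    use paired::bls12_381::{Bls12, Fr};\n\n    #[test]\n    fn pick_selects_by_condition() {\n        for &flag in &[true, false] {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let a = AllocatedNum::alloc(cs.namespace(|| \"a\"), || Ok(Fr::one())).unwrap();\n            let b = AllocatedNum::alloc(cs.namespace(|| \"b\"), || Ok(Fr::zero())).unwrap();\n            let condition =\n                Boolean::from(AllocatedBit::alloc(cs.namespace(|| \"cond\"), Some(flag)).unwrap());\n\n            let c = pick(cs.namespace(|| \"pick\"), &condition, &a, &b).expect(\"pick failed\");\n\n            let expected = if flag { a.get_value() } else { b.get_value() };\n            assert_eq!(c.get_value(), expected);\n            assert!(cs.is_satisfied());\n        }\n    }\n}\n"
  },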
  {
    "path": "storage-proofs/core/src/gadgets/metric/mod.rs",
    "content": "use bellperson::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};\nuse paired::Engine;\nuse std::cmp::Ordering;\nuse std::collections::HashMap;\n\n#[derive(Clone, Copy)]\nstruct OrderedVariable(Variable);\n\n#[derive(Debug)]\nenum NamedObject {\n    Constraint(usize),\n    Var(Variable),\n    Namespace,\n}\n\nimpl Eq for OrderedVariable {}\nimpl PartialEq for OrderedVariable {\n    fn eq(&self, other: &OrderedVariable) -> bool {\n        match (self.0.get_unchecked(), other.0.get_unchecked()) {\n            (Index::Input(ref a), Index::Input(ref b)) => a == b,\n            (Index::Aux(ref a), Index::Aux(ref b)) => a == b,\n            _ => false,\n        }\n    }\n}\nimpl PartialOrd for OrderedVariable {\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\nimpl Ord for OrderedVariable {\n    fn cmp(&self, other: &Self) -> Ordering {\n        match (self.0.get_unchecked(), other.0.get_unchecked()) {\n            (Index::Input(ref a), Index::Input(ref b)) => a.cmp(b),\n            (Index::Aux(ref a), Index::Aux(ref b)) => a.cmp(b),\n            (Index::Input(_), Index::Aux(_)) => Ordering::Less,\n            (Index::Aux(_), Index::Input(_)) => Ordering::Greater,\n        }\n    }\n}\n\npub struct MetricCS<E: Engine> {\n    named_objects: HashMap<String, NamedObject>,\n    current_namespace: Vec<String>,\n    #[allow(clippy::type_complexity)]\n    constraints: Vec<(\n        LinearCombination<E>,\n        LinearCombination<E>,\n        LinearCombination<E>,\n        String,\n    )>,\n    inputs: Vec<String>,\n    aux: Vec<String>,\n}\n\nimpl<E: Engine> MetricCS<E> {\n    pub fn new() -> Self {\n        MetricCS::default()\n    }\n\n    pub fn num_constraints(&self) -> usize {\n        self.constraints.len()\n    }\n\n    pub fn num_inputs(&self) -> usize {\n        self.inputs.len()\n    }\n\n    pub fn pretty_print_list(&self) -> Vec<String> {\n        let mut result = Vec::new();\n\n        for input in &self.inputs {\n            result.push(format!(\"INPUT {}\", input));\n        }\n        for aux in &self.aux {\n            result.push(format!(\"AUX {}\", aux));\n        }\n\n        for &(ref _a, ref _b, ref _c, ref name) in &self.constraints {\n            result.push(name.to_string());\n        }\n\n        result\n    }\n\n    pub fn pretty_print(&self) -> String {\n        let res = self.pretty_print_list();\n\n        res.join(\"\\n\")\n    }\n\n    fn set_named_obj(&mut self, path: String, to: NamedObject) {\n        if self.named_objects.contains_key(&path) {\n            panic!(\"tried to create object at existing path: {}\", path);\n        }\n\n        self.named_objects.insert(path, to);\n    }\n}\n\nimpl<E: Engine> Default for MetricCS<E> {\n    fn default() -> Self {\n        let mut map = HashMap::new();\n        map.insert(\"ONE\".into(), NamedObject::Var(MetricCS::<E>::one()));\n        MetricCS {\n            named_objects: map,\n            current_namespace: vec![],\n            constraints: vec![],\n            inputs: vec![String::from(\"ONE\")],\n            aux: vec![],\n        }\n    }\n}\n\nimpl<E: Engine> ConstraintSystem<E> for MetricCS<E> {\n    type Root = Self;\n\n    fn alloc<F, A, AR>(&mut self, annotation: A, _f: F) -> Result<Variable, SynthesisError>\n    where\n        F: FnOnce() -> Result<E::Fr, SynthesisError>,\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n    {\n        let path = compute_path(&self.current_namespace, &annotation().into());\n      
  self.aux.push(path);\n\n        Ok(Variable::new_unchecked(Index::Aux(self.aux.len() - 1)))\n    }\n\n    fn alloc_input<F, A, AR>(&mut self, annotation: A, _f: F) -> Result<Variable, SynthesisError>\n    where\n        F: FnOnce() -> Result<E::Fr, SynthesisError>,\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n    {\n        let path = compute_path(&self.current_namespace, &annotation().into());\n        self.inputs.push(path);\n\n        Ok(Variable::new_unchecked(Index::Input(self.inputs.len() - 1)))\n    }\n\n    fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)\n    where\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n        LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n        LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n        LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n    {\n        let path = compute_path(&self.current_namespace, &annotation().into());\n        let index = self.constraints.len();\n        self.set_named_obj(path.clone(), NamedObject::Constraint(index));\n\n        let a = a(LinearCombination::zero());\n        let b = b(LinearCombination::zero());\n        let c = c(LinearCombination::zero());\n\n        self.constraints.push((a, b, c, path));\n    }\n\n    fn push_namespace<NR, N>(&mut self, name_fn: N)\n    where\n        NR: Into<String>,\n        N: FnOnce() -> NR,\n    {\n        let name = name_fn().into();\n        let path = compute_path(&self.current_namespace, &name);\n        self.set_named_obj(path, NamedObject::Namespace);\n        self.current_namespace.push(name);\n    }\n\n    fn pop_namespace(&mut self) {\n        assert!(self.current_namespace.pop().is_some());\n    }\n\n    fn get_root(&mut self) -> &mut Self::Root {\n        self\n    }\n}\n\nfn compute_path(ns: &[String], this: &str) -> String {\n    if this.chars().any(|a| a == '/') {\n        panic!(\"'/' is not allowed in names\");\n    }\n\n    let mut name = String::new();\n\n    let mut needs_separation = false;\n    for ns in ns.iter().chain(Some(this.to_string()).iter()) {\n        if needs_separation {\n            name += \"/\";\n        }\n\n        name += ns;\n        needs_separation = true;\n    }\n\n    name\n}\n"
  },
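  {
    "path": "storage-proofs/core/src/gadgets/metric_example.rs",
    "content": "//! Hypothetical usage sketch for `MetricCS`. This file is an editorial addition: it is not\n//! part of the original crate and is not registered in `gadgets/mod.rs`. `MetricCS` records\n//! constraint names and shapes while ignoring assignments, so a blank circuit can be measured\n//! and pretty-printed without witness values.\n\n#[cfg(test)]\nmod tests {\n    use crate::gadgets::constraint;\n    use crate::gadgets::MetricCS;\n    use bellperson::gadgets::num;\n    use bellperson::{ConstraintSystem, SynthesisError};\n    use paired::bls12_381::Bls12;\n\n    #[test]\n    fn metric_cs_counts_constraints() {\n        let mut cs = MetricCS::<Bls12>::new();\n\n        // `MetricCS::alloc` never invokes the assignment closure, so blank allocations\n        // (as used by blank circuits) are fine here.\n        let a = num::AllocatedNum::alloc(cs.namespace(|| \"a\"), || {\n            Err(SynthesisError::AssignmentMissing)\n        })\n        .unwrap();\n        let b = num::AllocatedNum::alloc(cs.namespace(|| \"b\"), || {\n            Err(SynthesisError::AssignmentMissing)\n        })\n        .unwrap();\n\n        constraint::equal(&mut cs, || \"a == b\", &a, &b);\n\n        // One constraint was recorded; the listing names the ONE input, both aux\n        // variables, and the constraint path.\n        assert_eq!(cs.num_constraints(), 1);\n        assert_eq!(cs.pretty_print_list().len(), 4);\n    }\n}\n"
  },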
  {
    "path": "storage-proofs/core/src/gadgets/mod.rs",
    "content": "mod bench;\nmod metric;\nmod test;\n\npub mod constraint;\npub mod encode;\npub mod insertion;\npub mod multipack;\npub mod pedersen;\npub mod por;\npub mod uint64;\npub mod variables;\npub mod xor;\n\npub use self::bench::*;\npub use self::metric::*;\npub use self::test::*;\n"
  },
  {
    "path": "storage-proofs/core/src/gadgets/multipack.rs",
    "content": "use bellperson::gadgets::{\n    boolean::Boolean,\n    num::{AllocatedNum, Num},\n};\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse ff::{Field, PrimeField, ScalarEngine};\n\n/// Takes a sequence of booleans and exposes them as a single compact Num.\npub fn pack_bits<E, CS>(mut cs: CS, bits: &[Boolean]) -> Result<AllocatedNum<E>, SynthesisError>\nwhere\n    E: ScalarEngine,\n    CS: ConstraintSystem<E>,\n{\n    let mut num = Num::<E>::zero();\n    let mut coeff = E::Fr::one();\n    for bit in bits.iter().take(E::Fr::CAPACITY as usize) {\n        num = num.add_bool_with_coeff(CS::one(), &bit, coeff);\n\n        coeff.double();\n    }\n\n    let alloc_num = AllocatedNum::alloc(cs.namespace(|| \"input\"), || {\n        num.get_value()\n            .ok_or_else(|| SynthesisError::AssignmentMissing)\n    })?;\n\n    // num * 1 = input\n    cs.enforce(\n        || \"packing constraint\",\n        |_| num.lc(E::Fr::one()),\n        |lc| lc + CS::one(),\n        |lc| lc + alloc_num.get_variable(),\n    );\n\n    Ok(alloc_num)\n}\n"
  },
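  {
    "path": "storage-proofs/core/src/gadgets/multipack_example.rs",
    "content": "//! Hypothetical usage sketch for `pack_bits`. This file is an editorial addition: it is not\n//! part of the original crate and is not registered in `gadgets/mod.rs`. It checks that the\n//! packed allocated number agrees with bellperson's non-circuit `compute_multipacking` helper\n//! for a bit sequence shorter than `Fr::CAPACITY`.\n\n#[cfg(test)]\nmod tests {\n    use crate::gadgets::multipack::pack_bits;\n    use crate::gadgets::TestConstraintSystem;\n    use bellperson::gadgets::boolean::{AllocatedBit, Boolean};\n    use bellperson::gadgets::multipack;\n    use bellperson::ConstraintSystem;\n    use paired::bls12_381::Bls12;\n\n    #[test]\n    fn pack_bits_matches_compute_multipacking() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        // 32 bits fit comfortably into a single field element.\n        let bits: Vec<bool> = (0..32).map(|i| i % 3 == 0).collect();\n\n        let booleans: Vec<Boolean> = bits\n            .iter()\n            .enumerate()\n            .map(|(i, bit)| {\n                Boolean::from(\n                    AllocatedBit::alloc(cs.namespace(|| format!(\"bit {}\", i)), Some(*bit))\n                        .unwrap(),\n                )\n            })\n            .collect();\n\n        let packed = pack_bits(cs.namespace(|| \"pack\"), &booleans).unwrap();\n\n        // The non-circuit packing helper packs least-significant bit first,\n        // matching the coefficient doubling in `pack_bits`.\n        let expected = multipack::compute_multipacking::<Bls12>(&bits);\n        assert_eq!(packed.get_value(), Some(expected[0]));\n        assert!(cs.is_satisfied());\n    }\n}\n"
  },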
  {
    "path": "storage-proofs/core/src/gadgets/pedersen.rs",
    "content": "use bellperson::gadgets::{boolean::Boolean, num};\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse fil_sapling_crypto::circuit::pedersen_hash;\nuse paired::bls12_381::Bls12;\n\nuse crate::crypto::pedersen::{JJ_PARAMS, PEDERSEN_BLOCK_SIZE};\n\n/// Pedersen hashing for inputs with length multiple of the block size. Based on a Merkle-Damgard construction.\npub fn pedersen_md_no_padding<CS>(\n    mut cs: CS,\n    data: &[Boolean],\n) -> Result<num::AllocatedNum<Bls12>, SynthesisError>\nwhere\n    CS: ConstraintSystem<Bls12>,\n{\n    assert!(\n        data.len() >= 2 * PEDERSEN_BLOCK_SIZE,\n        \"must be at least 2 block sizes long ({})\",\n        data.len()\n    );\n\n    assert_eq!(\n        data.len() % PEDERSEN_BLOCK_SIZE,\n        0,\n        \"data must be a multiple of the block size ({})\",\n        data.len()\n    );\n\n    let mut chunks = data.chunks(PEDERSEN_BLOCK_SIZE);\n    let mut cur: Vec<Boolean> = chunks.next().unwrap().to_vec();\n    let chunks_len = chunks.len();\n\n    for (i, block) in chunks.enumerate() {\n        let mut cs = cs.namespace(|| format!(\"block {}\", i));\n        for b in block {\n            // TODO: no cloning\n            cur.push(b.clone());\n        }\n        if i == chunks_len - 1 {\n            // last round, skip\n        } else {\n            cur = pedersen_compression(cs.namespace(|| \"hash\"), &cur)?;\n        }\n    }\n\n    // hash and return a num at the end\n    pedersen_compression_num(cs.namespace(|| \"last hash\"), &cur)\n}\n\npub fn pedersen_compression_num<CS: ConstraintSystem<Bls12>>(\n    mut cs: CS,\n    bits: &[Boolean],\n) -> Result<num::AllocatedNum<Bls12>, SynthesisError> {\n    Ok(pedersen_hash::pedersen_hash(\n        cs.namespace(|| \"inner hash\"),\n        pedersen_hash::Personalization::None,\n        &bits,\n        &*JJ_PARAMS,\n    )?\n    .get_x()\n    .clone())\n}\n\npub fn pedersen_compression<CS: ConstraintSystem<Bls12>>(\n    mut cs: CS,\n    bits: &[Boolean],\n) -> Result<Vec<Boolean>, SynthesisError> {\n    let h = pedersen_compression_num(cs.namespace(|| \"compression\"), bits)?;\n    let mut out = h.to_bits_le(cs.namespace(|| \"h into bits\"))?;\n\n    // needs padding, because x does not always translate to exactly 256 bits\n    while out.len() < PEDERSEN_BLOCK_SIZE {\n        out.push(Boolean::Constant(false));\n    }\n\n    Ok(out)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use crate::crypto;\n    use crate::gadgets::TestConstraintSystem;\n    use crate::util::bytes_into_boolean_vec;\n    use bellperson::gadgets::boolean::Boolean;\n    use bellperson::ConstraintSystem;\n    use paired::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn test_pedersen_single_input_circut() {\n        let mut rng = XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let cases = [(32, 689), (64, 1376)];\n\n        for (bytes, constraints) in &cases {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n            let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n            let data_bits: Vec<Boolean> = {\n                let mut cs = cs.namespace(|| \"data\");\n                bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), data.len()).unwrap()\n            };\n            let out =\n                pedersen_compression_num(&mut cs, &data_bits).expect(\"pedersen hashing failed\");\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n            assert_eq!(\n          
      cs.num_constraints(),\n                *constraints,\n                \"constraint size changed for {} bytes\",\n                *bytes\n            );\n\n            let expected = crypto::pedersen::pedersen(data.as_slice());\n\n            assert_eq!(\n                expected,\n                out.get_value().unwrap(),\n                \"circuit and non circuit do not match\"\n            );\n        }\n    }\n\n    #[test]\n    fn test_pedersen_md_input_circuit() {\n        let mut rng = XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let cases = [\n            (64, 1376),   // 64 bytes\n            (96, 2751),   // 96 bytes\n            (128, 4126),  // 128 bytes\n            (160, 5501),  // 160 bytes\n            (256, 9626),  // 256 bytes\n            (512, 20626), // 512 bytes\n        ];\n\n        for (bytes, constraints) in &cases {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n            let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n            let data_bits: Vec<Boolean> = {\n                let mut cs = cs.namespace(|| \"data\");\n                bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), data.len()).unwrap()\n            };\n            let out = pedersen_md_no_padding(cs.namespace(|| \"pedersen\"), &data_bits)\n                .expect(\"pedersen hashing failed\");\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n            assert_eq!(\n                cs.num_constraints(),\n                *constraints,\n                \"constraint size changed for {} bytes\",\n                bytes\n            );\n\n            let expected = crypto::pedersen::pedersen_md_no_padding(data.as_slice());\n\n            assert_eq!(\n                expected,\n                out.get_value().unwrap(),\n                \"circuit and non circuit do not match {} bytes\",\n                bytes\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/gadgets/por.rs",
    "content": "use std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse bellperson::gadgets::boolean::{AllocatedBit, Boolean};\nuse bellperson::gadgets::{multipack, num};\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse generic_array::typenum::Unsigned;\nuse paired::bls12_381::{Bls12, Fr};\n\nuse crate::compound_proof::{CircuitComponent, CompoundProof};\nuse crate::error::Result;\nuse crate::gadgets::constraint;\nuse crate::gadgets::insertion::insert;\nuse crate::gadgets::variables::Root;\nuse crate::hasher::{HashFunction, Hasher, PoseidonArity};\nuse crate::merkle::{base_path_length, MerkleProofTrait, MerkleTreeTrait};\nuse crate::parameter_cache::{CacheableParameters, ParameterSetMetadata};\nuse crate::por::PoR;\nuse crate::proof::ProofScheme;\n\n/// Proof of retrievability.\n///\n/// # Fields\n///\n/// * `params` - The params for the bls curve.\n/// * `value` - The value of the leaf.\n/// * `auth_path` - The authentication path of the leaf in the tree.\n/// * `root` - The merkle root of the tree.\n///\npub struct PoRCircuit<Tree: MerkleTreeTrait> {\n    value: Root<Bls12>,\n    auth_path: AuthPath<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n    root: Root<Bls12>,\n    private: bool,\n    _tree: PhantomData<Tree>,\n}\n\n#[derive(Debug, Clone)]\npub struct AuthPath<\n    H: Hasher,\n    U: 'static + PoseidonArity,\n    V: 'static + PoseidonArity,\n    W: 'static + PoseidonArity,\n> {\n    base: SubPath<H, U>,\n    sub: SubPath<H, V>,\n    top: SubPath<H, W>,\n}\n\nimpl<\n        H: Hasher,\n        U: 'static + PoseidonArity,\n        V: 'static + PoseidonArity,\n        W: 'static + PoseidonArity,\n    > From<Vec<(Vec<Option<Fr>>, Option<usize>)>> for AuthPath<H, U, V, W>\n{\n    fn from(mut base_opts: Vec<(Vec<Option<Fr>>, Option<usize>)>) -> Self {\n        let has_top = W::to_usize() > 0;\n        let has_sub = V::to_usize() > 0;\n        let len = base_opts.len();\n\n        let x = if has_top {\n            2\n        } else if has_sub {\n            1\n        } else {\n            0\n        };\n        let mut opts = base_opts.split_off(len - x);\n\n        let base = base_opts\n            .into_iter()\n            .map(|(hashes, index)| PathElement {\n                hashes,\n                index,\n                _a: Default::default(),\n                _h: Default::default(),\n            })\n            .collect();\n\n        let top = if has_top {\n            let (hashes, index) = opts.pop().unwrap();\n            vec![PathElement {\n                hashes,\n                index,\n                _a: Default::default(),\n                _h: Default::default(),\n            }]\n        } else {\n            Vec::new()\n        };\n\n        let sub = if has_sub {\n            let (hashes, index) = opts.pop().unwrap();\n            vec![PathElement {\n                hashes,\n                index,\n                _a: Default::default(),\n                _h: Default::default(),\n            }]\n        } else {\n            Vec::new()\n        };\n\n        assert!(opts.is_empty());\n\n        AuthPath {\n            base: SubPath { path: base },\n            sub: SubPath { path: sub },\n            top: SubPath { path: top },\n        }\n    }\n}\n\n#[derive(Debug, Clone)]\nstruct SubPath<H: Hasher, Arity: 'static + PoseidonArity> {\n    path: Vec<PathElement<H, Arity>>,\n}\n\n#[derive(Debug, Clone)]\nstruct PathElement<H: Hasher, Arity: 'static + PoseidonArity> {\n    hashes: Vec<Option<Fr>>,\n    index: Option<usize>,\n    
_a: PhantomData<Arity>,\n    _h: PhantomData<H>,\n}\n\nimpl<H: Hasher, Arity: 'static + PoseidonArity> SubPath<H, Arity> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(\n        self,\n        mut cs: CS,\n        mut cur: num::AllocatedNum<Bls12>,\n    ) -> Result<(num::AllocatedNum<Bls12>, Vec<Boolean>), SynthesisError> {\n        let arity = Arity::to_usize();\n\n        if arity == 0 {\n            // Nothing to do here.\n            assert!(self.path.is_empty());\n            return Ok((cur, vec![]));\n        }\n\n        assert_eq!(1, arity.count_ones(), \"arity must be a power of two\");\n        let index_bit_count = arity.trailing_zeros() as usize;\n\n        let mut auth_path_bits = Vec::with_capacity(self.path.len());\n\n        for (i, path_element) in self.path.into_iter().enumerate() {\n            let path_hashes = path_element.hashes;\n            let optional_index = path_element.index; // Optional because of Bellman blank-circuit construction mechanics.\n\n            let cs = &mut cs.namespace(|| format!(\"merkle tree hash {}\", i));\n\n            let mut index_bits = Vec::with_capacity(index_bit_count);\n\n            for i in 0..index_bit_count {\n                let bit = AllocatedBit::alloc(cs.namespace(|| format!(\"index bit {}\", i)), {\n                    optional_index.map(|index| ((index >> i) & 1) == 1)\n                })?;\n\n                index_bits.push(Boolean::from(bit));\n            }\n\n            auth_path_bits.extend_from_slice(&index_bits);\n\n            // Witness the authentication path elements adjacent at this depth.\n            let path_hash_nums = path_hashes\n                .iter()\n                .enumerate()\n                .map(|(i, elt)| {\n                    num::AllocatedNum::alloc(cs.namespace(|| format!(\"path element {}\", i)), || {\n                        elt.ok_or_else(|| SynthesisError::AssignmentMissing)\n                    })\n                })\n                .collect::<Result<Vec<_>, _>>()?;\n\n            let inserted = insert(cs, &cur, &index_bits, &path_hash_nums)?;\n\n            // Compute the new subtree value\n            cur = H::Function::hash_multi_leaf_circuit::<Arity, _>(\n                cs.namespace(|| \"computation of commitment hash\"),\n                &inserted,\n                i,\n            )?;\n        }\n\n        Ok((cur, auth_path_bits))\n    }\n}\n\nimpl<H: Hasher, U: PoseidonArity, V: PoseidonArity, W: PoseidonArity> AuthPath<H, U, V, W> {\n    pub fn blank(leaves: usize) -> Self {\n        let has_sub = V::to_usize() > 0;\n        let has_top = W::to_usize() > 0;\n        let base_elements = base_path_length::<U, V, W>(leaves);\n\n        let base = vec![\n            PathElement::<H, U> {\n                hashes: vec![None; U::to_usize() - 1],\n                index: None,\n                _a: Default::default(),\n                _h: Default::default(),\n            };\n            base_elements\n        ];\n\n        let sub = if has_sub {\n            vec![PathElement::<H, V> {\n                hashes: vec![None; V::to_usize() - 1],\n                index: None,\n                _a: Default::default(),\n                _h: Default::default(),\n            }]\n        } else {\n            Vec::new()\n        };\n\n        let top = if has_top {\n            vec![PathElement::<H, W> {\n                hashes: vec![None; W::to_usize() - 1],\n                index: None,\n                _a: Default::default(),\n                _h: Default::default(),\n            }]\n        } 
else {\n            Vec::new()\n        };\n\n        AuthPath {\n            base: SubPath { path: base },\n            sub: SubPath { path: sub },\n            top: SubPath { path: top },\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> CircuitComponent for PoRCircuit<Tree> {\n    type ComponentPrivateInputs = Option<Root<Bls12>>;\n}\n\npub struct PoRCompound<Tree: MerkleTreeTrait> {\n    _tree: PhantomData<Tree>,\n}\n\nfn to_bits(bit_count: u32, n: usize) -> Vec<bool> {\n    (0..bit_count).map(|i| (n >> i) & 1 == 1).collect()\n}\n\npub fn challenge_into_auth_path_bits(challenge: usize, leaves: usize) -> Vec<bool> {\n    assert_eq!(1, leaves.count_ones());\n\n    to_bits(leaves.trailing_zeros(), challenge)\n}\n\nimpl<C: Circuit<Bls12>, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters<C, P>\n    for PoRCompound<Tree>\n{\n    fn cache_prefix() -> String {\n        format!(\"proof-of-retrievability-{}\", Tree::display())\n    }\n}\n\n// Can only be implemented for Bls12 because PoR is not generic over the engine.\nimpl<'a, Tree: 'static + MerkleTreeTrait> CompoundProof<'a, PoR<Tree>, PoRCircuit<Tree>>\n    for PoRCompound<Tree>\n{\n    fn circuit<'b>(\n        public_inputs: &<PoR<Tree> as ProofScheme<'a>>::PublicInputs,\n        _component_private_inputs: <PoRCircuit<Tree> as CircuitComponent>::ComponentPrivateInputs,\n        proof: &'b <PoR<Tree> as ProofScheme<'a>>::Proof,\n        public_params: &'b <PoR<Tree> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<PoRCircuit<Tree>> {\n        let (root, private) = match (*public_inputs).commitment {\n            None => (Root::Val(Some(proof.proof.root().into())), true),\n            Some(commitment) => (Root::Val(Some(commitment.into())), false),\n        };\n\n        ensure!(\n            private == public_params.private,\n            \"Inputs must be consistent with public params\"\n        );\n\n        Ok(PoRCircuit::<Tree> {\n            value: Root::Val(Some(proof.data.into())),\n            auth_path: proof.proof.as_options().into(),\n            root,\n            private,\n            _tree: PhantomData,\n        })\n    }\n\n    fn blank_circuit(\n        public_params: &<PoR<Tree> as ProofScheme<'a>>::PublicParams,\n    ) -> PoRCircuit<Tree> {\n        PoRCircuit::<Tree> {\n            value: Root::Val(None),\n            auth_path: AuthPath::blank(public_params.leaves),\n            root: Root::Val(None),\n            private: public_params.private,\n            _tree: PhantomData,\n        }\n    }\n\n    fn generate_public_inputs(\n        pub_inputs: &<PoR<Tree> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<PoR<Tree> as ProofScheme<'a>>::PublicParams,\n        _k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        let mut inputs = Vec::new();\n        let path_bits = challenge_into_auth_path_bits(pub_inputs.challenge, pub_params.leaves);\n\n        inputs.extend(multipack::compute_multipacking::<Bls12>(&path_bits));\n\n        if let Some(commitment) = pub_inputs.commitment {\n            ensure!(!pub_params.private, \"Params must be public\");\n            inputs.push(commitment.into());\n        } else {\n            ensure!(pub_params.private, \"Params must be private\");\n        }\n\n        Ok(inputs)\n    }\n}\n\nimpl<'a, Tree: MerkleTreeTrait> Circuit<Bls12> for PoRCircuit<Tree> {\n    /// # Public Inputs\n    ///\n    /// This circuit expects the following public inputs.\n    ///\n    /// * [0] - packed version of the `is_right` components of the 
auth_path.\n    /// * [1] - the merkle root of the tree.\n    ///\n    /// This circuit derives the following private inputs from its fields:\n    /// * value_num - packed version of `value` as bits. (might be more than one Fr)\n    ///\n    /// Note: All public inputs must be provided as `E::Fr`.\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let value = self.value;\n        let auth_path = self.auth_path;\n        let root = self.root;\n\n        let base_arity = Tree::Arity::to_usize();\n        let sub_arity = Tree::SubTreeArity::to_usize();\n        let top_arity = Tree::TopTreeArity::to_usize();\n\n        // All arities must be powers of two or circuits cannot be generated.\n        assert_eq!(\n            1,\n            base_arity.count_ones(),\n            \"base arity must be power of two\"\n        );\n        if sub_arity > 0 {\n            assert_eq!(\n                1,\n                sub_arity.count_ones(),\n                \"subtree arity must be power of two\"\n            );\n        }\n        if top_arity > 0 {\n            assert_eq!(\n                1,\n                top_arity.count_ones(),\n                \"top tree arity must be power of two\"\n            );\n        }\n\n        {\n            let value_num = value.allocated(cs.namespace(|| \"value\"))?;\n            let cur = value_num;\n\n            // Ascend the merkle tree authentication path\n\n            // base tree\n            let (cur, base_auth_path_bits) =\n                auth_path.base.synthesize(cs.namespace(|| \"base\"), cur)?;\n\n            // sub\n            let (cur, sub_auth_path_bits) =\n                auth_path.sub.synthesize(cs.namespace(|| \"sub\"), cur)?;\n\n            // top\n            let (computed_root, top_auth_path_bits) =\n                auth_path.top.synthesize(cs.namespace(|| \"top\"), cur)?;\n\n            let mut auth_path_bits = Vec::new();\n            auth_path_bits.extend(base_auth_path_bits);\n            auth_path_bits.extend(sub_auth_path_bits);\n            auth_path_bits.extend(top_auth_path_bits);\n\n            multipack::pack_into_inputs(cs.namespace(|| \"path\"), &auth_path_bits)?;\n            {\n                // Validate that the root of the merkle tree that we calculated is the same as the input.\n                let rt = root.allocated(cs.namespace(|| \"root_value\"))?;\n                constraint::equal(cs, || \"enforce root is correct\", &computed_root, &rt);\n\n                if !self.private {\n                    // Expose the root\n                    rt.inputize(cs.namespace(|| \"root\"))?;\n                }\n            }\n\n            Ok(())\n        }\n    }\n}\n\nimpl<'a, Tree: MerkleTreeTrait> PoRCircuit<Tree> {\n    #[allow(clippy::type_complexity)]\n    pub fn synthesize<CS>(\n        mut cs: CS,\n        value: Root<Bls12>,\n        auth_path: AuthPath<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n        root: Root<Bls12>,\n        private: bool,\n    ) -> Result<(), SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        let por = Self {\n            value,\n            auth_path,\n            root,\n            private,\n            _tree: PhantomData,\n        };\n\n        por.synthesize(&mut cs)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::gadgets::multipack;\n    use ff::Field;\n    use generic_array::typenum;\n    use merkletree::store::VecStore;\n    use 
pretty_assertions::assert_eq;\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n\n    use crate::compound_proof;\n    use crate::fr32::{bytes_into_fr, fr_into_bytes};\n    use crate::gadgets::{MetricCS, TestConstraintSystem};\n    use crate::hasher::{\n        Blake2sHasher, Domain, Hasher, PedersenHasher, PoseidonHasher, Sha256Hasher,\n    };\n    use crate::merkle::{\n        create_base_merkle_tree, generate_tree, get_base_tree_count, MerkleProofTrait,\n        MerkleTreeWrapper, ResTree,\n    };\n    use crate::por;\n    use crate::proof::NoRequirements;\n    use crate::proof::ProofScheme;\n    use crate::util::data_at_node;\n\n    type TestTree<H, A> =\n        MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, typenum::U0, typenum::U0>;\n\n    type TestTree2<H, A, B> =\n        MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, B, typenum::U0>;\n\n    type TestTree3<H, A, B, C> = MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, B, C>;\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn por_test_compound_poseidon_base_8() {\n        por_compound::<TestTree<PoseidonHasher, typenum::U8>>();\n    }\n\n    fn por_compound<Tree: 'static + MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n\n        let data: Vec<u8> = (0..leaves)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n        let tree = create_base_merkle_tree::<Tree>(None, leaves, data.as_slice()).unwrap();\n\n        let public_inputs = por::PublicInputs {\n            challenge: 2,\n            commitment: Some(tree.root()),\n        };\n\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: por::SetupParams {\n                leaves,\n                private: false,\n            },\n            partitions: None,\n            priority: false,\n        };\n        let public_params = PoRCompound::<Tree>::setup(&setup_params).expect(\"setup failed\");\n\n        let private_inputs = por::PrivateInputs::<Tree>::new(\n            bytes_into_fr(data_at_node(data.as_slice(), public_inputs.challenge).unwrap())\n                .expect(\"failed to create Fr from node data\")\n                .into(),\n            &tree,\n        );\n\n        let gparams = PoRCompound::<Tree>::groth_params(Some(rng), &public_params.vanilla_params)\n            .expect(\"failed to generate groth params\");\n\n        let proof =\n            PoRCompound::<Tree>::prove(&public_params, &public_inputs, &private_inputs, &gparams)\n                .expect(\"failed while proving\");\n\n        let verified =\n            PoRCompound::<Tree>::verify(&public_params, &public_inputs, &proof, &NoRequirements)\n                .expect(\"failed while verifying\");\n        assert!(verified);\n\n        let (circuit, inputs) =\n            PoRCompound::<Tree>::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                .unwrap();\n\n        let mut cs = TestConstraintSystem::new();\n\n        circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n        assert!(cs.is_satisfied());\n        assert!(cs.verify(&inputs));\n    }\n\n    #[test]\n    fn test_por_circuit_pedersen_base_2() {\n        test_por_circuit::<TestTree<PedersenHasher, typenum::U2>>(3, 8_247);\n    }\n\n    #[test]\n    fn test_por_circuit_blake2s_base_2() {\n        test_por_circuit::<TestTree<Blake2sHasher, typenum::U2>>(3, 
129_135);\n    }\n\n    #[test]\n    fn test_por_circuit_sha256_base_2() {\n        test_por_circuit::<TestTree<Sha256Hasher, typenum::U2>>(3, 272_295);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_base_2() {\n        test_por_circuit::<TestTree<PoseidonHasher, typenum::U2>>(3, 1_887);\n    }\n\n    #[test]\n    fn test_por_circuit_pedersen_base_4() {\n        test_por_circuit::<TestTree<PedersenHasher, typenum::U4>>(3, 12_399);\n    }\n\n    #[test]\n    fn test_por_circuit_pedersen_sub_8_2() {\n        test_por_circuit::<TestTree2<PedersenHasher, typenum::U8, typenum::U2>>(3, 20_663);\n    }\n\n    #[test]\n    fn test_por_circuit_pedersen_top_8_4_2() {\n        test_por_circuit::<TestTree3<PedersenHasher, typenum::U8, typenum::U4, typenum::U2>>(\n            3, 24_795,\n        );\n    }\n\n    #[test]\n    fn test_por_circuit_pedersen_top_8_2_4() {\n        // We can handle top-heavy trees with a non-zero subtree arity.\n        // These should never be produced, though.\n        test_por_circuit::<TestTree3<PedersenHasher, typenum::U8, typenum::U2, typenum::U4>>(\n            3, 24_795,\n        );\n    }\n\n    #[test]\n    fn test_por_circuit_blake2s_base_4() {\n        test_por_circuit::<TestTree<Blake2sHasher, typenum::U4>>(3, 130_296);\n    }\n\n    #[test]\n    fn test_por_circuit_sha256_base_4() {\n        test_por_circuit::<TestTree<Sha256Hasher, typenum::U4>>(3, 216_258);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_base_4() {\n        test_por_circuit::<TestTree<PoseidonHasher, typenum::U4>>(3, 1_164);\n    }\n\n    #[test]\n    fn test_por_circuit_pedersen_base_8() {\n        test_por_circuit::<TestTree<PedersenHasher, typenum::U8>>(3, 19_289);\n    }\n\n    #[test]\n    fn test_por_circuit_blake2s_base_8() {\n        test_por_circuit::<TestTree<Blake2sHasher, typenum::U8>>(3, 174_503);\n    }\n\n    #[test]\n    fn test_por_circuit_sha256_base_8() {\n        test_por_circuit::<TestTree<Sha256Hasher, typenum::U8>>(3, 250_987);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_base_8() {\n        test_por_circuit::<TestTree<PoseidonHasher, typenum::U8>>(3, 1_063);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_sub_8_2() {\n        test_por_circuit::<TestTree2<PoseidonHasher, typenum::U8, typenum::U2>>(3, 1_377);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_top_8_4_2() {\n        test_por_circuit::<TestTree3<PoseidonHasher, typenum::U8, typenum::U4, typenum::U2>>(\n            3, 1_764,\n        );\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_top_8_8() {\n        // This is the shape we want for 32GiB sectors.\n        test_por_circuit::<TestTree2<PoseidonHasher, typenum::U8, typenum::U8>>(3, 1_593);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_top_8_8_2() {\n        // This is the shape we want for 64GiB sectors.\n        test_por_circuit::<TestTree3<PoseidonHasher, typenum::U8, typenum::U8, typenum::U2>>(\n            3, 1_907,\n        );\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_top_8_2_4() {\n        // We can handle top-heavy trees with a non-zero subtree arity.\n        // These should never be produced, though.\n        test_por_circuit::<TestTree3<PoseidonHasher, typenum::U8, typenum::U2, typenum::U4>>(\n            3, 1_764,\n        );\n    }\n\n    fn test_por_circuit<Tree: 'static + MerkleTreeTrait>(\n        num_inputs: usize,\n        num_constraints: usize,\n    ) {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        // Ensure arity will evenly fill 
tree.\n        let leaves = 64 * get_base_tree_count::<Tree>();\n\n        // -- Basic Setup\n        let (data, tree) = generate_tree::<Tree, _>(rng, leaves, None);\n\n        for i in 0..leaves {\n            //println!(\"challenge: {}, ({})\", i, leaves);\n\n            // -- PoR\n            let pub_params = por::PublicParams {\n                leaves,\n                private: false,\n            };\n            let pub_inputs = por::PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n                challenge: i,\n                commitment: Some(tree.root()),\n            };\n            let leaf = data_at_node(data.as_slice(), pub_inputs.challenge).unwrap();\n            let leaf_element = <Tree::Hasher as Hasher>::Domain::try_from_bytes(leaf).unwrap();\n            let priv_inputs = por::PrivateInputs::<ResTree<Tree>>::new(leaf_element, &tree);\n            let p = tree.gen_proof(i).unwrap();\n            assert!(p.verify());\n\n            // create a non circuit proof\n            let proof = por::PoR::<ResTree<Tree>>::prove(&pub_params, &pub_inputs, &priv_inputs)\n                .expect(\"proving failed\");\n\n            // make sure it verifies\n            let is_valid = por::PoR::<ResTree<Tree>>::verify(&pub_params, &pub_inputs, &proof)\n                .expect(\"verification failed\");\n            assert!(is_valid, \"failed to verify por proof\");\n\n            // -- Circuit\n\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n            let por = PoRCircuit::<ResTree<Tree>> {\n                value: Root::Val(Some(proof.data.into())),\n                auth_path: proof.proof.as_options().into(),\n                root: Root::Val(Some(pub_inputs.commitment.unwrap().into())),\n                private: false,\n                _tree: PhantomData,\n            };\n\n            por.synthesize(&mut cs).expect(\"circuit synthesis failed\");\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n            assert_eq!(cs.num_inputs(), num_inputs, \"wrong number of inputs\");\n            assert_eq!(\n                cs.num_constraints(),\n                num_constraints,\n                \"wrong number of constraints\"\n            );\n\n            let generated_inputs = PoRCompound::<ResTree<Tree>>::generate_public_inputs(\n                &pub_inputs,\n                &pub_params,\n                None,\n            )\n            .unwrap();\n\n            let expected_inputs = cs.get_inputs();\n\n            for ((input, label), generated_input) in\n                expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n            {\n                assert_eq!(input, generated_input, \"{}\", label);\n            }\n\n            assert_eq!(\n                generated_inputs.len(),\n                expected_inputs.len() - 1,\n                \"inputs are not the same length\"\n            );\n\n            assert!(cs.verify(&generated_inputs), \"failed to verify inputs\");\n        }\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_pedersen_base_2() {\n        private_por_test_compound::<TestTree<PedersenHasher, typenum::U2>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_pedersen_base_4() {\n        private_por_test_compound::<TestTree<PedersenHasher, typenum::U4>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn 
test_private_por_compound_poseidon_base_2() {\n        private_por_test_compound::<TestTree<PoseidonHasher, typenum::U2>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_base_4() {\n        private_por_test_compound::<TestTree<PoseidonHasher, typenum::U4>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_sub_8_2() {\n        private_por_test_compound::<TestTree2<PoseidonHasher, typenum::U8, typenum::U2>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_top_8_4_2() {\n        private_por_test_compound::<TestTree3<PoseidonHasher, typenum::U8, typenum::U4, typenum::U2>>(\n        );\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_top_8_8() {\n        private_por_test_compound::<TestTree2<PoseidonHasher, typenum::U8, typenum::U8>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_top_8_8_2() {\n        private_por_test_compound::<TestTree3<PoseidonHasher, typenum::U8, typenum::U8, typenum::U2>>(\n        );\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_top_8_2_4() {\n        private_por_test_compound::<TestTree3<PoseidonHasher, typenum::U8, typenum::U2, typenum::U4>>(\n        );\n    }\n\n    fn private_por_test_compound<Tree: 'static + MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        // Ensure arity will evenly fill tree.\n        let leaves = 64 * get_base_tree_count::<Tree>();\n\n        // -- Basic Setup\n        let (data, tree) = generate_tree::<Tree, _>(rng, leaves, None);\n\n        for i in 0..3 {\n            let public_inputs = por::PublicInputs {\n                challenge: i,\n                commitment: None,\n            };\n\n            let setup_params = compound_proof::SetupParams {\n                vanilla_params: por::SetupParams {\n                    leaves,\n                    private: true,\n                },\n                partitions: None,\n                priority: false,\n            };\n            let public_params =\n                PoRCompound::<ResTree<Tree>>::setup(&setup_params).expect(\"setup failed\");\n\n            let private_inputs = por::PrivateInputs::<ResTree<Tree>>::new(\n                bytes_into_fr(data_at_node(data.as_slice(), public_inputs.challenge).unwrap())\n                    .expect(\"failed to create Fr from node data\")\n                    .into(),\n                &tree,\n            );\n\n            {\n                let (circuit, inputs) =\n                    PoRCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                        .unwrap();\n\n                let mut cs = TestConstraintSystem::new();\n\n                circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n\n                if !cs.is_satisfied() {\n                    panic!(\n                        \"failed to satisfy: {:?}\",\n                        cs.which_is_unsatisfied().unwrap()\n                    );\n                }\n                assert!(\n                    cs.verify(&inputs),\n                    \"verification failed with TestConstraintSystem and 
generated inputs\"\n                );\n            }\n            // NOTE: This diagnostic code currently fails, even though the proof generated from the blank circuit verifies.\n            // Use this to debug differences between blank and regular circuit generation.\n            {\n                let (circuit1, _inputs) =\n                    PoRCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                        .unwrap();\n                let blank_circuit =\n                    PoRCompound::<ResTree<Tree>>::blank_circuit(&public_params.vanilla_params);\n\n                let mut cs_blank = MetricCS::new();\n                blank_circuit\n                    .synthesize(&mut cs_blank)\n                    .expect(\"failed to synthesize\");\n\n                let a = cs_blank.pretty_print_list();\n\n                let mut cs1 = TestConstraintSystem::new();\n                circuit1.synthesize(&mut cs1).expect(\"failed to synthesize\");\n                let b = cs1.pretty_print_list();\n\n                for (i, (a, b)) in a.chunks(100).zip(b.chunks(100)).enumerate() {\n                    assert_eq!(a, b, \"failed at chunk {}\", i);\n                }\n            }\n\n            let blank_groth_params = PoRCompound::<ResTree<Tree>>::groth_params(\n                Some(rng),\n                &public_params.vanilla_params,\n            )\n            .expect(\"failed to generate groth params\");\n\n            let proof = PoRCompound::prove(\n                &public_params,\n                &public_inputs,\n                &private_inputs,\n                &blank_groth_params,\n            )\n            .expect(\"failed while proving\");\n\n            let verified =\n                PoRCompound::verify(&public_params, &public_inputs, &proof, &NoRequirements)\n                    .expect(\"failed while verifying\");\n\n            assert!(verified);\n        }\n    }\n\n    #[test]\n    fn test_private_por_input_circuit_pedersen_binary() {\n        test_private_por_input_circuit::<TestTree<PedersenHasher, typenum::U2>>(8_246);\n    }\n\n    #[test]\n    fn test_private_por_input_circuit_poseidon_binary() {\n        test_private_por_input_circuit::<TestTree<PoseidonHasher, typenum::U2>>(1_886);\n    }\n\n    #[test]\n    fn test_private_por_input_circuit_pedersen_quad() {\n        test_private_por_input_circuit::<TestTree<PedersenHasher, typenum::U4>>(12_398);\n    }\n\n    #[test]\n    fn test_private_por_input_circuit_poseidon_quad() {\n        test_private_por_input_circuit::<TestTree<PoseidonHasher, typenum::U4>>(1_163);\n    }\n\n    #[test]\n    fn test_private_por_input_circuit_poseidon_oct() {\n        test_private_por_input_circuit::<TestTree<PoseidonHasher, typenum::U8>>(1_062);\n    }\n\n    fn test_private_por_input_circuit<Tree: MerkleTreeTrait>(num_constraints: usize) {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n        for i in 0..leaves {\n            // -- Basic Setup\n\n            let data: Vec<u8> = (0..leaves)\n                .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n                .collect();\n\n            let tree = create_base_merkle_tree::<Tree>(None, leaves, data.as_slice()).unwrap();\n\n            // -- PoR\n\n            let pub_params = por::PublicParams {\n                leaves,\n                private: true,\n            };\n            let pub_inputs = por::PublicInputs {\n                challenge: i,\n                
commitment: None,\n            };\n\n            let priv_inputs = por::PrivateInputs::<Tree>::new(\n                bytes_into_fr(data_at_node(data.as_slice(), pub_inputs.challenge).unwrap())\n                    .unwrap()\n                    .into(),\n                &tree,\n            );\n\n            // create a non circuit proof\n            let proof = por::PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs)\n                .expect(\"proving failed\");\n\n            // make sure it verifies\n            let is_valid = por::PoR::<Tree>::verify(&pub_params, &pub_inputs, &proof)\n                .expect(\"verification failed\");\n            assert!(is_valid, \"failed to verify por proof\");\n\n            // -- Circuit\n\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let por = PoRCircuit::<Tree> {\n                value: Root::Val(Some(proof.data.into())),\n                auth_path: proof.proof.as_options().into(),\n                root: Root::Val(Some(tree.root().into())),\n                private: true,\n                _tree: PhantomData,\n            };\n\n            por.synthesize(&mut cs).expect(\"circuit synthesis failed\");\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n            assert_eq!(cs.num_inputs(), 2, \"wrong number of inputs\");\n            assert_eq!(\n                cs.num_constraints(),\n                num_constraints,\n                \"wrong number of constraints\"\n            );\n\n            let auth_path_bits =\n                challenge_into_auth_path_bits(pub_inputs.challenge, pub_params.leaves);\n            let packed_auth_path = multipack::compute_multipacking::<Bls12>(&auth_path_bits);\n\n            let mut expected_inputs = Vec::new();\n            expected_inputs.extend(packed_auth_path);\n\n            assert_eq!(cs.get_input(0, \"ONE\"), Fr::one(), \"wrong input 0\");\n\n            assert_eq!(\n                cs.get_input(1, \"path/input 0\"),\n                expected_inputs[0],\n                \"wrong packed_auth_path\"\n            );\n\n            assert!(cs.is_satisfied(), \"constraints are not all satisfied\");\n            assert!(cs.verify(&expected_inputs), \"failed to verify inputs\");\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/gadgets/test/mod.rs",
    "content": "use std::cmp::Ordering;\nuse std::collections::BTreeMap;\nuse std::collections::HashMap;\n\nuse bellperson::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};\nuse blake2s_simd::State as Blake2s;\nuse byteorder::{BigEndian, ByteOrder};\nuse ff::{Field, PrimeField, PrimeFieldRepr};\nuse paired::Engine;\n\n#[derive(Debug)]\nenum NamedObject {\n    Constraint(usize),\n    Var(Variable),\n    Namespace,\n}\n\n/// Constraint system for testing purposes.\npub struct TestConstraintSystem<E: Engine> {\n    named_objects: HashMap<String, NamedObject>,\n    current_namespace: Vec<String>,\n    #[allow(clippy::type_complexity)]\n    constraints: Vec<(\n        LinearCombination<E>,\n        LinearCombination<E>,\n        LinearCombination<E>,\n        String,\n    )>,\n    inputs: Vec<(E::Fr, String)>,\n    aux: Vec<(E::Fr, String)>,\n}\n\n#[derive(Clone, Copy)]\nstruct OrderedVariable(Variable);\n\nimpl Eq for OrderedVariable {}\nimpl PartialEq for OrderedVariable {\n    fn eq(&self, other: &OrderedVariable) -> bool {\n        match (self.0.get_unchecked(), other.0.get_unchecked()) {\n            (Index::Input(ref a), Index::Input(ref b)) => a == b,\n            (Index::Aux(ref a), Index::Aux(ref b)) => a == b,\n            _ => false,\n        }\n    }\n}\nimpl PartialOrd for OrderedVariable {\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\nimpl Ord for OrderedVariable {\n    fn cmp(&self, other: &Self) -> Ordering {\n        match (self.0.get_unchecked(), other.0.get_unchecked()) {\n            (Index::Input(ref a), Index::Input(ref b)) => a.cmp(b),\n            (Index::Aux(ref a), Index::Aux(ref b)) => a.cmp(b),\n            (Index::Input(_), Index::Aux(_)) => Ordering::Less,\n            (Index::Aux(_), Index::Input(_)) => Ordering::Greater,\n        }\n    }\n}\n\nfn proc_lc<E: Engine>(terms: &[(Variable, E::Fr)]) -> BTreeMap<OrderedVariable, E::Fr> {\n    let mut map = BTreeMap::new();\n    for &(var, coeff) in terms {\n        map.entry(OrderedVariable(var))\n            .or_insert_with(E::Fr::zero)\n            .add_assign(&coeff);\n    }\n\n    // Remove terms that have a zero coefficient to normalize\n    let mut to_remove = vec![];\n    for (var, coeff) in map.iter() {\n        if coeff.is_zero() {\n            to_remove.push(var.clone())\n        }\n    }\n\n    for var in to_remove {\n        map.remove(&var);\n    }\n\n    map\n}\n\nfn hash_lc<E: Engine>(terms: &[(Variable, E::Fr)], h: &mut Blake2s) {\n    let map = proc_lc::<E>(terms);\n\n    let mut buf = [0u8; 9 + 32];\n    BigEndian::write_u64(&mut buf[0..8], map.len() as u64);\n    h.update(&buf[0..8]);\n\n    for (var, coeff) in map {\n        match var.0.get_unchecked() {\n            Index::Input(i) => {\n                buf[0] = b'I';\n                BigEndian::write_u64(&mut buf[1..9], i as u64);\n            }\n            Index::Aux(i) => {\n                buf[0] = b'A';\n                BigEndian::write_u64(&mut buf[1..9], i as u64);\n            }\n        }\n\n        coeff\n            .into_repr()\n            .write_be(&mut buf[9..])\n            .expect(\"failed to write coeff\");\n\n        h.update(&buf[..]);\n    }\n}\n\nfn _eval_lc2<E: Engine>(terms: &[(Variable, E::Fr)], inputs: &[E::Fr], aux: &[E::Fr]) -> E::Fr {\n    let mut acc = E::Fr::zero();\n\n    for &(var, ref coeff) in terms {\n        let mut tmp = match var.get_unchecked() {\n            Index::Input(index) => inputs[index],\n            Index::Aux(index) 
=> aux[index],\n        };\n\n        tmp.mul_assign(&coeff);\n        acc.add_assign(&tmp);\n    }\n\n    acc\n}\n\nfn eval_lc<E: Engine>(\n    terms: &[(Variable, E::Fr)],\n    inputs: &[(E::Fr, String)],\n    aux: &[(E::Fr, String)],\n) -> E::Fr {\n    let mut acc = E::Fr::zero();\n\n    for &(var, ref coeff) in terms {\n        let mut tmp = match var.get_unchecked() {\n            Index::Input(index) => inputs[index].0,\n            Index::Aux(index) => aux[index].0,\n        };\n\n        tmp.mul_assign(&coeff);\n        acc.add_assign(&tmp);\n    }\n\n    acc\n}\n\nimpl<E: Engine> Default for TestConstraintSystem<E> {\n    fn default() -> Self {\n        let mut map = HashMap::new();\n        map.insert(\n            \"ONE\".into(),\n            NamedObject::Var(TestConstraintSystem::<E>::one()),\n        );\n\n        TestConstraintSystem {\n            named_objects: map,\n            current_namespace: vec![],\n            constraints: vec![],\n            inputs: vec![(E::Fr::one(), \"ONE\".into())],\n            aux: vec![],\n        }\n    }\n}\n\nimpl<E: Engine> TestConstraintSystem<E> {\n    pub fn new() -> Self {\n        Default::default()\n    }\n\n    pub fn pretty_print_list(&self) -> Vec<String> {\n        let mut result = Vec::new();\n\n        for input in &self.inputs {\n            result.push(format!(\"INPUT {}\", input.1));\n        }\n        for aux in &self.aux {\n            result.push(format!(\"AUX {}\", aux.1));\n        }\n\n        for &(ref _a, ref _b, ref _c, ref name) in &self.constraints {\n            result.push(name.to_string());\n        }\n\n        result\n    }\n\n    pub fn pretty_print(&self) -> String {\n        let res = self.pretty_print_list();\n\n        res.join(\"\\n\")\n    }\n\n    pub fn hash(&self) -> String {\n        let mut h = Blake2s::new();\n        {\n            let mut buf = [0u8; 24];\n\n            BigEndian::write_u64(&mut buf[0..8], self.inputs.len() as u64);\n            BigEndian::write_u64(&mut buf[8..16], self.aux.len() as u64);\n            BigEndian::write_u64(&mut buf[16..24], self.constraints.len() as u64);\n            h.update(&buf);\n        }\n\n        for constraint in &self.constraints {\n            hash_lc::<E>(constraint.0.as_ref(), &mut h);\n            hash_lc::<E>(constraint.1.as_ref(), &mut h);\n            hash_lc::<E>(constraint.2.as_ref(), &mut h);\n        }\n\n        let mut s = String::new();\n        for b in h.finalize().as_ref() {\n            s += &format!(\"{:02x}\", b);\n        }\n\n        s\n    }\n\n    pub fn which_is_unsatisfied(&self) -> Option<&str> {\n        for &(ref a, ref b, ref c, ref path) in &self.constraints {\n            let mut a = eval_lc::<E>(a.as_ref(), &self.inputs, &self.aux);\n            let b = eval_lc::<E>(b.as_ref(), &self.inputs, &self.aux);\n            let c = eval_lc::<E>(c.as_ref(), &self.inputs, &self.aux);\n\n            a.mul_assign(&b);\n\n            if a != c {\n                return Some(&*path);\n            }\n        }\n\n        None\n    }\n\n    pub fn is_satisfied(&self) -> bool {\n        match self.which_is_unsatisfied() {\n            Some(b) => {\n                println!(\"fail: {:?}\", b);\n                false\n            }\n            None => true,\n        }\n        // self.which_is_unsatisfied().is_none()\n    }\n\n    pub fn num_constraints(&self) -> usize {\n        self.constraints.len()\n    }\n\n    pub fn set(&mut self, path: &str, to: E::Fr) {\n        match self.named_objects.get(path) {\n            
Some(&NamedObject::Var(ref v)) => match v.get_unchecked() {\n                Index::Input(index) => self.inputs[index].0 = to,\n                Index::Aux(index) => self.aux[index].0 = to,\n            },\n            Some(e) => panic!(\n                \"tried to set path `{}` to value, but `{:?}` already exists there.\",\n                path, e\n            ),\n            _ => panic!(\"no variable exists at path: {}\", path),\n        }\n    }\n\n    pub fn verify(&self, expected: &[E::Fr]) -> bool {\n        assert_eq!(expected.len() + 1, self.inputs.len());\n        for (a, b) in self.inputs.iter().skip(1).zip(expected.iter()) {\n            if &a.0 != b {\n                return false;\n            }\n        }\n\n        true\n    }\n\n    pub fn num_inputs(&self) -> usize {\n        self.inputs.len()\n    }\n\n    pub fn get_input(&mut self, index: usize, path: &str) -> E::Fr {\n        let (assignment, name) = self.inputs[index].clone();\n\n        assert_eq!(path, name);\n\n        assignment\n    }\n\n    pub fn get_inputs(&self) -> &[(E::Fr, String)] {\n        &self.inputs[..]\n    }\n\n    pub fn get(&mut self, path: &str) -> E::Fr {\n        match self.named_objects.get(path) {\n            Some(&NamedObject::Var(ref v)) => match v.get_unchecked() {\n                Index::Input(index) => self.inputs[index].0,\n                Index::Aux(index) => self.aux[index].0,\n            },\n            Some(e) => panic!(\n                \"tried to get value of path `{}`, but `{:?}` exists there (not a variable)\",\n                path, e\n            ),\n            _ => panic!(\"no variable exists at path: {}\", path),\n        }\n    }\n\n    fn set_named_obj(&mut self, path: String, to: NamedObject) {\n        if self.named_objects.contains_key(&path) {\n            panic!(\"tried to create object at existing path: {}\", path);\n        }\n\n        self.named_objects.insert(path, to);\n    }\n}\n\nfn compute_path(ns: &[String], this: &str) -> String {\n    assert!(\n        !this.chars().any(|a| a == '/'),\n        \"'/' is not allowed in names\"\n    );\n\n    if ns.is_empty() {\n        return this.to_string();\n    }\n\n    let name = ns.join(\"/\");\n    format!(\"{}/{}\", name, this)\n}\n\nimpl<E: Engine> ConstraintSystem<E> for TestConstraintSystem<E> {\n    type Root = Self;\n\n    fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>\n    where\n        F: FnOnce() -> Result<E::Fr, SynthesisError>,\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n    {\n        let index = self.aux.len();\n        let path = compute_path(&self.current_namespace, &annotation().into());\n        self.aux.push((f()?, path.clone()));\n        let var = Variable::new_unchecked(Index::Aux(index));\n        self.set_named_obj(path, NamedObject::Var(var));\n\n        Ok(var)\n    }\n\n    fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>\n    where\n        F: FnOnce() -> Result<E::Fr, SynthesisError>,\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n    {\n        let index = self.inputs.len();\n        let path = compute_path(&self.current_namespace, &annotation().into());\n        self.inputs.push((f()?, path.clone()));\n        let var = Variable::new_unchecked(Index::Input(index));\n        self.set_named_obj(path, NamedObject::Var(var));\n\n        Ok(var)\n    }\n\n    fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)\n    where\n        A: FnOnce() -> AR,\n        AR: 
Into<String>,\n        LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n        LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n        LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n    {\n        let path = compute_path(&self.current_namespace, &annotation().into());\n        let index = self.constraints.len();\n        self.set_named_obj(path.clone(), NamedObject::Constraint(index));\n\n        let a = a(LinearCombination::zero());\n        let b = b(LinearCombination::zero());\n        let c = c(LinearCombination::zero());\n\n        self.constraints.push((a, b, c, path));\n    }\n\n    fn push_namespace<NR, N>(&mut self, name_fn: N)\n    where\n        NR: Into<String>,\n        N: FnOnce() -> NR,\n    {\n        let name = name_fn().into();\n        let path = compute_path(&self.current_namespace, &name);\n        self.set_named_obj(path, NamedObject::Namespace);\n        self.current_namespace.push(name);\n    }\n\n    fn pop_namespace(&mut self) {\n        assert!(self.current_namespace.pop().is_some());\n    }\n\n    fn get_root(&mut self) -> &mut Self::Root {\n        self\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_compute_path() {\n        assert_eq!(\n            compute_path(\n                &[\n                    \"hello\".to_string(),\n                    \"world\".to_string(),\n                    \"things\".to_string()\n                ],\n                \"thing\"\n            ),\n            \"hello/world/things/thing\"\n        );\n    }\n\n    #[test]\n    fn test_cs() {\n        use ff::PrimeField;\n        use paired::bls12_381::{Bls12, Fr};\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        assert!(cs.is_satisfied());\n        assert_eq!(cs.num_constraints(), 0);\n        let a = cs\n            .namespace(|| \"a\")\n            .alloc(|| \"var\", || Ok(Fr::from_str(\"10\").unwrap()))\n            .unwrap();\n        let b = cs\n            .namespace(|| \"b\")\n            .alloc(|| \"var\", || Ok(Fr::from_str(\"4\").unwrap()))\n            .unwrap();\n        let c = cs\n            .alloc(|| \"product\", || Ok(Fr::from_str(\"40\").unwrap()))\n            .unwrap();\n\n        cs.enforce(|| \"mult\", |lc| lc + a, |lc| lc + b, |lc| lc + c);\n        assert!(cs.is_satisfied());\n        assert_eq!(cs.num_constraints(), 1);\n\n        cs.set(\"a/var\", Fr::from_str(\"4\").unwrap());\n\n        let one = TestConstraintSystem::<Bls12>::one();\n        cs.enforce(|| \"eq\", |lc| lc + a, |lc| lc + one, |lc| lc + b);\n\n        assert!(!cs.is_satisfied());\n        assert!(cs.which_is_unsatisfied() == Some(\"mult\"));\n\n        assert!(cs.get(\"product\") == Fr::from_str(\"40\").unwrap());\n\n        cs.set(\"product\", Fr::from_str(\"16\").unwrap());\n        assert!(cs.is_satisfied());\n\n        {\n            let mut cs = cs.namespace(|| \"test1\");\n            let mut cs = cs.namespace(|| \"test2\");\n            cs.alloc(|| \"hehe\", || Ok(Fr::one())).unwrap();\n        }\n\n        assert!(cs.get(\"test1/test2/hehe\") == Fr::one());\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/gadgets/uint64.rs",
    "content": "use bellperson::gadgets::boolean::{AllocatedBit, Boolean};\nuse bellperson::gadgets::multipack::pack_into_inputs;\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse paired::Engine;\n\n/// Represents an interpretation of 64 `Boolean` objects as an unsigned integer.\n#[derive(Clone)]\npub struct UInt64 {\n    // Least significant bit first\n    bits: Vec<Boolean>,\n    value: Option<u64>,\n}\n\nimpl UInt64 {\n    /// Construct a constant `UInt64` from a `u64`\n    pub fn constant(value: u64) -> Self {\n        let mut bits = Vec::with_capacity(64);\n\n        let mut tmp = value;\n        for _ in 0..64 {\n            if tmp & 1 == 1 {\n                bits.push(Boolean::constant(true))\n            } else {\n                bits.push(Boolean::constant(false))\n            }\n\n            tmp >>= 1;\n        }\n\n        UInt64 {\n            bits,\n            value: Some(value),\n        }\n    }\n\n    pub fn get_value(&self) -> Option<u64> {\n        self.value\n    }\n\n    pub fn pack_into_input<E, CS>(&self, cs: CS) -> Result<(), SynthesisError>\n    where\n        E: Engine,\n        CS: ConstraintSystem<E>,\n    {\n        pack_into_inputs(cs, &self.bits)\n    }\n\n    /// Allocate a `UInt64` in the constraint system\n    pub fn alloc<E, CS>(mut cs: CS, value: Option<u64>) -> Result<Self, SynthesisError>\n    where\n        E: Engine,\n        CS: ConstraintSystem<E>,\n    {\n        let values = match value {\n            Some(mut val) => {\n                let mut v = Vec::with_capacity(64);\n\n                for _ in 0..64 {\n                    v.push(Some(val & 1 == 1));\n                    val >>= 1;\n                }\n\n                v\n            }\n            None => vec![None; 64],\n        };\n\n        let bits = values\n            .into_iter()\n            .enumerate()\n            .map(|(i, v)| {\n                Ok(Boolean::from(AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"allocated bit {}\", i)),\n                    v,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        Ok(UInt64 { bits, value })\n    }\n\n    pub fn to_bits_be(&self) -> Vec<Boolean> {\n        self.bits.iter().rev().cloned().collect()\n    }\n\n    pub fn from_bits_be(bits: &[Boolean]) -> Self {\n        assert_eq!(bits.len(), 64);\n\n        let mut value = Some(0u64);\n        for b in bits {\n            if let Some(v) = value.as_mut() {\n                *v <<= 1;\n            }\n\n            match b.get_value() {\n                Some(true) => {\n                    if let Some(v) = value.as_mut() {\n                        *v |= 1;\n                    }\n                }\n                Some(false) => {}\n                None => {\n                    value = None;\n                }\n            }\n        }\n\n        UInt64 {\n            value,\n            bits: bits.iter().rev().cloned().collect(),\n        }\n    }\n\n    /// Turns this `UInt64` into its little-endian byte order representation.\n    pub fn to_bits_le(&self) -> Vec<Boolean> {\n        self.bits.clone()\n    }\n\n    /// Converts a little-endian byte order representation of bits into a\n    /// `UInt64`.\n    pub fn from_bits(bits: &[Boolean]) -> Self {\n        assert_eq!(bits.len(), 64);\n\n        let new_bits = bits.to_vec();\n\n        let mut value = Some(0u64);\n        for b in new_bits.iter().rev() {\n            if let Some(v) = value.as_mut() {\n                *v <<= 1;\n            }\n\n         
   match *b {\n                Boolean::Constant(b) => {\n                    if b {\n                        if let Some(v) = value.as_mut() {\n                            *v |= 1\n                        }\n                    }\n                }\n                Boolean::Is(ref b) => match b.get_value() {\n                    Some(true) => {\n                        if let Some(v) = value.as_mut() {\n                            *v |= 1;\n                        }\n                    }\n                    Some(false) => {}\n                    None => value = None,\n                },\n                Boolean::Not(ref b) => match b.get_value() {\n                    Some(false) => {\n                        if let Some(v) = value.as_mut() {\n                            *v |= 1;\n                        }\n                    }\n                    Some(true) => {}\n                    None => value = None,\n                },\n            }\n        }\n\n        UInt64 {\n            value,\n            bits: new_bits,\n        }\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn test_uint64_from_bits_be() {\n        let mut rng = XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for _ in 0..1000 {\n            let v = (0..64)\n                .map(|_| Boolean::constant(rng.gen()))\n                .collect::<Vec<_>>();\n\n            let b = UInt64::from_bits_be(&v);\n\n            for (i, bit) in b.bits.iter().enumerate() {\n                match bit {\n                    &Boolean::Constant(bit) => {\n                        assert!(bit == ((b.value.unwrap() >> i) & 1 == 1));\n                    }\n                    _ => unreachable!(),\n                }\n            }\n\n            let expected_to_be_same = b.to_bits_be();\n\n            for x in v.iter().zip(expected_to_be_same.iter()) {\n                match x {\n                    (&Boolean::Constant(true), &Boolean::Constant(true)) => {}\n                    (&Boolean::Constant(false), &Boolean::Constant(false)) => {}\n                    _ => unreachable!(),\n                }\n            }\n        }\n    }\n\n    #[test]\n    fn test_uint64_from_bits() {\n        let mut rng = XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for _ in 0..1000 {\n            let v = (0..64)\n                .map(|_| Boolean::constant(rng.gen()))\n                .collect::<Vec<_>>();\n\n            let b = UInt64::from_bits(&v);\n\n            for (i, bit) in b.bits.iter().enumerate() {\n                match bit {\n                    &Boolean::Constant(bit) => {\n                        assert!(bit == ((b.value.unwrap() >> i) & 1 == 1));\n                    }\n                    _ => unreachable!(),\n                }\n            }\n\n            let expected_to_be_same = b.to_bits_le();\n\n            for x in v.iter().zip(expected_to_be_same.iter()) {\n                match x {\n                    (&Boolean::Constant(true), &Boolean::Constant(true)) => {}\n                    (&Boolean::Constant(false), &Boolean::Constant(false)) => {}\n                    _ => unreachable!(),\n                }\n            }\n        }\n    }\n}\n"
  },
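  {
    "path": "storage-proofs/core/src/gadgets/uint64_example.rs",
    "content": "//! Editor's sketch, not part of the original source tree: a minimal usage\n//! example for `UInt64` (gadgets/uint64.rs), showing allocation, packing the\n//! bits into public inputs, and a round trip through the little-endian bit\n//! representation. The file path, test name and the import path\n//! `crate::gadgets::uint64` are assumptions; the APIs exercised are the ones\n//! defined in the surrounding codebase.\n\n#[cfg(test)]\nmod example {\n    use crate::gadgets::uint64::UInt64;\n    use crate::gadgets::TestConstraintSystem;\n    use bellperson::ConstraintSystem;\n    use paired::bls12_381::Bls12;\n\n    #[test]\n    fn uint64_alloc_pack_roundtrip() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        // Allocate 64 bits for the value and expose them as packed public inputs.\n        let n = UInt64::alloc(cs.namespace(|| \"n\"), Some(0xdead_beef_cafe_f00d)).unwrap();\n        n.pack_into_input(cs.namespace(|| \"pack n\")).unwrap();\n\n        assert!(cs.is_satisfied());\n        assert_eq!(n.get_value(), Some(0xdead_beef_cafe_f00d));\n\n        // Round trip: the little-endian bits reconstruct the same value.\n        let m = UInt64::from_bits(&n.to_bits_le());\n        assert_eq!(m.get_value(), Some(0xdead_beef_cafe_f00d));\n    }\n}\n"
  },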
  {
    "path": "storage-proofs/core/src/gadgets/variables.rs",
    "content": "use std::fmt;\n\nuse anyhow::Result;\n\nuse bellperson::gadgets::num::AllocatedNum;\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse paired::Engine;\n\n/// Root represents a root commitment which may be either a raw value or an already-allocated number.\n/// This allows subcomponents to depend on roots which may optionally be shared with their parent\n/// or sibling components.\n#[derive(Clone)]\npub enum Root<E: Engine> {\n    Var(AllocatedNum<E>),\n    Val(Option<E::Fr>),\n}\n\nimpl<E: Engine> fmt::Debug for Root<E> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        match self {\n            Root::Var(num) => write!(f, \"Root::Var({:?})\", num.get_value()),\n            Root::Val(val) => write!(f, \"Root::Val({:?})\", val),\n        }\n    }\n}\n\nimpl<E: Engine> Root<E> {\n    pub fn from_allocated<CS: ConstraintSystem<E>>(allocated: AllocatedNum<E>) -> Self {\n        Root::Var(allocated)\n    }\n\n    pub fn allocated<CS: ConstraintSystem<E>>(\n        &self,\n        cs: CS,\n    ) -> Result<AllocatedNum<E>, SynthesisError> {\n        match self {\n            Root::Var(allocated) => Ok(allocated.clone()),\n            Root::Val(fr) => {\n                AllocatedNum::alloc(cs, || fr.ok_or_else(|| SynthesisError::AssignmentMissing))\n            }\n        }\n    }\n\n    pub fn var<CS: ConstraintSystem<E>>(cs: CS, fr: E::Fr) -> Result<Self> {\n        Ok(Root::Var(AllocatedNum::alloc(cs, || Ok(fr))?))\n    }\n\n    pub fn is_some(&self) -> bool {\n        match self {\n            Root::Var(_) => true,\n            Root::Val(Some(_)) => true,\n            Root::Val(None) => false,\n        }\n    }\n}\n"
  },
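  {
    "path": "storage-proofs/core/src/gadgets/variables_example.rs",
    "content": "//! Editor's sketch, not part of the original source tree: illustrates the\n//! `Root` enum from gadgets/variables.rs. A `Root::Val` is allocated lazily\n//! inside `allocated`, while a `Root::Var` reuses a number that was already\n//! allocated (e.g. one shared with a parent or sibling component); both yield\n//! the same assignment. The file path, test name and the import path\n//! `crate::gadgets::variables` are assumptions.\n\n#[cfg(test)]\nmod example {\n    use crate::gadgets::variables::Root;\n    use crate::gadgets::TestConstraintSystem;\n    use bellperson::gadgets::num::AllocatedNum;\n    use bellperson::ConstraintSystem;\n    use ff::Field;\n    use paired::bls12_381::{Bls12, Fr};\n\n    #[test]\n    fn root_val_and_var_agree() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let fr = Fr::one();\n\n        // Raw value: allocation happens inside `allocated`.\n        let val_root = Root::<Bls12>::Val(Some(fr));\n        let from_val = val_root.allocated(cs.namespace(|| \"from val\")).unwrap();\n\n        // Already-allocated number, as a parent might hand to a subcomponent.\n        let num = AllocatedNum::alloc(cs.namespace(|| \"shared\"), || Ok(fr)).unwrap();\n        let var_root = Root::Var(num);\n        let from_var = var_root.allocated(cs.namespace(|| \"from var\")).unwrap();\n\n        assert_eq!(from_val.get_value(), from_var.get_value());\n        assert!(val_root.is_some() && var_root.is_some());\n    }\n}\n"
  },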
  {
    "path": "storage-proofs/core/src/gadgets/xor.rs",
    "content": "use bellperson::gadgets::boolean::Boolean;\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse fil_sapling_crypto::jubjub::JubjubEngine;\n\npub fn xor<E, CS>(\n    cs: &mut CS,\n    key: &[Boolean],\n    input: &[Boolean],\n) -> Result<Vec<Boolean>, SynthesisError>\nwhere\n    E: JubjubEngine,\n    CS: ConstraintSystem<E>,\n{\n    let key_len = key.len();\n    assert_eq!(key_len, 32 * 8);\n\n    input\n        .iter()\n        .enumerate()\n        .map(|(i, byte)| {\n            Boolean::xor(\n                cs.namespace(|| format!(\"xor bit: {}\", i)),\n                byte,\n                &key[i % key_len],\n            )\n        })\n        .collect::<Result<Vec<_>, SynthesisError>>()\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use crate::crypto;\n    use crate::gadgets::TestConstraintSystem;\n    use crate::util::{bits_to_bytes, bytes_into_boolean_vec};\n    use bellperson::gadgets::boolean::Boolean;\n    use bellperson::ConstraintSystem;\n    use paired::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn test_xor_input_circut() {\n        let mut rng = XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for i in 0..10 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect();\n            let data: Vec<u8> = (0..(i + 1) * 32).map(|_| rng.gen()).collect();\n\n            let key_bits: Vec<Boolean> = {\n                let mut cs = cs.namespace(|| \"key\");\n                bytes_into_boolean_vec(&mut cs, Some(key.as_slice()), key.len()).unwrap()\n            };\n\n            let data_bits: Vec<Boolean> = {\n                let mut cs = cs.namespace(|| \"data bits\");\n                bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), data.len()).unwrap()\n            };\n\n            let out_bits =\n                xor(&mut cs, key_bits.as_slice(), data_bits.as_slice()).expect(\"xor failed\");\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n            assert_eq!(out_bits.len(), data_bits.len(), \"invalid output length\");\n\n            // convert Vec<Boolean> to Vec<u8>\n            let actual = bits_to_bytes(\n                out_bits\n                    .iter()\n                    .map(|v| v.get_value().unwrap())\n                    .collect::<Vec<bool>>()\n                    .as_slice(),\n            );\n\n            let expected = crypto::xor::encode(key.as_slice(), data.as_slice()).unwrap();\n\n            assert_eq!(expected, actual, \"circuit and non circuit do not match\");\n\n            // -- roundtrip\n            let roundtrip_bits = {\n                let mut cs = cs.namespace(|| \"roundtrip\");\n                xor(&mut cs, key_bits.as_slice(), out_bits.as_slice()).expect(\"xor faield\")\n            };\n\n            let roundtrip = bits_to_bytes(\n                roundtrip_bits\n                    .iter()\n                    .map(|v| v.get_value().unwrap())\n                    .collect::<Vec<bool>>()\n                    .as_slice(),\n            );\n\n            assert_eq!(data, roundtrip, \"failed to roundtrip\");\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/hasher/blake2s.rs",
    "content": "use std::fmt;\nuse std::hash::Hasher as StdHasher;\n\nuse anyhow::ensure;\nuse bellperson::gadgets::{blake2s as blake2s_circuit, boolean, num};\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse blake2s_simd::{Hash as Blake2sHash, Params as Blake2s, State};\nuse ff::{Field, PrimeField, PrimeFieldRepr};\nuse merkletree::hash::{Algorithm, Hashable};\nuse merkletree::merkle::Element;\nuse paired::bls12_381::{Bls12, Fr, FrRepr};\nuse rand::RngCore;\nuse serde::{Deserialize, Serialize};\n\nuse super::{Domain, HashFunction, Hasher};\nuse crate::crypto::sloth;\nuse crate::error::*;\nuse crate::gadgets::multipack;\n\n#[derive(Default, Copy, Clone, PartialEq, Eq, Debug)]\npub struct Blake2sHasher {}\n\nimpl Hasher for Blake2sHasher {\n    type Domain = Blake2sDomain;\n    type Function = Blake2sFunction;\n\n    fn name() -> String {\n        \"Blake2sHasher\".into()\n    }\n\n    fn sloth_encode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain> {\n        // TODO: validate this is how sloth should work in this case\n        let k = (*key).into();\n        let c = (*ciphertext).into();\n\n        Ok(sloth::encode(&k, &c).into())\n    }\n\n    fn sloth_decode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain> {\n        // TODO: validate this is how sloth should work in this case\n        Ok(sloth::decode(&(*key).into(), &(*ciphertext).into()).into())\n    }\n}\n\n#[derive(Clone)]\npub struct Blake2sFunction(State);\n\nimpl Default for Blake2sFunction {\n    fn default() -> Self {\n        Blake2sFunction(Blake2s::new().hash_length(32).to_state())\n    }\n}\n\nimpl PartialEq for Blake2sFunction {\n    fn eq(&self, other: &Self) -> bool {\n        format!(\"{:?}\", self) == format!(\"{:?}\", other)\n    }\n}\n\nimpl Eq for Blake2sFunction {}\n\nimpl fmt::Debug for Blake2sFunction {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"Blake2sFunction({:?})\", self.0)\n    }\n}\n\nimpl StdHasher for Blake2sFunction {\n    #[inline]\n    fn write(&mut self, msg: &[u8]) {\n        self.0.update(msg);\n    }\n\n    #[inline]\n    fn finish(&self) -> u64 {\n        unreachable!(\"unused by Function -- should never be called\")\n    }\n}\n\n#[derive(\n    Copy, Clone, PartialEq, Eq, Debug, PartialOrd, Ord, Default, Serialize, Deserialize, Hash,\n)]\npub struct Blake2sDomain(pub [u8; 32]);\n\nimpl AsRef<Blake2sDomain> for Blake2sDomain {\n    fn as_ref(&self) -> &Self {\n        self\n    }\n}\n\nimpl Blake2sDomain {\n    pub fn trim_to_fr32(&mut self) {\n        // strip last two bits, to ensure result is in Fr.\n        self.0[31] &= 0b0011_1111;\n    }\n}\n\nimpl AsRef<[u8]> for Blake2sDomain {\n    fn as_ref(&self) -> &[u8] {\n        &self.0[..]\n    }\n}\n\nimpl Hashable<Blake2sFunction> for Blake2sDomain {\n    fn hash(&self, state: &mut Blake2sFunction) {\n        state.write(self.as_ref())\n    }\n}\n\nimpl From<Fr> for Blake2sDomain {\n    fn from(val: Fr) -> Self {\n        let mut res = Self::default();\n        val.into_repr().write_le(&mut res.0[0..32]).unwrap();\n\n        res\n    }\n}\n\nimpl From<FrRepr> for Blake2sDomain {\n    fn from(val: FrRepr) -> Self {\n        let mut res = Self::default();\n        val.write_le(&mut res.0[0..32]).unwrap();\n\n        res\n    }\n}\n\nimpl Element for Blake2sDomain {\n    fn byte_len() -> usize {\n        32\n    }\n\n    fn from_slice(bytes: &[u8]) -> Self {\n        match Blake2sDomain::try_from_bytes(bytes) {\n            Ok(res) => res,\n            
Err(err) => panic!(\"{}\", err),\n        }\n    }\n\n    fn copy_to_slice(&self, bytes: &mut [u8]) {\n        bytes.copy_from_slice(&self.0);\n    }\n}\n\nimpl From<Blake2sDomain> for Fr {\n    fn from(val: Blake2sDomain) -> Self {\n        let mut res = FrRepr::default();\n        res.read_le(&val.0[0..32]).unwrap();\n\n        Fr::from_repr(res).unwrap()\n    }\n}\n\nimpl Domain for Blake2sDomain {\n    fn into_bytes(&self) -> Vec<u8> {\n        self.0.to_vec()\n    }\n\n    fn try_from_bytes(raw: &[u8]) -> Result<Self> {\n        ensure!(\n            raw.len() == 32 && u32::from(raw[31]) <= Fr::NUM_BITS,\n            Error::InvalidInputSize\n        );\n\n        let mut res = Blake2sDomain::default();\n        res.0.copy_from_slice(&raw[0..32]);\n        Ok(res)\n    }\n\n    fn write_bytes(&self, dest: &mut [u8]) -> Result<()> {\n        ensure!(dest.len() >= 32, Error::InvalidInputSize);\n        dest[0..32].copy_from_slice(&self.0[..]);\n        Ok(())\n    }\n\n    fn random<R: RngCore>(rng: &mut R) -> Self {\n        // generating an Fr and converting it, to ensure we stay in the field\n        Fr::random(rng).into()\n    }\n}\n\nimpl Into<Blake2sDomain> for Blake2sHash {\n    fn into(self) -> Blake2sDomain {\n        let mut res = Blake2sDomain::default();\n        res.0[..].copy_from_slice(self.as_ref());\n        res.trim_to_fr32();\n\n        res\n    }\n}\n\nimpl HashFunction<Blake2sDomain> for Blake2sFunction {\n    fn hash(data: &[u8]) -> Blake2sDomain {\n        Blake2s::new()\n            .hash_length(32)\n            .to_state()\n            .update(data)\n            .finalize()\n            .into()\n    }\n\n    fn hash2(a: &Blake2sDomain, b: &Blake2sDomain) -> Blake2sDomain {\n        Blake2s::new()\n            .hash_length(32)\n            .to_state()\n            .update(a.as_ref())\n            .update(b.as_ref())\n            .finalize()\n            .into()\n    }\n\n    fn hash_multi_leaf_circuit<Arity, CS: ConstraintSystem<Bls12>>(\n        mut cs: CS,\n        leaves: &[num::AllocatedNum<Bls12>],\n        _height: usize,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let mut bits = Vec::with_capacity(leaves.len() * Fr::CAPACITY as usize);\n        for (i, leaf) in leaves.iter().enumerate() {\n            bits.extend_from_slice(\n                &leaf.to_bits_le(cs.namespace(|| format!(\"{}_num_into_bits\", i)))?,\n            );\n            while bits.len() % 8 != 0 {\n                bits.push(boolean::Boolean::Constant(false));\n            }\n        }\n        Self::hash_circuit(cs, &bits)\n    }\n\n    fn hash_leaf_bits_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        left: &[boolean::Boolean],\n        right: &[boolean::Boolean],\n        _height: usize,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let mut preimage: Vec<boolean::Boolean> = vec![];\n\n        preimage.extend_from_slice(left);\n        while preimage.len() % 8 != 0 {\n            preimage.push(boolean::Boolean::Constant(false));\n        }\n\n        preimage.extend_from_slice(right);\n        while preimage.len() % 8 != 0 {\n            preimage.push(boolean::Boolean::Constant(false));\n        }\n\n        Self::hash_circuit(cs, &preimage[..])\n    }\n\n    fn hash_circuit<CS: ConstraintSystem<Bls12>>(\n        mut cs: CS,\n        bits: &[boolean::Boolean],\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let personalization = vec![0u8; 8];\n        let alloc_bits =\n   
         blake2s_circuit::blake2s(cs.namespace(|| \"hash\"), &bits[..], &personalization)?;\n\n        multipack::pack_bits(cs.namespace(|| \"pack\"), &alloc_bits)\n    }\n\n    fn hash2_circuit<CS>(\n        mut cs: CS,\n        a_num: &num::AllocatedNum<Bls12>,\n        b_num: &num::AllocatedNum<Bls12>,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        // Allocate as booleans\n        let a = a_num.to_bits_le(cs.namespace(|| \"a_bits\"))?;\n        let b = b_num.to_bits_le(cs.namespace(|| \"b_bits\"))?;\n\n        let mut preimage: Vec<boolean::Boolean> = vec![];\n\n        preimage.extend_from_slice(&a);\n        while preimage.len() % 8 != 0 {\n            preimage.push(boolean::Boolean::Constant(false));\n        }\n\n        preimage.extend_from_slice(&b);\n        while preimage.len() % 8 != 0 {\n            preimage.push(boolean::Boolean::Constant(false));\n        }\n\n        Self::hash_circuit(cs, &preimage[..])\n    }\n}\n\nimpl Algorithm<Blake2sDomain> for Blake2sFunction {\n    #[inline]\n    fn hash(&mut self) -> Blake2sDomain {\n        self.0.clone().finalize().into()\n    }\n\n    #[inline]\n    fn reset(&mut self) {\n        self.0 = Blake2s::new().hash_length(32).to_state()\n    }\n\n    fn leaf(&mut self, leaf: Blake2sDomain) -> Blake2sDomain {\n        leaf\n    }\n\n    fn node(&mut self, left: Blake2sDomain, right: Blake2sDomain, _height: usize) -> Blake2sDomain {\n        left.hash(self);\n        right.hash(self);\n        self.hash()\n    }\n\n    fn multi_node(&mut self, parts: &[Blake2sDomain], _height: usize) -> Blake2sDomain {\n        for part in parts {\n            part.hash(self)\n        }\n        self.hash()\n    }\n}\n\nimpl From<[u8; 32]> for Blake2sDomain {\n    #[inline]\n    fn from(val: [u8; 32]) -> Self {\n        Blake2sDomain(val)\n    }\n}\n\nimpl From<Blake2sDomain> for [u8; 32] {\n    #[inline]\n    fn from(val: Blake2sDomain) -> Self {\n        val.0\n    }\n}\n"
  },
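  {
    "path": "storage-proofs/core/src/hasher/blake2s_example.rs",
    "content": "//! Editor's sketch, not part of the original source tree: demonstrates that\n//! `Blake2sFunction::hash` yields a domain element that already fits in `Fr`,\n//! because the `Blake2sHash -> Blake2sDomain` conversion calls\n//! `trim_to_fr32`, which clears the two high bits of the last byte. The file\n//! path and test name are hypothetical.\n\n#[cfg(test)]\nmod example {\n    use crate::hasher::blake2s::{Blake2sDomain, Blake2sFunction};\n    use crate::hasher::{Domain, HashFunction};\n\n    #[test]\n    fn digest_is_trimmed_into_field() {\n        let digest: Blake2sDomain = Blake2sFunction::hash(b\"some data\");\n\n        // The top two bits of byte 31 were stripped by `trim_to_fr32` ...\n        assert_eq!(digest.0[31] & 0b1100_0000, 0);\n\n        // ... so the digest round-trips as a valid domain element.\n        let bytes: &[u8] = digest.as_ref();\n        assert_eq!(Blake2sDomain::try_from_bytes(bytes).unwrap(), digest);\n    }\n}\n"
  },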
  {
    "path": "storage-proofs/core/src/hasher/mod.rs",
    "content": "pub mod blake2s;\npub mod pedersen;\npub mod poseidon;\npub mod sha256;\n\npub mod types;\n\npub use self::blake2s::*;\npub use self::pedersen::*;\npub use self::poseidon::*;\npub use self::sha256::*;\npub use self::types::*;\n"
  },
  {
    "path": "storage-proofs/core/src/hasher/pedersen.rs",
    "content": "use std::hash::Hasher as StdHasher;\n\nuse anyhow::ensure;\nuse bellperson::gadgets::{boolean, num};\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse ff::{Field, PrimeField, PrimeFieldRepr};\nuse fil_sapling_crypto::circuit::pedersen_hash as pedersen_hash_circuit;\nuse fil_sapling_crypto::pedersen_hash::Personalization;\nuse merkletree::hash::{Algorithm as LightAlgorithm, Hashable};\nuse merkletree::merkle::Element;\nuse paired::bls12_381::{Bls12, Fr, FrRepr};\nuse serde::{Deserialize, Serialize};\n\nuse crate::crypto::{pedersen, sloth};\nuse crate::error::{Error, Result};\nuse crate::gadgets::pedersen::{pedersen_compression_num, pedersen_md_no_padding};\nuse crate::hasher::{Domain, HashFunction, Hasher};\n\n#[derive(Default, Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]\npub struct PedersenHasher {}\n\nimpl Hasher for PedersenHasher {\n    type Domain = PedersenDomain;\n    type Function = PedersenFunction;\n\n    fn name() -> String {\n        \"PedersenHasher\".into()\n    }\n\n    #[inline]\n    fn sloth_encode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain> {\n        // Unrapping here is safe; `Fr` elements and hash domain elements are the same byte length.\n        let key = Fr::from_repr(key.0)?;\n        let ciphertext = Fr::from_repr(ciphertext.0)?;\n        Ok(sloth::encode(&key, &ciphertext).into())\n    }\n\n    #[inline]\n    fn sloth_decode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain> {\n        // Unrapping here is safe; `Fr` elements and hash domain elements are the same byte length.\n        let key = Fr::from_repr(key.0)?;\n        let ciphertext = Fr::from_repr(ciphertext.0)?;\n\n        Ok(sloth::decode(&key, &ciphertext).into())\n    }\n}\n\n#[derive(Copy, Clone, Debug, PartialEq, Eq)]\npub struct PedersenFunction(Fr);\n\nimpl Default for PedersenFunction {\n    fn default() -> PedersenFunction {\n        PedersenFunction(Fr::from_repr(FrRepr::default()).expect(\"failed default\"))\n    }\n}\n\nimpl Hashable<PedersenFunction> for Fr {\n    fn hash(&self, state: &mut PedersenFunction) {\n        let mut bytes = Vec::with_capacity(32);\n        self.into_repr().write_le(&mut bytes).unwrap();\n        state.write(&bytes);\n    }\n}\n\nimpl Hashable<PedersenFunction> for PedersenDomain {\n    fn hash(&self, state: &mut PedersenFunction) {\n        let mut bytes = Vec::with_capacity(32);\n        self.0\n            .write_le(&mut bytes)\n            .expect(\"Failed to write `FrRepr`\");\n        state.write(&bytes);\n    }\n}\n\n#[derive(Copy, Clone, Debug, Serialize, Deserialize)]\npub struct PedersenDomain(pub FrRepr);\n\nimpl AsRef<PedersenDomain> for PedersenDomain {\n    fn as_ref(&self) -> &PedersenDomain {\n        self\n    }\n}\n\nimpl std::hash::Hash for PedersenDomain {\n    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {\n        let raw: &[u64] = self.0.as_ref();\n        std::hash::Hash::hash(raw, state);\n    }\n}\n\nimpl PartialEq for PedersenDomain {\n    fn eq(&self, other: &Self) -> bool {\n        self.0.as_ref() == other.0.as_ref()\n    }\n}\n\nimpl Eq for PedersenDomain {}\n\nimpl Default for PedersenDomain {\n    fn default() -> PedersenDomain {\n        PedersenDomain(FrRepr::default())\n    }\n}\n\nimpl Ord for PedersenDomain {\n    #[inline(always)]\n    fn cmp(&self, other: &PedersenDomain) -> ::std::cmp::Ordering {\n        (self.0).cmp(&other.0)\n    }\n}\n\nimpl PartialOrd for PedersenDomain {\n    #[inline(always)]\n    fn partial_cmp(&self, other: 
&PedersenDomain) -> Option<::std::cmp::Ordering> {\n        Some((self.0).cmp(&other.0))\n    }\n}\n\nimpl AsRef<[u8]> for PedersenDomain {\n    #[inline]\n    fn as_ref(&self) -> &[u8] {\n        as_ref(&(self.0).0)\n    }\n}\n\n// This is unsafe, and I wish it wasn't here, but I really need AsRef<[u8]> to work, without allocating.\n// https://internals.rust-lang.org/t/safe-trasnsmute-for-slices-e-g-u64-u32-particularly-simd-types/2871\n// https://github.com/briansmith/ring/blob/abb3fdfc08562f3f02e95fb551604a871fd4195e/src/polyfill.rs#L93-L110\n#[inline(always)]\n#[allow(clippy::needless_lifetimes)]\nfn as_ref<'a>(src: &'a [u64; 4]) -> &'a [u8] {\n    unsafe {\n        std::slice::from_raw_parts(\n            src.as_ptr() as *const u8,\n            src.len() * std::mem::size_of::<u64>(),\n        )\n    }\n}\n\nimpl Domain for PedersenDomain {\n    fn into_bytes(&self) -> Vec<u8> {\n        let mut out = Vec::with_capacity(PedersenDomain::byte_len());\n        self.0.write_le(&mut out).unwrap();\n\n        out\n    }\n\n    fn try_from_bytes(raw: &[u8]) -> Result<Self> {\n        ensure!(raw.len() == PedersenDomain::byte_len(), Error::BadFrBytes);\n        let mut res: FrRepr = Default::default();\n        res.read_le(raw)?;\n\n        Ok(PedersenDomain(res))\n    }\n\n    fn write_bytes(&self, dest: &mut [u8]) -> Result<()> {\n        self.0.write_le(dest)?;\n        Ok(())\n    }\n\n    fn random<R: rand::RngCore>(rng: &mut R) -> Self {\n        // generating an Fr and converting it, to ensure we stay in the field\n        Fr::random(rng).into()\n    }\n}\n\nimpl Element for PedersenDomain {\n    fn byte_len() -> usize {\n        32\n    }\n\n    fn from_slice(bytes: &[u8]) -> Self {\n        PedersenDomain::try_from_bytes(bytes).expect(\"invalid bytes\")\n    }\n\n    fn copy_to_slice(&self, bytes: &mut [u8]) {\n        bytes.copy_from_slice(&self.into_bytes());\n    }\n}\n\nimpl StdHasher for PedersenFunction {\n    #[inline]\n    fn write(&mut self, msg: &[u8]) {\n        self.0 = pedersen::pedersen(msg);\n    }\n\n    #[inline]\n    fn finish(&self) -> u64 {\n        unimplemented!()\n    }\n}\n\nimpl HashFunction<PedersenDomain> for PedersenFunction {\n    fn hash(data: &[u8]) -> PedersenDomain {\n        pedersen::pedersen_md_no_padding(data).into()\n    }\n\n    fn hash2(a: &PedersenDomain, b: &PedersenDomain) -> PedersenDomain {\n        let data = NodeBits::new(&(a.0).0[..], &(b.0).0[..]);\n\n        let digest = if cfg!(target_arch = \"x86_64\") {\n            use fil_sapling_crypto::pedersen_hash::pedersen_hash_bls12_381_with_precomp;\n            pedersen_hash_bls12_381_with_precomp::<_>(\n                Personalization::None,\n                data,\n                &pedersen::JJ_PARAMS,\n            )\n        } else {\n            use fil_sapling_crypto::pedersen_hash::pedersen_hash;\n            pedersen_hash::<Bls12, _>(Personalization::None, data, &pedersen::JJ_PARAMS)\n        };\n        digest.into_xy().0.into()\n    }\n\n    fn hash_multi_leaf_circuit<Arity, CS: ConstraintSystem<Bls12>>(\n        mut cs: CS,\n        leaves: &[num::AllocatedNum<Bls12>],\n        _height: usize,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let is_binary = leaves.len() == 2;\n\n        let mut bits = Vec::with_capacity(leaves.len() * Fr::CAPACITY as usize);\n        for (i, leaf) in leaves.iter().enumerate() {\n            bits.extend_from_slice(\n                &leaf.to_bits_le(cs.namespace(|| format!(\"{}_num_into_bits\", i)))?,\n            
);\n            if !is_binary {\n                while bits.len() % 8 != 0 {\n                    bits.push(boolean::Boolean::Constant(false));\n                }\n            }\n        }\n\n        if is_binary {\n            Ok(pedersen_hash_circuit::pedersen_hash(\n                cs,\n                Personalization::None,\n                &bits,\n                &*pedersen::JJ_PARAMS,\n            )?\n            .get_x()\n            .clone())\n        } else {\n            Self::hash_circuit(cs, &bits)\n        }\n    }\n\n    fn hash_leaf_bits_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        left: &[boolean::Boolean],\n        right: &[boolean::Boolean],\n        _height: usize,\n    ) -> ::std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let mut preimage: Vec<boolean::Boolean> = vec![];\n        preimage.extend_from_slice(left);\n        preimage.extend_from_slice(right);\n\n        Ok(pedersen_hash_circuit::pedersen_hash(\n            cs,\n            Personalization::None,\n            &preimage,\n            &*pedersen::JJ_PARAMS,\n        )?\n        .get_x()\n        .clone())\n    }\n\n    fn hash_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        bits: &[boolean::Boolean],\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        pedersen_md_no_padding(cs, bits)\n    }\n\n    fn hash2_circuit<CS>(\n        mut cs: CS,\n        a_num: &num::AllocatedNum<Bls12>,\n        b_num: &num::AllocatedNum<Bls12>,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        // Allocate as booleans\n        let a = a_num.to_bits_le(cs.namespace(|| \"a_bits\"))?;\n        let b = b_num.to_bits_le(cs.namespace(|| \"b_bits\"))?;\n\n        let mut values = Vec::new();\n        values.extend_from_slice(&a);\n        values.extend_from_slice(&b);\n\n        if values.is_empty() {\n            // can happen with small layers\n            num::AllocatedNum::alloc(cs.namespace(|| \"pedersen_hash1\"), || Ok(Fr::zero()))\n        } else {\n            pedersen_compression_num(cs.namespace(|| \"pedersen_hash1\"), &values)\n        }\n    }\n}\n\nimpl LightAlgorithm<PedersenDomain> for PedersenFunction {\n    #[inline]\n    fn hash(&mut self) -> PedersenDomain {\n        self.0.into()\n    }\n\n    #[inline]\n    fn reset(&mut self) {\n        self.0 = Fr::from_repr(FrRepr::from(0)).expect(\"failed 0\");\n    }\n\n    fn leaf(&mut self, leaf: PedersenDomain) -> PedersenDomain {\n        leaf\n    }\n\n    fn node(\n        &mut self,\n        left: PedersenDomain,\n        right: PedersenDomain,\n        _height: usize,\n    ) -> PedersenDomain {\n        let node_bits = NodeBits::new(&(left.0).0[..], &(right.0).0[..]);\n\n        let digest = if cfg!(target_arch = \"x86_64\") {\n            use fil_sapling_crypto::pedersen_hash::pedersen_hash_bls12_381_with_precomp;\n            pedersen_hash_bls12_381_with_precomp::<_>(\n                Personalization::None,\n                node_bits,\n                &pedersen::JJ_PARAMS,\n            )\n        } else {\n            use fil_sapling_crypto::pedersen_hash::pedersen_hash;\n            pedersen_hash::<Bls12, _>(Personalization::None, node_bits, &pedersen::JJ_PARAMS)\n        };\n\n        digest.into_xy().0.into()\n    }\n\n    fn multi_node(&mut self, parts: &[PedersenDomain], height: usize) -> PedersenDomain {\n        match parts.len() {\n            2 => self.node(parts[0], parts[1], 
height),\n            _ => {\n                use crate::crypto::pedersen::*;\n\n                pedersen_md_no_padding_bits(Bits::new_many(parts.iter())).into()\n            }\n        }\n    }\n}\n\n/// Helper to iterate over a pair of `Fr`.\nstruct NodeBits<'a> {\n    // 256 bits\n    lhs: &'a [u64],\n    // 256 bits\n    rhs: &'a [u64],\n    index: usize,\n}\n\nimpl<'a> NodeBits<'a> {\n    pub fn new(lhs: &'a [u64], rhs: &'a [u64]) -> Self {\n        NodeBits { lhs, rhs, index: 0 }\n    }\n}\n\nimpl<'a> Iterator for NodeBits<'a> {\n    type Item = bool;\n\n    #[inline]\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.index < 255 {\n            // return lhs\n            let a = self.index / 64;\n            let b = self.index % 64;\n            let res = (self.lhs[a] & (1 << b)) != 0;\n            self.index += 1;\n            return Some(res);\n        }\n\n        if self.index < 2 * 255 {\n            // return rhs\n            let a = (self.index - 255) / 64;\n            let b = (self.index - 255) % 64;\n            let res = (self.rhs[a] & (1 << b)) != 0;\n            self.index += 1;\n            return Some(res);\n        }\n\n        None\n    }\n}\n\nimpl From<Fr> for PedersenDomain {\n    #[inline]\n    fn from(val: Fr) -> Self {\n        PedersenDomain(val.into_repr())\n    }\n}\n\nimpl From<FrRepr> for PedersenDomain {\n    #[inline]\n    fn from(val: FrRepr) -> Self {\n        PedersenDomain(val)\n    }\n}\n\nimpl From<PedersenDomain> for Fr {\n    #[inline]\n    fn from(val: PedersenDomain) -> Self {\n        Fr::from_repr(val.0).unwrap()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use std::mem;\n\n    // use merkletree::hash::Hashable;\n\n    // use crate::merkle::BinaryMerkleTree;\n\n    // These two tests need to be rewritten not to use from_data, or from_data needs to be fixed to not hash its contents\n    // before it is restored to MerkleTreeTrait.\n    // #[test]\n    // fn test_path() {\n    //     let values = [\"hello\", \"world\", \"you\", \"two\"];\n    //     let t = BinaryMerkleTree::<PedersenHasher>::from_data(values.iter()).unwrap();\n\n    //     let p = t.gen_proof(0).unwrap(); // create a proof for the first value = \"hello\"\n    //     assert_eq!(*p.path(), vec![0, 0]);\n    //     assert_eq!(\n    //         p.validate::<PedersenFunction>()\n    //             .expect(\"failed to validate\"),\n    //         true\n    //     );\n    // }\n\n    // #[test]\n    // fn test_pedersen_hasher() {\n    //     let values = [\"hello\", \"world\", \"you\", \"two\"];\n\n    //     let t = BinaryMerkleTree::<PedersenHasher>::from_data(values.iter()).unwrap();\n\n    //     assert_eq!(t.leafs(), 4);\n\n    //     let mut a = PedersenFunction::default();\n    //     let leaves: Vec<PedersenDomain> = values\n    //         .iter()\n    //         .map(|v| {\n    //             v.hash(&mut a);\n    //             let h = a.hash();\n    //             a.reset();\n    //             h\n    //         })\n    //         .collect();\n\n    //     assert_eq!(t.read_at(0).unwrap(), leaves[0]);\n    //     assert_eq!(t.read_at(1).unwrap(), leaves[1]);\n    //     assert_eq!(t.read_at(2).unwrap(), leaves[2]);\n    //     assert_eq!(t.read_at(3).unwrap(), leaves[3]);\n\n    //     let i1 = a.node(leaves[0], leaves[1], 0);\n    //     a.reset();\n    //     let i2 = a.node(leaves[2], leaves[3], 0);\n    //     a.reset();\n\n    //     assert_eq!(t.read_at(4).unwrap(), i1);\n    //     assert_eq!(t.read_at(5).unwrap(), i2);\n\n    //     
let root = a.node(i1, i2, 1);\n    //     a.reset();\n\n    //     assert_eq!(\n    //         t.read_at(0).unwrap().0,\n    //         FrRepr([\n    //             8141980337328041169,\n    //             4041086031096096197,\n    //             4135265344031344584,\n    //             7650472305044950055\n    //         ])\n    //     );\n\n    //     let expected = FrRepr([\n    //         11371136130239400769,\n    //         4290566175630177573,\n    //         11576422143286805197,\n    //         2687080719931344767,\n    //     ]);\n    //     let actual = t.read_at(6).unwrap().0;\n\n    //     assert_eq!(actual, expected);\n    //     assert_eq!(t.read_at(6).unwrap(), root);\n    // }\n\n    #[test]\n    fn test_as_ref() {\n        let cases: Vec<[u64; 4]> = vec![\n            [0, 0, 0, 0],\n            [\n                14963070332212552755,\n                2414807501862983188,\n                16116531553419129213,\n                6357427774790868134,\n            ],\n        ];\n\n        for case in cases.into_iter() {\n            let repr = FrRepr(case);\n            let val = PedersenDomain(repr);\n\n            for _ in 0..100 {\n                assert_eq!(val.into_bytes(), val.into_bytes());\n            }\n\n            let raw: &[u8] = val.as_ref();\n\n            for i in 0..4 {\n                assert_eq!(case[i], unsafe {\n                    let mut val = [0u8; 8];\n                    val.clone_from_slice(&raw[i * 8..(i + 1) * 8]);\n                    mem::transmute::<[u8; 8], u64>(val)\n                });\n            }\n        }\n    }\n\n    #[test]\n    fn test_serialize() {\n        let repr = FrRepr([1, 2, 3, 4]);\n        let val = PedersenDomain(repr);\n\n        let ser = serde_json::to_string(&val)\n            .expect(\"Failed to serialize `PedersenDomain` element to JSON string\");\n        let val_back = serde_json::from_str(&ser)\n            .expect(\"Failed to deserialize JSON string to `PedersenDomain`\");\n\n        assert_eq!(val, val_back);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/hasher/poseidon.rs",
    "content": "use std::hash::Hasher as StdHasher;\n\nuse crate::crypto::sloth;\nuse crate::error::{Error, Result};\nuse crate::hasher::types::{\n    PoseidonArity, PoseidonMDArity, POSEIDON_CONSTANTS_16, POSEIDON_CONSTANTS_2,\n    POSEIDON_CONSTANTS_4, POSEIDON_CONSTANTS_8, POSEIDON_MD_CONSTANTS,\n};\nuse crate::hasher::{Domain, HashFunction, Hasher};\nuse anyhow::ensure;\nuse bellperson::gadgets::{boolean, num};\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse ff::{Field, PrimeField, PrimeFieldRepr, ScalarEngine};\nuse generic_array::typenum;\nuse generic_array::typenum::marker_traits::Unsigned;\nuse merkletree::hash::{Algorithm as LightAlgorithm, Hashable};\nuse merkletree::merkle::Element;\nuse neptune::circuit::poseidon_hash;\nuse neptune::poseidon::Poseidon;\nuse paired::bls12_381::{Bls12, Fr, FrRepr};\nuse serde::{Deserialize, Serialize};\n\n#[derive(Default, Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]\npub struct PoseidonHasher {}\n\nimpl Hasher for PoseidonHasher {\n    type Domain = PoseidonDomain;\n    type Function = PoseidonFunction;\n\n    fn name() -> String {\n        \"poseidon_hasher\".into()\n    }\n\n    #[inline]\n    fn sloth_encode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain> {\n        // Unrapping here is safe; `Fr` elements and hash domain elements are the same byte length.\n        let key = Fr::from_repr(key.0)?;\n        let ciphertext = Fr::from_repr(ciphertext.0)?;\n        Ok(sloth::encode(&key, &ciphertext).into())\n    }\n\n    #[inline]\n    fn sloth_decode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain> {\n        // Unrapping here is safe; `Fr` elements and hash domain elements are the same byte length.\n        let key = Fr::from_repr(key.0)?;\n        let ciphertext = Fr::from_repr(ciphertext.0)?;\n\n        Ok(sloth::decode(&key, &ciphertext).into())\n    }\n}\n\n#[derive(Copy, Clone, Debug, PartialEq, Eq)]\npub struct PoseidonFunction(Fr);\n\nimpl Default for PoseidonFunction {\n    fn default() -> PoseidonFunction {\n        PoseidonFunction(Fr::from_repr(FrRepr::default()).expect(\"failed default\"))\n    }\n}\n\nimpl Hashable<PoseidonFunction> for Fr {\n    fn hash(&self, state: &mut PoseidonFunction) {\n        let mut bytes = Vec::with_capacity(32);\n        self.into_repr().write_le(&mut bytes).unwrap();\n        state.write(&bytes);\n    }\n}\n\nimpl Hashable<PoseidonFunction> for PoseidonDomain {\n    fn hash(&self, state: &mut PoseidonFunction) {\n        let mut bytes = Vec::with_capacity(32);\n        self.0\n            .write_le(&mut bytes)\n            .expect(\"Failed to write `FrRepr`\");\n        state.write(&bytes);\n    }\n}\n\n#[derive(Copy, Clone, Debug, Serialize, Deserialize)]\npub struct PoseidonDomain(pub FrRepr);\n\nimpl AsRef<PoseidonDomain> for PoseidonDomain {\n    fn as_ref(&self) -> &PoseidonDomain {\n        self\n    }\n}\n\nimpl std::hash::Hash for PoseidonDomain {\n    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {\n        let raw: &[u64] = self.0.as_ref();\n        std::hash::Hash::hash(raw, state);\n    }\n}\n\nimpl PartialEq for PoseidonDomain {\n    fn eq(&self, other: &Self) -> bool {\n        self.0.as_ref() == other.0.as_ref()\n    }\n}\n\nimpl Eq for PoseidonDomain {}\n\nimpl Default for PoseidonDomain {\n    fn default() -> PoseidonDomain {\n        PoseidonDomain(FrRepr::default())\n    }\n}\n\nimpl Ord for PoseidonDomain {\n    #[inline(always)]\n    fn cmp(&self, other: &PoseidonDomain) -> ::std::cmp::Ordering {\n        
(self.0).cmp(&other.0)\n    }\n}\n\nimpl PartialOrd for PoseidonDomain {\n    #[inline(always)]\n    fn partial_cmp(&self, other: &PoseidonDomain) -> Option<::std::cmp::Ordering> {\n        Some((self.0).cmp(&other.0))\n    }\n}\n\nimpl AsRef<[u8]> for PoseidonDomain {\n    #[inline]\n    fn as_ref(&self) -> &[u8] {\n        as_ref(&(self.0).0)\n    }\n}\n\n// This is unsafe, and I wish it wasn't here, but I really need AsRef<[u8]> to work, without allocating.\n// https://internals.rust-lang.org/t/safe-trasnsmute-for-slices-e-g-u64-u32-particularly-simd-types/2871\n// https://github.com/briansmith/ring/blob/abb3fdfc08562f3f02e95fb551604a871fd4195e/src/polyfill.rs#L93-L110\n#[inline(always)]\n#[allow(clippy::needless_lifetimes)]\nfn as_ref<'a>(src: &'a [u64; 4]) -> &'a [u8] {\n    unsafe {\n        std::slice::from_raw_parts(\n            src.as_ptr() as *const u8,\n            src.len() * std::mem::size_of::<u64>(),\n        )\n    }\n}\n\nimpl Domain for PoseidonDomain {\n    fn into_bytes(&self) -> Vec<u8> {\n        let mut out = Vec::with_capacity(PoseidonDomain::byte_len());\n        self.0.write_le(&mut out).unwrap();\n\n        out\n    }\n\n    fn try_from_bytes(raw: &[u8]) -> Result<Self> {\n        ensure!(raw.len() == PoseidonDomain::byte_len(), Error::BadFrBytes);\n        let mut res: FrRepr = Default::default();\n        res.read_le(raw)?;\n\n        Ok(PoseidonDomain(res))\n    }\n\n    fn write_bytes(&self, dest: &mut [u8]) -> Result<()> {\n        self.0.write_le(dest)?;\n        Ok(())\n    }\n\n    fn random<R: rand::RngCore>(rng: &mut R) -> Self {\n        // generating an Fr and converting it, to ensure we stay in the field\n        Fr::random(rng).into()\n    }\n}\n\nimpl Element for PoseidonDomain {\n    fn byte_len() -> usize {\n        32\n    }\n\n    fn from_slice(bytes: &[u8]) -> Self {\n        match PoseidonDomain::try_from_bytes(bytes) {\n            Ok(res) => res,\n            Err(err) => panic!(err),\n        }\n    }\n\n    fn copy_to_slice(&self, bytes: &mut [u8]) {\n        bytes.copy_from_slice(&self.into_bytes());\n    }\n}\n\nimpl StdHasher for PoseidonFunction {\n    #[inline]\n    fn write(&mut self, msg: &[u8]) {\n        self.0 = Fr::from_repr(shared_hash(msg).0).unwrap();\n    }\n\n    #[inline]\n    fn finish(&self) -> u64 {\n        unimplemented!()\n    }\n}\n\nfn shared_hash(data: &[u8]) -> PoseidonDomain {\n    // FIXME: We shouldn't unwrap here, but doing otherwise will require an interface change.\n    // We could truncate so `bytes_into_frs` cannot fail, then ensure `data` is always `fr_safe`.\n    let preimage = data\n        .chunks(32)\n        .map(|ref chunk| {\n            <Bls12 as ff::ScalarEngine>::Fr::from_repr(PoseidonDomain::from_slice(chunk).0).unwrap()\n        })\n        .collect::<Vec<_>>();\n\n    shared_hash_frs(&preimage).into()\n}\n\nfn shared_hash_frs(\n    preimage: &[<Bls12 as ff::ScalarEngine>::Fr],\n) -> <Bls12 as ff::ScalarEngine>::Fr {\n    match preimage.len() {\n        2 => {\n            let mut p = Poseidon::new_with_preimage(&preimage, &POSEIDON_CONSTANTS_2);\n            p.hash()\n        }\n        4 => {\n            let mut p = Poseidon::new_with_preimage(&preimage, &POSEIDON_CONSTANTS_4);\n            p.hash()\n        }\n        8 => {\n            let mut p = Poseidon::new_with_preimage(&preimage, &POSEIDON_CONSTANTS_8);\n            p.hash()\n        }\n        16 => {\n            let mut p = Poseidon::new_with_preimage(&preimage, &POSEIDON_CONSTANTS_16);\n            p.hash()\n        }\n\n     
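   // Poseidon constants are precomputed only for arities 2, 4, 8 and 16 (POSEIDON_CONSTANTS_*); any other preimage length is a caller bug.\n     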
   _ => panic!(\"Unsupported arity for Poseidon hasher: {}\", preimage.len()),\n    }\n}\n\nimpl HashFunction<PoseidonDomain> for PoseidonFunction {\n    fn hash(data: &[u8]) -> PoseidonDomain {\n        shared_hash(data)\n    }\n\n    fn hash2(a: &PoseidonDomain, b: &PoseidonDomain) -> PoseidonDomain {\n        let mut p =\n            Poseidon::new_with_preimage(&[(*a).into(), (*b).into()][..], &*POSEIDON_CONSTANTS_2);\n        let fr: <Bls12 as ScalarEngine>::Fr = p.hash();\n        fr.into()\n    }\n\n    fn hash_md(input: &[PoseidonDomain]) -> PoseidonDomain {\n        assert!(input.len() > 1, \"hash_md needs more than one element.\");\n        let arity = PoseidonMDArity::to_usize();\n\n        let mut p = Poseidon::new(&*POSEIDON_MD_CONSTANTS);\n\n        let fr_input = input\n            .iter()\n            .map(|x| <Bls12 as ScalarEngine>::Fr::from_repr(x.0).unwrap())\n            .collect::<Vec<_>>();\n\n        fr_input[1..]\n            .chunks(arity - 1)\n            .fold(fr_input[0], |acc, elts| {\n                p.reset();\n                p.input(acc).unwrap(); // These unwraps will panic iff arity is incorrect, but it was checked above.\n                elts.iter().for_each(|elt| {\n                    let _ = p.input(*elt).unwrap();\n                });\n                p.hash()\n            })\n            .into()\n    }\n\n    fn hash_leaf_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        left: &num::AllocatedNum<Bls12>,\n        right: &num::AllocatedNum<Bls12>,\n        _height: usize,\n    ) -> ::std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let preimage = vec![left.clone(), right.clone()];\n\n        poseidon_hash::<CS, Bls12, typenum::U2>(cs, preimage, typenum::U2::PARAMETERS())\n    }\n\n    fn hash_multi_leaf_circuit<Arity: 'static + PoseidonArity, CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        leaves: &[num::AllocatedNum<Bls12>],\n        _height: usize,\n    ) -> ::std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let params = Arity::PARAMETERS();\n        poseidon_hash::<CS, Bls12, Arity>(cs, leaves.to_vec(), params)\n    }\n\n    fn hash_md_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: &mut CS,\n        elements: &[num::AllocatedNum<Bls12>],\n    ) -> ::std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let params = PoseidonMDArity::PARAMETERS();\n        let arity = PoseidonMDArity::to_usize();\n\n        let mut hash = elements[0].clone();\n        let mut preimage = vec![hash.clone(); arity]; // Allocate. 
This will be overwritten.\n        let mut hash_num = 0;\n        for elts in elements[1..].chunks(arity - 1) {\n            preimage[0] = hash;\n            for (i, elt) in elts.iter().enumerate() {\n                preimage[i + 1] = elt.clone();\n            }\n            // any terminal padding\n            #[allow(clippy::needless_range_loop)]\n            for i in (elts.len() + 1)..arity {\n                preimage[i] =\n                    num::AllocatedNum::alloc(cs.namespace(|| format!(\"padding {}\", i)), || {\n                        Ok(Fr::zero())\n                    })\n                    .unwrap();\n            }\n            let cs = cs.namespace(|| format!(\"hash md {}\", hash_num));\n            hash =\n                poseidon_hash::<_, Bls12, PoseidonMDArity>(cs, preimage.clone(), params)?.clone();\n            hash_num += 1;\n        }\n\n        Ok(hash)\n    }\n\n    fn hash_circuit<CS: ConstraintSystem<Bls12>>(\n        _cs: CS,\n        _bits: &[boolean::Boolean],\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        unimplemented!();\n    }\n\n    fn hash2_circuit<CS>(\n        cs: CS,\n        a: &num::AllocatedNum<Bls12>,\n        b: &num::AllocatedNum<Bls12>,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        let preimage = vec![a.clone(), b.clone()];\n        poseidon_hash::<CS, Bls12, typenum::U2>(cs, preimage, typenum::U2::PARAMETERS())\n    }\n}\n\nimpl LightAlgorithm<PoseidonDomain> for PoseidonFunction {\n    #[inline]\n    fn hash(&mut self) -> PoseidonDomain {\n        self.0.into()\n    }\n\n    #[inline]\n    fn reset(&mut self) {\n        self.0 = Fr::from_repr(FrRepr::from(0)).expect(\"failed 0\");\n    }\n\n    fn leaf(&mut self, leaf: PoseidonDomain) -> PoseidonDomain {\n        leaf\n    }\n\n    fn node(\n        &mut self,\n        left: PoseidonDomain,\n        right: PoseidonDomain,\n        _height: usize,\n    ) -> PoseidonDomain {\n        shared_hash_frs(&[\n            <Bls12 as ff::ScalarEngine>::Fr::from_repr(left.0).unwrap(),\n            <Bls12 as ff::ScalarEngine>::Fr::from_repr(right.0).unwrap(),\n        ])\n        .into()\n    }\n\n    fn multi_node(&mut self, parts: &[PoseidonDomain], _height: usize) -> PoseidonDomain {\n        match parts.len() {\n            1 | 2 | 4 | 8 | 16 => shared_hash_frs(\n                &parts\n                    .iter()\n                    .map(|x| <Bls12 as ff::ScalarEngine>::Fr::from_repr(x.0).unwrap())\n                    .collect::<Vec<_>>(),\n            )\n            .into(),\n            arity => panic!(\"unsupported arity {}\", arity),\n        }\n    }\n}\n\nimpl From<Fr> for PoseidonDomain {\n    #[inline]\n    fn from(val: Fr) -> Self {\n        PoseidonDomain(val.into_repr())\n    }\n}\n\nimpl From<FrRepr> for PoseidonDomain {\n    #[inline]\n    fn from(val: FrRepr) -> Self {\n        PoseidonDomain(val)\n    }\n}\n\nimpl From<PoseidonDomain> for Fr {\n    #[inline]\n    fn from(val: PoseidonDomain) -> Self {\n        Fr::from_repr(val.0).unwrap()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use std::mem;\n\n    use crate::gadgets::{constraint, TestConstraintSystem};\n    use crate::merkle::MerkleTree;\n    use bellperson::gadgets::num;\n\n    #[test]\n    fn test_path() {\n        let values = [\n            PoseidonDomain(Fr::one().into_repr()),\n            PoseidonDomain(Fr::one().into_repr()),\n            
PoseidonDomain(Fr::one().into_repr()),\n            PoseidonDomain(Fr::one().into_repr()),\n        ];\n\n        let t = MerkleTree::<PoseidonHasher, typenum::U2>::new(values.iter().map(|x| *x)).unwrap();\n\n        let p = t.gen_proof(0).unwrap(); // create a proof for the first value = Fr::one()\n\n        assert_eq!(*p.path(), vec![0, 0]);\n        assert_eq!(\n            p.validate::<PoseidonFunction>()\n                .expect(\"failed to validate\"),\n            true\n        );\n    }\n\n    // #[test]\n    // fn test_poseidon_quad() {\n    //     let leaves = [Fr::one(), Fr::zero(), Fr::zero(), Fr::one()];\n\n    //     assert_eq!(Fr::zero().into_repr(), shared_hash_frs(&leaves[..]).0);\n    // }\n\n    #[test]\n    fn test_poseidon_hasher() {\n        let leaves = [\n            PoseidonDomain(Fr::one().into_repr()),\n            PoseidonDomain(Fr::zero().into_repr()),\n            PoseidonDomain(Fr::zero().into_repr()),\n            PoseidonDomain(Fr::one().into_repr()),\n        ];\n\n        let t = MerkleTree::<PoseidonHasher, typenum::U2>::new(leaves.iter().map(|x| *x)).unwrap();\n\n        assert_eq!(t.leafs(), 4);\n\n        let mut a = PoseidonFunction::default();\n\n        assert_eq!(t.read_at(0).unwrap(), leaves[0]);\n        assert_eq!(t.read_at(1).unwrap(), leaves[1]);\n        assert_eq!(t.read_at(2).unwrap(), leaves[2]);\n        assert_eq!(t.read_at(3).unwrap(), leaves[3]);\n\n        let i1 = a.node(leaves[0], leaves[1], 0);\n        a.reset();\n        let i2 = a.node(leaves[2], leaves[3], 0);\n        a.reset();\n\n        assert_eq!(t.read_at(4).unwrap(), i1);\n        assert_eq!(t.read_at(5).unwrap(), i2);\n\n        let root = a.node(i1, i2, 1);\n        a.reset();\n\n        assert_eq!(\n            t.read_at(4).unwrap().0,\n            FrRepr([\n                0xf8a4092bef029be0,\n                0x2deffc4feff5a3e0,\n                0x60949ee3e7f39a7d,\n                0x2df335798cd6ce2e\n            ])\n        );\n\n        let expected = FrRepr([\n            0x7f422271ae4eac64,\n            0x767b7565e9472cdd,\n            0x0354271e16d4c223,\n            0x5acce8e6359804c0,\n        ]);\n        let actual = t.read_at(6).unwrap().0;\n\n        assert_eq!(actual, expected);\n        assert_eq!(t.read_at(6).unwrap(), root);\n    }\n\n    #[test]\n    fn test_as_ref() {\n        let cases: Vec<[u64; 4]> = vec![\n            [0, 0, 0, 0],\n            [\n                14963070332212552755,\n                2414807501862983188,\n                16116531553419129213,\n                6357427774790868134,\n            ],\n        ];\n\n        for case in cases.into_iter() {\n            let repr = FrRepr(case);\n            let val = PoseidonDomain(repr);\n\n            for _ in 0..100 {\n                assert_eq!(val.into_bytes(), val.into_bytes());\n            }\n\n            let raw: &[u8] = val.as_ref();\n\n            for i in 0..4 {\n                assert_eq!(case[i], unsafe {\n                    let mut val = [0u8; 8];\n                    val.clone_from_slice(&raw[i * 8..(i + 1) * 8]);\n                    mem::transmute::<[u8; 8], u64>(val)\n                });\n            }\n        }\n    }\n\n    #[test]\n    fn test_serialize() {\n        let repr = FrRepr([1, 2, 3, 4]);\n        let val = PoseidonDomain(repr);\n\n        let ser = serde_json::to_string(&val)\n            .expect(\"Failed to serialize `PoseidonDomain` element to JSON string\");\n        let val_back = serde_json::from_str(&ser)\n
            .expect(\"Failed to deserialize JSON string to `PoseidonDomain`\");\n\n        assert_eq!(val, val_back);\n    }\n\n    #[test]\n    fn test_hash_md() {\n        // let arity = PoseidonMDArity::to_usize();\n        let n = 71;\n        let data = vec![PoseidonDomain(Fr::one().into_repr()); n];\n        let hashed = PoseidonFunction::hash_md(&data);\n\n        assert_eq!(\n            hashed,\n            PoseidonDomain(FrRepr([\n                0x23ff11d2d2a54e3a,\n                0x1393376e3c10d281,\n                0xca9aed2681cc9081,\n                0x04f01dc7b8b9b562\n            ]))\n        );\n    }\n\n    #[test]\n    fn test_hash_md_circuit() {\n        // let arity = PoseidonMDArity::to_usize();\n        let n = 71;\n        let data = vec![PoseidonDomain(Fr::one().into_repr()); n];\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let circuit_data = (0..n)\n            .map(|n| {\n                num::AllocatedNum::alloc(cs.namespace(|| format!(\"input {}\", n)), || Ok(Fr::one()))\n                    .unwrap()\n            })\n            .collect::<Vec<_>>();\n\n        let hashed = PoseidonFunction::hash_md(&data);\n        let hashed_fr = Fr::from_repr(hashed.0).unwrap();\n\n        let circuit_hashed =\n            PoseidonFunction::hash_md_circuit(&mut cs, circuit_data.as_slice()).unwrap();\n        let hashed_alloc =\n            &num::AllocatedNum::alloc(cs.namespace(|| \"calculated\"), || Ok(hashed_fr)).unwrap();\n        constraint::equal(\n            &mut cs.namespace(|| \"enforce correct\"),\n            || \"correct result\",\n            &hashed_alloc,\n            &circuit_hashed,\n        );\n\n        assert!(cs.is_satisfied());\n        let expected_constraints = 2_777;\n        let actual_constraints = cs.num_constraints();\n\n        assert_eq!(expected_constraints, actual_constraints);\n\n        assert_eq!(hashed_fr, circuit_hashed.get_value().unwrap());\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/hasher/sha256.rs",
    "content": "use std::hash::Hasher as StdHasher;\n\nuse anyhow::ensure;\nuse bellperson::gadgets::{boolean, num, sha256::sha256 as sha256_circuit};\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse ff::{Field, PrimeField, PrimeFieldRepr};\nuse merkletree::hash::{Algorithm, Hashable};\nuse merkletree::merkle::Element;\nuse paired::bls12_381::{Bls12, Fr, FrRepr};\nuse rand::RngCore;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\n\nuse super::{Domain, HashFunction, Hasher};\nuse crate::crypto::sloth;\nuse crate::error::*;\nuse crate::gadgets::multipack;\n\n#[derive(Default, Copy, Clone, Debug, PartialEq, Eq)]\npub struct Sha256Hasher {}\n\nimpl Hasher for Sha256Hasher {\n    type Domain = Sha256Domain;\n    type Function = Sha256Function;\n\n    fn name() -> String {\n        \"sha256_hasher\".into()\n    }\n\n    fn sloth_encode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain> {\n        // TODO: validate this is how sloth should work in this case\n        let k = (*key).into();\n        let c = (*ciphertext).into();\n\n        Ok(sloth::encode(&k, &c).into())\n    }\n\n    fn sloth_decode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain> {\n        // TODO: validate this is how sloth should work in this case\n        Ok(sloth::decode(&(*key).into(), &(*ciphertext).into()).into())\n    }\n}\n\n#[derive(Default, Clone, Debug)]\npub struct Sha256Function(Sha256);\n\nimpl StdHasher for Sha256Function {\n    #[inline]\n    fn write(&mut self, msg: &[u8]) {\n        self.0.input(msg)\n    }\n\n    #[inline]\n    fn finish(&self) -> u64 {\n        unreachable!(\"unused by Function -- should never be called\")\n    }\n}\n\n#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Default, Serialize, Deserialize, Hash)]\npub struct Sha256Domain(pub [u8; 32]);\n\nimpl std::fmt::Debug for Sha256Domain {\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        write!(f, \"Sha256Domain({})\", hex::encode(&self.0))\n    }\n}\n\nimpl AsRef<Sha256Domain> for Sha256Domain {\n    fn as_ref(&self) -> &Self {\n        self\n    }\n}\n\nimpl Sha256Domain {\n    fn trim_to_fr32(&mut self) {\n        // strip last two bits, to ensure result is in Fr.\n        self.0[31] &= 0b0011_1111;\n    }\n}\n\nimpl AsRef<[u8]> for Sha256Domain {\n    fn as_ref(&self) -> &[u8] {\n        &self.0[..]\n    }\n}\n\nimpl Hashable<Sha256Function> for Sha256Domain {\n    fn hash(&self, state: &mut Sha256Function) {\n        state.write(self.as_ref())\n    }\n}\n\nimpl From<Fr> for Sha256Domain {\n    fn from(val: Fr) -> Self {\n        let mut res = Self::default();\n        val.into_repr().write_le(&mut res.0[0..32]).unwrap();\n\n        res\n    }\n}\n\nimpl From<FrRepr> for Sha256Domain {\n    fn from(val: FrRepr) -> Self {\n        let mut res = Self::default();\n        val.write_le(&mut res.0[0..32]).unwrap();\n\n        res\n    }\n}\n\nimpl From<Sha256Domain> for Fr {\n    fn from(val: Sha256Domain) -> Self {\n        let mut res = FrRepr::default();\n        res.read_le(&val.0[0..32]).unwrap();\n\n        Fr::from_repr(res).unwrap()\n    }\n}\n\nimpl Domain for Sha256Domain {\n    fn into_bytes(&self) -> Vec<u8> {\n        self.0.to_vec()\n    }\n\n    fn try_from_bytes(raw: &[u8]) -> Result<Self> {\n        ensure!(\n            raw.len() == Sha256Domain::byte_len(),\n            Error::InvalidInputSize\n        );\n\n        let mut res = Sha256Domain::default();\n        
res.0.copy_from_slice(&raw[0..Sha256Domain::byte_len()]);\n        Ok(res)\n    }\n\n    fn write_bytes(&self, dest: &mut [u8]) -> Result<()> {\n        ensure!(\n            dest.len() >= Sha256Domain::byte_len(),\n            Error::InvalidInputSize\n        );\n\n        dest[0..Sha256Domain::byte_len()].copy_from_slice(&self.0[..]);\n        Ok(())\n    }\n\n    fn random<R: RngCore>(rng: &mut R) -> Self {\n        // generating an Fr and converting it, to ensure we stay in the field\n        Fr::random(rng).into()\n    }\n}\n\nimpl Element for Sha256Domain {\n    fn byte_len() -> usize {\n        32\n    }\n\n    fn from_slice(bytes: &[u8]) -> Self {\n        match Sha256Domain::try_from_bytes(bytes) {\n            Ok(res) => res,\n            Err(err) => panic!(err),\n        }\n    }\n\n    fn copy_to_slice(&self, bytes: &mut [u8]) {\n        bytes.copy_from_slice(&self.0);\n    }\n}\n\nimpl HashFunction<Sha256Domain> for Sha256Function {\n    fn hash(data: &[u8]) -> Sha256Domain {\n        let hashed = Sha256::digest(data);\n        let mut res = Sha256Domain::default();\n        res.0.copy_from_slice(&hashed[..]);\n        res.trim_to_fr32();\n        res\n    }\n\n    fn hash2(a: &Sha256Domain, b: &Sha256Domain) -> Sha256Domain {\n        let hashed = Sha256::new()\n            .chain(AsRef::<[u8]>::as_ref(a))\n            .chain(AsRef::<[u8]>::as_ref(b))\n            .result();\n        let mut res = Sha256Domain::default();\n        res.0.copy_from_slice(&hashed[..]);\n        res.trim_to_fr32();\n        res\n    }\n\n    fn hash_multi_leaf_circuit<Arity, CS: ConstraintSystem<Bls12>>(\n        mut cs: CS,\n        leaves: &[num::AllocatedNum<Bls12>],\n        _height: usize,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let mut bits = Vec::with_capacity(leaves.len() * Fr::CAPACITY as usize);\n        for (i, leaf) in leaves.iter().enumerate() {\n            let mut padded = leaf.to_bits_le(cs.namespace(|| format!(\"{}_num_into_bits\", i)))?;\n            while padded.len() % 8 != 0 {\n                padded.push(boolean::Boolean::Constant(false));\n            }\n\n            bits.extend(\n                padded\n                    .chunks_exact(8)\n                    .flat_map(|chunk| chunk.iter().rev())\n                    .cloned(),\n            );\n        }\n        Self::hash_circuit(cs, &bits)\n    }\n\n    fn hash_leaf_bits_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        left: &[boolean::Boolean],\n        right: &[boolean::Boolean],\n        _height: usize,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let mut preimage: Vec<boolean::Boolean> = vec![];\n\n        let mut left_padded = left.to_vec();\n        while left_padded.len() % 8 != 0 {\n            left_padded.push(boolean::Boolean::Constant(false));\n        }\n\n        preimage.extend(\n            left_padded\n                .chunks_exact(8)\n                .flat_map(|chunk| chunk.iter().rev())\n                .cloned(),\n        );\n\n        let mut right_padded = right.to_vec();\n        while right_padded.len() % 8 != 0 {\n            right_padded.push(boolean::Boolean::Constant(false));\n        }\n\n        preimage.extend(\n            right_padded\n                .chunks_exact(8)\n                .flat_map(|chunk| chunk.iter().rev())\n                .cloned(),\n        );\n\n        Self::hash_circuit(cs, &preimage[..])\n    }\n\n    fn hash_circuit<CS: ConstraintSystem<Bls12>>(\n        mut cs: CS,\n   
     bits: &[boolean::Boolean],\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let be_bits = sha256_circuit(cs.namespace(|| \"hash\"), &bits[..])?;\n        let le_bits = be_bits\n            .chunks(8)\n            .flat_map(|chunk| chunk.iter().rev())\n            .cloned()\n            .take(Fr::CAPACITY as usize)\n            .collect::<Vec<_>>();\n        multipack::pack_bits(cs.namespace(|| \"pack_le\"), &le_bits)\n    }\n\n    fn hash2_circuit<CS>(\n        mut cs: CS,\n        a_num: &num::AllocatedNum<Bls12>,\n        b_num: &num::AllocatedNum<Bls12>,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        // Allocate as booleans\n        let a = a_num.to_bits_le(cs.namespace(|| \"a_bits\"))?;\n        let b = b_num.to_bits_le(cs.namespace(|| \"b_bits\"))?;\n\n        let mut preimage: Vec<boolean::Boolean> = vec![];\n\n        let mut a_padded = a.to_vec();\n        while a_padded.len() % 8 != 0 {\n            a_padded.push(boolean::Boolean::Constant(false));\n        }\n\n        preimage.extend(\n            a_padded\n                .chunks_exact(8)\n                .flat_map(|chunk| chunk.iter().rev())\n                .cloned(),\n        );\n\n        let mut b_padded = b.to_vec();\n        while b_padded.len() % 8 != 0 {\n            b_padded.push(boolean::Boolean::Constant(false));\n        }\n\n        preimage.extend(\n            b_padded\n                .chunks_exact(8)\n                .flat_map(|chunk| chunk.iter().rev())\n                .cloned(),\n        );\n\n        Self::hash_circuit(cs, &preimage[..])\n    }\n}\n\nimpl Algorithm<Sha256Domain> for Sha256Function {\n    #[inline]\n    fn hash(&mut self) -> Sha256Domain {\n        let mut h = [0u8; 32];\n        h.copy_from_slice(self.0.clone().result().as_ref());\n        let mut dd = Sha256Domain::from(h);\n        dd.trim_to_fr32();\n        dd\n    }\n\n    #[inline]\n    fn reset(&mut self) {\n        self.0.reset();\n    }\n\n    fn leaf(&mut self, leaf: Sha256Domain) -> Sha256Domain {\n        leaf\n    }\n\n    fn node(&mut self, left: Sha256Domain, right: Sha256Domain, _height: usize) -> Sha256Domain {\n        left.hash(self);\n        right.hash(self);\n        self.hash()\n    }\n\n    fn multi_node(&mut self, parts: &[Sha256Domain], _height: usize) -> Sha256Domain {\n        for part in parts {\n            part.hash(self)\n        }\n        self.hash()\n    }\n}\n\nimpl From<[u8; 32]> for Sha256Domain {\n    #[inline]\n    fn from(val: [u8; 32]) -> Self {\n        Sha256Domain(val)\n    }\n}\n\nimpl From<Sha256Domain> for [u8; 32] {\n    #[inline]\n    fn from(val: Sha256Domain) -> Self {\n        val.0\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use crate::fr32::fr_into_bytes;\n    use crate::gadgets::TestConstraintSystem;\n    use crate::util::bytes_into_boolean_vec;\n\n    use bellperson::gadgets::boolean::Boolean;\n    use bellperson::ConstraintSystem;\n    use ff::Field;\n    use merkletree::hash::Algorithm;\n    use paired::bls12_381::{Bls12, Fr};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn hash_leaf_bits_circuit() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let left_fr = Fr::random(rng);\n        let right_fr = Fr::random(rng);\n        let left: Vec<u8> = fr_into_bytes(&left_fr);\n        let right: Vec<u8> = 
fr_into_bytes(&right_fr);\n        let height = 1;\n\n        let left_bits: Vec<Boolean> = {\n            let mut cs = cs.namespace(|| \"left\");\n            bytes_into_boolean_vec(&mut cs, Some(left.as_slice()), 256).unwrap()\n        };\n\n        let right_bits: Vec<Boolean> = {\n            let mut cs = cs.namespace(|| \"right\");\n            bytes_into_boolean_vec(&mut cs, Some(right.as_slice()), 256).unwrap()\n        };\n\n        let out = Sha256Function::hash_leaf_bits_circuit(\n            cs.namespace(|| \"hash_leaf_circuit\"),\n            &left_bits,\n            &right_bits,\n            height,\n        )\n        .expect(\"key derivation function failed\");\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n        assert_eq!(cs.num_constraints(), 45_387);\n\n        let expected: Fr = Sha256Function::default()\n            .node(left_fr.into(), right_fr.into(), height)\n            .into();\n\n        assert_eq!(\n            expected,\n            out.get_value().unwrap(),\n            \"circuit and non circuit do not match\"\n        );\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/hasher/types.rs",
    "content": "use bellperson::gadgets::{boolean, num};\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse generic_array::typenum::{U0, U11, U16, U2, U24, U36, U4, U8};\nuse lazy_static::lazy_static;\nuse merkletree::hash::{Algorithm as LightAlgorithm, Hashable as LightHashable};\nuse merkletree::merkle::Element;\nuse neptune::poseidon::PoseidonConstants;\nuse paired::bls12_381::{Bls12, Fr, FrRepr};\nuse serde::de::DeserializeOwned;\nuse serde::ser::Serialize;\n\nuse crate::error::Result;\n\npub type PoseidonBinaryArity = U2;\npub type PoseidonQuadArity = U4;\npub type PoseidonOctArity = U8;\n\n/// Arity to use by default for `hash_md` with poseidon.\npub type PoseidonMDArity = U36;\n\n/// Arity to use for hasher implementations (Poseidon) which are specialized at compile time.\n/// Must match PoseidonArity\npub const MERKLE_TREE_ARITY: usize = 2;\n\nlazy_static! {\n    pub static ref POSEIDON_CONSTANTS_2: PoseidonConstants::<Bls12, U2> = PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_4: PoseidonConstants::<Bls12, U4> = PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_8: PoseidonConstants::<Bls12, U8> = PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_16: PoseidonConstants::<Bls12, U16> =\n        PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_24: PoseidonConstants::<Bls12, U24> =\n        PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_36: PoseidonConstants::<Bls12, U36> =\n        PoseidonConstants::new();\n    pub static ref POSEIDON_CONSTANTS_11: PoseidonConstants::<Bls12, U11> =\n        PoseidonConstants::new();\n    pub static ref POSEIDON_MD_CONSTANTS: PoseidonConstants::<Bls12, PoseidonMDArity> =\n        PoseidonConstants::new();\n}\n\npub trait PoseidonArity: neptune::Arity<Fr> + Send + Sync + Clone + std::fmt::Debug {\n    #[allow(non_snake_case)]\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self>;\n}\n\nimpl PoseidonArity for U0 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        unreachable!(\"dummy implementation, do not ever call me\")\n    }\n}\n\nimpl PoseidonArity for U2 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_2\n    }\n}\n\nimpl PoseidonArity for U4 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_4\n    }\n}\n\nimpl PoseidonArity for U8 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_8\n    }\n}\n\nimpl PoseidonArity for U11 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_11\n    }\n}\n\nimpl PoseidonArity for U16 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_16\n    }\n}\nimpl PoseidonArity for U24 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_24\n    }\n}\nimpl PoseidonArity for U36 {\n    fn PARAMETERS() -> &'static PoseidonConstants<Bls12, Self> {\n        &*POSEIDON_CONSTANTS_36\n    }\n}\n\npub trait Domain:\n    Ord\n    + Copy\n    + Clone\n    + AsRef<[u8]>\n    + Default\n    + ::std::fmt::Debug\n    + Eq\n    + Send\n    + Sync\n    + From<Fr>\n    + From<FrRepr>\n    + Into<Fr>\n    + Serialize\n    + DeserializeOwned\n    + Element\n    + std::hash::Hash\n{\n    fn into_bytes(&self) -> Vec<u8>;\n    fn try_from_bytes(raw: &[u8]) -> Result<Self>;\n    /// Write itself into the given slice, LittleEndian bytes.\n    fn 
write_bytes(&self, _: &mut [u8]) -> Result<()>;\n\n    fn random<R: rand::RngCore>(rng: &mut R) -> Self;\n}\n\npub trait HashFunction<T: Domain>:\n    Clone + ::std::fmt::Debug + Send + Sync + LightAlgorithm<T>\n{\n    fn hash(data: &[u8]) -> T;\n    fn hash2(a: &T, b: &T) -> T;\n    fn hash_md(input: &[T]) -> T {\n        // Default to binary.\n        assert!(input.len() > 1, \"hash_md needs more than one element.\");\n        input\n            .iter()\n            .skip(1)\n            .fold(input[0], |acc, elt| Self::hash2(&acc, elt))\n    }\n\n    fn hash_leaf(data: &dyn LightHashable<Self>) -> T {\n        let mut a = Self::default();\n        data.hash(&mut a);\n        let item_hash = a.hash();\n        a.leaf(item_hash)\n    }\n\n    fn hash_single_node(data: &dyn LightHashable<Self>) -> T {\n        let mut a = Self::default();\n        data.hash(&mut a);\n        a.hash()\n    }\n\n    fn hash_leaf_circuit<CS: ConstraintSystem<Bls12>>(\n        mut cs: CS,\n        left: &num::AllocatedNum<Bls12>,\n        right: &num::AllocatedNum<Bls12>,\n        height: usize,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        let left_bits = left.to_bits_le(cs.namespace(|| \"left num into bits\"))?;\n        let right_bits = right.to_bits_le(cs.namespace(|| \"right num into bits\"))?;\n\n        Self::hash_leaf_bits_circuit(cs, &left_bits, &right_bits, height)\n    }\n\n    fn hash_multi_leaf_circuit<Arity: 'static + PoseidonArity, CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        leaves: &[num::AllocatedNum<Bls12>],\n        height: usize,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError>;\n\n    fn hash_md_circuit<CS: ConstraintSystem<Bls12>>(\n        _cs: &mut CS,\n        _elements: &[num::AllocatedNum<Bls12>],\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        unimplemented!();\n    }\n\n    fn hash_leaf_bits_circuit<CS: ConstraintSystem<Bls12>>(\n        _cs: CS,\n        _left: &[boolean::Boolean],\n        _right: &[boolean::Boolean],\n        _height: usize,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        unimplemented!();\n    }\n\n    fn hash_circuit<CS: ConstraintSystem<Bls12>>(\n        cs: CS,\n        bits: &[boolean::Boolean],\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError>;\n\n    fn hash2_circuit<CS>(\n        cs: CS,\n        a: &num::AllocatedNum<Bls12>,\n        b: &num::AllocatedNum<Bls12>,\n    ) -> std::result::Result<num::AllocatedNum<Bls12>, SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>;\n}\n\npub trait Hasher: Clone + ::std::fmt::Debug + Eq + Default + Send + Sync {\n    type Domain: Domain + LightHashable<Self::Function> + AsRef<Self::Domain>;\n    type Function: HashFunction<Self::Domain>;\n\n    fn sloth_encode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain>;\n    fn sloth_decode(key: &Self::Domain, ciphertext: &Self::Domain) -> Result<Self::Domain>;\n\n    fn name() -> String;\n}\n"
  },
  {
    "path": "storage-proofs/core/src/lib.rs",
    "content": "#![deny(clippy::all, clippy::perf, clippy::correctness)]\n#![allow(clippy::many_single_char_names)]\n#![allow(clippy::unreadable_literal)]\n#![allow(clippy::type_repetition_in_bounds)]\n\n#[macro_use]\npub mod test_helper;\n\npub mod cache_key;\npub mod compound_proof;\npub mod crypto;\npub mod data;\npub mod drgraph;\npub mod error;\npub mod fr32;\npub mod gadgets;\npub mod hasher;\npub mod measurements;\npub mod merkle;\npub mod multi_proof;\npub mod parameter_cache;\npub mod partitions;\npub mod pieces;\npub mod por;\npub mod proof;\npub mod sector;\npub mod settings;\npub mod util;\n\npub use self::data::Data;\n\n#[cfg(test)]\npub(crate) const TEST_SEED: [u8; 16] = [\n    0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5,\n];\n"
  },
  {
    "path": "storage-proofs/core/src/measurements.rs",
    "content": "#[cfg(feature = \"measurements\")]\nuse std::sync::mpsc::{channel, Receiver, Sender};\n#[cfg(feature = \"measurements\")]\nuse std::sync::Mutex;\n#[cfg(not(feature = \"measurements\"))]\nuse std::time::Duration;\n#[cfg(feature = \"measurements\")]\nuse std::time::{Duration, Instant};\n\n#[cfg(feature = \"measurements\")]\nuse cpu_time::ProcessTime;\n\nuse serde::Serialize;\n\n#[cfg(feature = \"measurements\")]\nuse lazy_static::lazy_static;\n\n#[cfg(feature = \"measurements\")]\nlazy_static! {\n    pub static ref OP_MEASUREMENTS: (\n        Mutex<Option<Sender<OpMeasurement>>>,\n        Mutex<Receiver<OpMeasurement>>\n    ) = {\n        // create asynchronous channel with unlimited buffer\n        let (tx, rx) = channel();\n        (Mutex::new(Some(tx)), Mutex::new(rx))\n    };\n}\n\n#[derive(Debug, Serialize)]\n#[serde(rename_all = \"kebab-case\")]\npub struct OpMeasurement {\n    pub op: Operation,\n    pub cpu_time: Duration,\n    pub wall_time: Duration,\n}\n\n#[derive(Debug, Serialize)]\n#[serde(rename_all = \"kebab-case\")]\npub enum Operation {\n    AddPiece,\n    GeneratePieceCommitment,\n    GenerateTreeC,\n    GenerateTreeRLast,\n    CommD,\n    EncodeWindowTimeAll,\n    WindowCommLeavesTime,\n    PorepCommitTime,\n    PostInclusionProofs,\n    PostFinalizeTicket,\n    PostReadChallengedRange,\n    PostPartialTicketHash,\n}\n\n#[cfg(feature = \"measurements\")]\npub fn measure_op<T, F>(op: Operation, f: F) -> T\nwhere\n    F: FnOnce() -> T,\n{\n    let cpu_time_start = ProcessTime::now();\n    let wall_start_time = Instant::now();\n\n    #[cfg(feature = \"profile\")]\n    gperftools::profiler::PROFILER\n        .lock()\n        .unwrap()\n        .start(format!(\"./{:?}.profile\", op))\n        .unwrap();\n    let x = f();\n    #[cfg(feature = \"profile\")]\n    gperftools::profiler::PROFILER\n        .lock()\n        .unwrap()\n        .stop()\n        .unwrap();\n\n    let opt_tx = OP_MEASUREMENTS\n        .0\n        .lock()\n        .expect(\"acquire lock on tx side of perf channel\");\n\n    if let Some(tx) = opt_tx.as_ref() {\n        tx.clone()\n            .send(OpMeasurement {\n                op,\n                cpu_time: cpu_time_start.elapsed(),\n                wall_time: wall_start_time.elapsed(),\n            })\n            .expect(\"failed to send to perf channel\");\n    }\n\n    x\n}\n\n#[cfg(not(feature = \"measurements\"))]\npub fn measure_op<T, F>(_: Operation, f: F) -> T\nwhere\n    F: FnOnce() -> T,\n{\n    f()\n}\n"
  },
  {
    "path": "storage-proofs/core/src/merkle/builders.rs",
    "content": "use std::io::Write;\nuse std::path::PathBuf;\n\nuse anyhow::{ensure, Result};\nuse generic_array::typenum::{self, Unsigned};\nuse log::trace;\nuse merkletree::merkle;\nuse merkletree::merkle::{\n    get_merkle_tree_leafs, is_merkle_tree_size_valid, FromIndexedParallelIterator,\n};\nuse merkletree::store::{ExternalReader, ReplicaConfig, Store, StoreConfig};\nuse rayon::prelude::*;\n\nuse crate::error::*;\nuse crate::hasher::{Domain, Hasher, PoseidonArity};\nuse crate::util::{data_at_node, NODE_SIZE};\n\nuse super::*;\n\n// Create a DiskTree from the provided config(s), each representing a 'base' layer tree with 'base_tree_len' elements.\npub fn create_disk_tree<Tree: MerkleTreeTrait>(\n    base_tree_len: usize,\n    configs: &[StoreConfig],\n) -> Result<DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>> {\n    let base_tree_leafs = get_merkle_tree_leafs(base_tree_len, Tree::Arity::to_usize())?;\n\n    if Tree::TopTreeArity::to_usize() > 0 {\n        ensure!(\n            Tree::SubTreeArity::to_usize() > 0,\n            \"Invalid top arity specified without sub arity\"\n        );\n\n        DiskTree::from_sub_tree_store_configs(base_tree_leafs, configs)\n    } else if Tree::SubTreeArity::to_usize() > 0 {\n        ensure!(\n            !configs.is_empty(),\n            \"Cannot create sub-tree with a single tree config\"\n        );\n\n        DiskTree::from_store_configs(base_tree_leafs, configs)\n    } else {\n        ensure!(configs.len() == 1, \"Invalid tree-shape specified\");\n        let store = DiskStore::new_from_disk(base_tree_len, Tree::Arity::to_usize(), &configs[0])?;\n\n        DiskTree::from_data_store(store, base_tree_leafs)\n    }\n}\n\n// Create an LCTree from the provided config(s) and replica(s), each representing a 'base' layer tree with 'base_tree_len' elements.\npub fn create_lc_tree<Tree: MerkleTreeTrait>(\n    base_tree_len: usize,\n    configs: &[StoreConfig],\n    replica_config: &ReplicaConfig,\n) -> Result<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>> {\n    let base_tree_leafs = get_merkle_tree_leafs(base_tree_len, Tree::Arity::to_usize())?;\n\n    if Tree::TopTreeArity::to_usize() > 0 {\n        ensure!(\n            Tree::SubTreeArity::to_usize() > 0,\n            \"Invalid top arity specified without sub arity\"\n        );\n\n        LCTree::from_sub_tree_store_configs_and_replica(base_tree_leafs, configs, replica_config)\n    } else if Tree::SubTreeArity::to_usize() > 0 {\n        ensure!(\n            !configs.is_empty(),\n            \"Cannot create sub-tree with a single tree config\"\n        );\n\n        LCTree::from_store_configs_and_replica(base_tree_leafs, configs, replica_config)\n    } else {\n        ensure!(configs.len() == 1, \"Invalid tree-shape specified\");\n        let store = LCStore::new_from_disk_with_reader(\n            base_tree_len,\n            Tree::Arity::to_usize(),\n            &configs[0],\n            ExternalReader::new_from_path(&replica_config.path)?,\n        )?;\n\n        LCTree::from_data_store(store, base_tree_leafs)\n    }\n}\n\n// Given base tree configs and optionally a replica_config, returns\n// either a disktree or an lctree, specified by Tree.\npub fn create_tree<Tree: MerkleTreeTrait>(\n    base_tree_len: usize,\n    configs: &[StoreConfig],\n    replica_config: Option<&ReplicaConfig>,\n) -> Result<\n    MerkleTreeWrapper<\n        <Tree as MerkleTreeTrait>::Hasher,\n        <Tree as MerkleTreeTrait>::Store,\n        <Tree as 
MerkleTreeTrait>::Arity,\n        <Tree as MerkleTreeTrait>::SubTreeArity,\n        <Tree as MerkleTreeTrait>::TopTreeArity,\n    >,\n>\nwhere\n    Tree::Store: 'static,\n{\n    use std::any::Any;\n\n    let base_tree_leafs = get_base_tree_leafs::<Tree>(base_tree_len)?;\n    let mut trees = Vec::with_capacity(configs.len());\n    for i in 0..configs.len() {\n        let mut store = Tree::Store::new_with_config(\n            base_tree_len,\n            Tree::Arity::to_usize(),\n            configs[i].clone(),\n        )?;\n        if let Some(lc_store) = Any::downcast_mut::<\n            merkletree::store::LevelCacheStore<<Tree::Hasher as Hasher>::Domain, std::fs::File>,\n        >(&mut store)\n        {\n            ensure!(\n                replica_config.is_some(),\n                \"Cannot create LCTree without replica paths\"\n            );\n            let replica_config = replica_config.unwrap();\n            lc_store.set_external_reader(ExternalReader::new_from_config(&replica_config, i)?)?;\n        }\n\n        if configs.len() == 1 {\n            return MerkleTreeWrapper::<\n                Tree::Hasher,\n                Tree::Store,\n                Tree::Arity,\n                Tree::SubTreeArity,\n                Tree::TopTreeArity,\n            >::from_data_store(store, base_tree_leafs);\n        } else {\n            trees.push(MerkleTreeWrapper::<\n                Tree::Hasher,\n                Tree::Store,\n                Tree::Arity,\n                typenum::U0,\n                typenum::U0,\n            >::from_data_store(store, base_tree_leafs)?);\n        }\n    }\n\n    ensure!(\n        Tree::TopTreeArity::to_usize() > 0 || Tree::SubTreeArity::to_usize() > 0,\n        \"Cannot have a sub/top tree without more than 1 config\"\n    );\n    if Tree::TopTreeArity::to_usize() > 0 {\n        ensure!(\n            Tree::SubTreeArity::to_usize() > 0,\n            \"Invalid top arity specified without sub arity\"\n        );\n\n        MerkleTreeWrapper::<\n            Tree::Hasher,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >::from_sub_trees_as_trees(trees)\n    } else {\n        ensure!(\n            !configs.is_empty(),\n            \"Cannot create sub-tree with a single tree config\"\n        );\n\n        MerkleTreeWrapper::from_trees(trees)\n    }\n}\n\npub fn create_base_merkle_tree<Tree: MerkleTreeTrait>(\n    config: Option<StoreConfig>,\n    size: usize,\n    data: &[u8],\n) -> Result<Tree> {\n    ensure!(\n        data.len() == (NODE_SIZE * size) as usize,\n        Error::InvalidMerkleTreeArgs(data.len(), NODE_SIZE, size)\n    );\n\n    trace!(\"create_merkle_tree called with size {}\", size);\n    trace!(\n        \"is_merkle_tree_size_valid({}, arity {}) = {}\",\n        size,\n        Tree::Arity::to_usize(),\n        is_merkle_tree_size_valid(size, Tree::Arity::to_usize())\n    );\n    ensure!(\n        is_merkle_tree_size_valid(size, Tree::Arity::to_usize()),\n        \"Invalid merkle tree size given the arity\"\n    );\n\n    let f = |i| {\n        // TODO Replace `expect()` with `context()` (problem is the parallel iterator)\n        let d = data_at_node(&data, i).expect(\"data_at_node math failed\");\n        // TODO/FIXME: This can panic. FOR NOW, let's leave this since we're experimenting with\n        // optimization paths. 
However, we need to ensure that bad input will not lead to a panic\n        // that isn't caught by the FPS API.\n        // Unfortunately, it's not clear how to perform this error-handling in the parallel\n        // iterator case.\n        <Tree::Hasher as Hasher>::Domain::try_from_bytes(d)\n            .expect(\"failed to convert node data to domain element\")\n    };\n\n    let tree = match config {\n        Some(x) => merkle::MerkleTree::<\n            <Tree::Hasher as Hasher>::Domain,\n            <Tree::Hasher as Hasher>::Function,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >::from_par_iter_with_config((0..size).into_par_iter().map(f), x),\n        None => merkle::MerkleTree::<\n            <Tree::Hasher as Hasher>::Domain,\n            <Tree::Hasher as Hasher>::Function,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >::from_par_iter((0..size).into_par_iter().map(f)),\n    }?;\n\n    Ok(Tree::from_merkle(tree))\n}\n\n/// Construct a new level cache merkle tree, given the specified\n/// config.\n///\n/// Note that while we don't need to pass both the data AND the\n/// replica path (since the replica file will contain the same data),\n/// we pass both since we have access from all callers and this avoids\n/// reading that data from the replica_config here.\npub fn create_base_lcmerkle_tree<H: Hasher, BaseTreeArity: 'static + PoseidonArity>(\n    config: StoreConfig,\n    size: usize,\n    data: &[u8],\n    replica_config: &ReplicaConfig,\n) -> Result<LCMerkleTree<H, BaseTreeArity>> {\n    trace!(\"create_base_lcmerkle_tree called with size {}\", size);\n    trace!(\n        \"is_merkle_tree_size_valid({}, arity {}) = {}\",\n        size,\n        BaseTreeArity::to_usize(),\n        is_merkle_tree_size_valid(size, BaseTreeArity::to_usize())\n    );\n    ensure!(\n        is_merkle_tree_size_valid(size, BaseTreeArity::to_usize()),\n        \"Invalid merkle tree size given the arity\"\n    );\n    ensure!(\n        data.len() == size * std::mem::size_of::<H::Domain>(),\n        \"Invalid data length for merkle tree\"\n    );\n\n    let f = |i| {\n        let d = data_at_node(&data, i)?;\n        H::Domain::try_from_bytes(d)\n    };\n\n    let mut lc_tree: LCMerkleTree<H, BaseTreeArity> =\n        LCMerkleTree::<H, BaseTreeArity>::try_from_iter_with_config((0..size).map(f), config)?;\n\n    lc_tree.set_external_reader_path(&replica_config.path)?;\n\n    Ok(lc_tree)\n}\n\n// Given a StoreConfig, generate additional ones with appended numbers\n// to uniquely identify them and return the results.  If count is 1,\n// the original config is not modified.\npub fn split_config(config: StoreConfig, count: usize) -> Result<Vec<StoreConfig>> {\n    if count == 1 {\n        return Ok(vec![config]);\n    }\n\n    let mut configs = Vec::with_capacity(count);\n    for i in 0..count {\n        configs.push(StoreConfig::from_config(\n            &config,\n            format!(\"{}-{}\", config.id, i),\n            None,\n        ));\n        configs[i].rows_to_discard = config.rows_to_discard;\n    }\n\n    Ok(configs)\n}\n\n// Given a StoreConfig, generate additional ones with appended numbers\n// to uniquely identify them and return the results.  
If count is 1,\n// the original config is not modified.\n//\n// Useful for testing, where the config may be None.\npub fn split_config_wrapped(\n    config: Option<StoreConfig>,\n    count: usize,\n) -> Result<Vec<Option<StoreConfig>>> {\n    if count == 1 {\n        return Ok(vec![config]);\n    }\n\n    match config {\n        Some(c) => {\n            let mut configs = Vec::with_capacity(count);\n            for i in 0..count {\n                configs.push(Some(StoreConfig::from_config(\n                    &c,\n                    format!(\"{}-{}\", c.id, i),\n                    None,\n                )));\n            }\n            Ok(configs)\n        }\n        None => Ok(vec![None]),\n    }\n}\n\n// Given a StoreConfig, replica path and tree_width (leaf nodes),\n// append numbers to each StoreConfig to uniquely identify them and\n// return the results along with a ReplicaConfig using calculated\n// offsets into the single replica path specified for later use with\n// external readers.  If count is 1, the original config is not\n// modified.\npub fn split_config_and_replica(\n    config: StoreConfig,\n    replica_path: PathBuf,\n    sub_tree_width: usize, // nodes, not bytes\n    count: usize,\n) -> Result<(Vec<StoreConfig>, ReplicaConfig)> {\n    if count == 1 {\n        return Ok((\n            vec![config],\n            ReplicaConfig {\n                path: replica_path,\n                offsets: vec![0],\n            },\n        ));\n    }\n\n    let mut configs = Vec::with_capacity(count);\n    let mut replica_offsets = Vec::with_capacity(count);\n\n    for i in 0..count {\n        configs.push(StoreConfig::from_config(\n            &config,\n            format!(\"{}-{}\", config.id, i),\n            None,\n        ));\n        configs[i].rows_to_discard = config.rows_to_discard;\n\n        replica_offsets.push(i * sub_tree_width * NODE_SIZE);\n    }\n\n    Ok((\n        configs,\n        ReplicaConfig {\n            path: replica_path,\n            offsets: replica_offsets,\n        },\n    ))\n}\n\npub fn get_base_tree_count<Tree: MerkleTreeTrait>() -> usize {\n    if Tree::TopTreeArity::to_usize() == 0 && Tree::SubTreeArity::to_usize() == 0 {\n        return 1;\n    }\n\n    if Tree::TopTreeArity::to_usize() > 0 {\n        assert!(Tree::SubTreeArity::to_usize() != 0);\n\n        Tree::TopTreeArity::to_usize() * Tree::SubTreeArity::to_usize()\n    } else {\n        Tree::SubTreeArity::to_usize()\n    }\n}\n\npub fn get_base_tree_leafs<Tree: MerkleTreeTrait>(base_tree_size: usize) -> Result<usize> {\n    get_merkle_tree_leafs(base_tree_size, Tree::Arity::to_usize())\n}\n\npub type ResTree<Tree> = MerkleTreeWrapper<\n    <Tree as MerkleTreeTrait>::Hasher,\n    <Tree as MerkleTreeTrait>::Store,\n    <Tree as MerkleTreeTrait>::Arity,\n    <Tree as MerkleTreeTrait>::SubTreeArity,\n    <Tree as MerkleTreeTrait>::TopTreeArity,\n>;\n\nfn generate_base_tree<R: rand::Rng, Tree: MerkleTreeTrait>(\n    rng: &mut R,\n    nodes: usize,\n    temp_path: Option<PathBuf>,\n) -> (Vec<u8>, ResTree<Tree>)\nwhere\n    Tree::Store: 'static,\n{\n    let elements = (0..nodes)\n        .map(|_| <Tree::Hasher as Hasher>::Domain::random(rng))\n        .collect::<Vec<_>>();\n\n    let mut data = Vec::new();\n    for el in &elements {\n        data.extend_from_slice(AsRef::<[u8]>::as_ref(el));\n    }\n\n    if let Some(ref temp_path) = temp_path {\n        let id: u64 = rng.gen();\n        let replica_path = temp_path.join(format!(\"replica-path-{}\", id));\n        let config = StoreConfig::new(\n
            &temp_path,\n            format!(\"test-lc-tree-{}\", id),\n            StoreConfig::default_rows_to_discard(nodes, Tree::Arity::to_usize()),\n        );\n\n        let mut tree =\n            MerkleTreeWrapper::try_from_iter_with_config(elements.iter().map(|v| (Ok(*v))), config)\n                .unwrap();\n\n        // Write out the replica data.\n        let mut f = std::fs::File::create(&replica_path).unwrap();\n        f.write_all(&data).unwrap();\n\n        {\n            // Beware: evil dynamic downcasting RUST MAGIC down below.\n            use std::any::Any;\n\n            if let Some(lc_tree) = Any::downcast_mut::<\n                merkle::MerkleTree<\n                    <Tree::Hasher as Hasher>::Domain,\n                    <Tree::Hasher as Hasher>::Function,\n                    merkletree::store::LevelCacheStore<\n                        <Tree::Hasher as Hasher>::Domain,\n                        std::fs::File,\n                    >,\n                    Tree::Arity,\n                    Tree::SubTreeArity,\n                    Tree::TopTreeArity,\n                >,\n            >(&mut tree.inner)\n            {\n                lc_tree.set_external_reader_path(&replica_path).unwrap();\n            }\n        }\n\n        (data, tree)\n    } else {\n        (\n            data,\n            MerkleTreeWrapper::try_from_iter(elements.iter().map(|v| Ok(*v))).unwrap(),\n        )\n    }\n}\n\nfn generate_sub_tree<R: rand::Rng, Tree: MerkleTreeTrait>(\n    rng: &mut R,\n    nodes: usize,\n    temp_path: Option<PathBuf>,\n) -> (Vec<u8>, ResTree<Tree>)\nwhere\n    Tree::Store: 'static,\n{\n    let base_tree_count = Tree::SubTreeArity::to_usize();\n    let base_tree_size = nodes / base_tree_count;\n    let mut trees = Vec::with_capacity(base_tree_count);\n    let mut data = Vec::new();\n\n    for _ in 0..base_tree_count {\n        let (inner_data, tree) = generate_base_tree::<\n            R,\n            MerkleTreeWrapper<Tree::Hasher, Tree::Store, Tree::Arity>,\n        >(rng, base_tree_size, temp_path.clone());\n        trees.push(tree);\n        data.extend(inner_data);\n    }\n\n    (data, MerkleTreeWrapper::from_trees(trees).unwrap())\n}\n\n/// Only used for testing, but can't cfg-test it as that stops exports.\npub fn generate_tree<Tree: MerkleTreeTrait, R: rand::Rng>(\n    rng: &mut R,\n    nodes: usize,\n    temp_path: Option<PathBuf>,\n) -> (Vec<u8>, ResTree<Tree>)\nwhere\n    Tree::Store: 'static,\n{\n    let sub_tree_arity = Tree::SubTreeArity::to_usize();\n    let top_tree_arity = Tree::TopTreeArity::to_usize();\n\n    if top_tree_arity > 0 {\n        assert!(\n            sub_tree_arity != 0,\n            \"malformed tree with TopTreeArity > 0 and SubTreeArity == 0\"\n        );\n\n        let mut sub_trees = Vec::with_capacity(top_tree_arity);\n        let mut data = Vec::new();\n        for _i in 0..top_tree_arity {\n            let (inner_data, tree) = generate_sub_tree::<\n                R,\n                MerkleTreeWrapper<\n                    Tree::Hasher,\n                    Tree::Store,\n                    Tree::Arity,\n                    Tree::SubTreeArity,\n                    typenum::U0,\n                >,\n            >(rng, nodes / top_tree_arity, temp_path.clone());\n\n            sub_trees.push(tree);\n            data.extend(inner_data);\n        }\n        (data, MerkleTreeWrapper::from_sub_trees(sub_trees).unwrap())\n    } else if sub_tree_arity > 0 {\n        generate_sub_tree::<R, Tree>(rng, nodes, temp_path)\n 
   } else {\n        generate_base_tree::<R, Tree>(rng, nodes, temp_path)\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/merkle/mod.rs",
    "content": "#![allow(clippy::len_without_is_empty)]\n\nuse generic_array::typenum::{U0, U2, U4, U8};\n\nuse crate::hasher::Hasher;\n\nmod builders;\nmod proof;\nmod tree;\n\npub use builders::*;\npub use proof::*;\npub use tree::*;\n\n// Reexport here, so we don't depend on merkletree directly in other places.\npub use merkletree::store::{ExternalReader, Store};\n\npub type DiskStore<E> = merkletree::store::DiskStore<E>;\npub type LCStore<E> = merkletree::store::LevelCacheStore<E, std::fs::File>;\n\npub type MerkleStore<T> = DiskStore<T>;\n\npub type DiskTree<H, U, V, W> = MerkleTreeWrapper<H, DiskStore<<H as Hasher>::Domain>, U, V, W>;\npub type LCTree<H, U, V, W> = MerkleTreeWrapper<H, LCStore<<H as Hasher>::Domain>, U, V, W>;\n\npub type MerkleTree<H, U> = DiskTree<H, U, U0, U0>;\npub type LCMerkleTree<H, U> = LCTree<H, U, U0, U0>;\n\npub type BinaryMerkleTree<H> = MerkleTree<H, U2>;\npub type BinaryLCMerkleTree<H> = LCMerkleTree<H, U2>;\n\npub type BinarySubMerkleTree<H> = DiskTree<H, U2, U2, U0>;\n\npub type QuadMerkleTree<H> = MerkleTree<H, U4>;\npub type QuadLCMerkleTree<H> = LCMerkleTree<H, U4>;\n\npub type OctMerkleTree<H> = DiskTree<H, U8, U0, U0>;\npub type OctSubMerkleTree<H> = DiskTree<H, U8, U2, U0>;\npub type OctTopMerkleTree<H> = DiskTree<H, U8, U8, U2>;\n\npub type OctLCMerkleTree<H> = LCTree<H, U8, U0, U0>;\npub type OctLCSubMerkleTree<H> = LCTree<H, U8, U2, U0>;\npub type OctLCTopMerkleTree<H> = LCTree<H, U8, U8, U2>;\n"
  },
  {
    "path": "storage-proofs/core/src/merkle/proof.rs",
    "content": "#![allow(clippy::len_without_is_empty)]\n\nuse std::marker::PhantomData;\n\nuse anyhow::{ensure, Result};\nuse generic_array::typenum::{Unsigned, U0};\nuse merkletree::hash::Algorithm;\nuse merkletree::proof;\nuse paired::bls12_381::Fr;\nuse serde::{Deserialize, Serialize};\n\nuse crate::drgraph::graph_height;\nuse crate::hasher::{Hasher, PoseidonArity};\n\n/// Trait to abstract over the concept of Merkle Proof.\npub trait MerkleProofTrait:\n    Clone + Serialize + serde::de::DeserializeOwned + std::fmt::Debug + Sync + Send\n{\n    type Hasher: Hasher;\n    type Arity: 'static + PoseidonArity;\n    type SubTreeArity: 'static + PoseidonArity;\n    type TopTreeArity: 'static + PoseidonArity;\n\n    /// Try to convert a merkletree proof into this structure.\n    fn try_from_proof(\n        p: proof::Proof<<Self::Hasher as Hasher>::Domain, Self::Arity>,\n    ) -> Result<Self>;\n\n    fn as_options(&self) -> Vec<(Vec<Option<Fr>>, Option<usize>)> {\n        self.path()\n            .iter()\n            .map(|v| {\n                (\n                    v.0.iter().copied().map(Into::into).map(Some).collect(),\n                    Some(v.1),\n                )\n            })\n            .collect::<Vec<_>>()\n    }\n\n    fn into_options_with_leaf(self) -> (Option<Fr>, Vec<(Vec<Option<Fr>>, Option<usize>)>) {\n        let leaf = self.leaf();\n        let path = self.path();\n        (\n            Some(leaf.into()),\n            path.into_iter()\n                .map(|(a, b)| {\n                    (\n                        a.iter().copied().map(Into::into).map(Some).collect(),\n                        Some(b),\n                    )\n                })\n                .collect::<Vec<_>>(),\n        )\n    }\n    fn as_pairs(&self) -> Vec<(Vec<Fr>, usize)> {\n        self.path()\n            .iter()\n            .map(|v| (v.0.iter().copied().map(Into::into).collect(), v.1))\n            .collect::<Vec<_>>()\n    }\n    fn verify(&self) -> bool;\n\n    /// Validates the MerkleProof and that it corresponds to the supplied node.\n    ///\n    /// TODO: audit performance and usage in case verification is\n    /// unnecessary based on how it's used.\n    fn validate(&self, node: usize) -> bool {\n        if !self.verify() {\n            return false;\n        }\n\n        node == self.path_index()\n    }\n\n    fn validate_data(&self, data: <Self::Hasher as Hasher>::Domain) -> bool {\n        if !self.verify() {\n            return false;\n        }\n\n        self.leaf() == data\n    }\n\n    fn leaf(&self) -> <Self::Hasher as Hasher>::Domain;\n    fn root(&self) -> <Self::Hasher as Hasher>::Domain;\n    fn len(&self) -> usize;\n    fn path(&self) -> Vec<(Vec<<Self::Hasher as Hasher>::Domain>, usize)>;\n\n    fn path_index(&self) -> usize {\n        self.path()\n            .iter()\n            .rev()\n            .fold(0, |acc, (_, index)| (acc * Self::Arity::to_usize()) + index)\n    }\n\n    fn proves_challenge(&self, challenge: usize) -> bool {\n        self.path_index() == challenge\n    }\n\n    /// Calcluates the exected length of the full path, given the number of leaves in the base layer.\n    fn expected_len(&self, leaves: usize) -> usize {\n        compound_path_length::<Self::Arity, Self::SubTreeArity, Self::TopTreeArity>(leaves)\n    }\n\n    /// Test only method to break a valid proof.\n    #[cfg(test)]\n    fn break_me(&mut self, leaf: <Self::Hasher as Hasher>::Domain);\n}\n\npub fn base_path_length<A: Unsigned, B: Unsigned, C: Unsigned>(leaves: usize) -> usize {\n    
let leaves = if C::to_usize() > 0 {\n        leaves / C::to_usize() / B::to_usize()\n    } else if B::to_usize() > 0 {\n        leaves / B::to_usize()\n    } else {\n        leaves\n    };\n\n    graph_height::<A>(leaves) - 1\n}\n\npub fn compound_path_length<A: Unsigned, B: Unsigned, C: Unsigned>(leaves: usize) -> usize {\n    let mut len = base_path_length::<A, B, C>(leaves);\n    if B::to_usize() > 0 {\n        len += 1;\n    }\n\n    if C::to_usize() > 0 {\n        len += 1;\n    }\n\n    len\n}\npub fn compound_tree_height<A: Unsigned, B: Unsigned, C: Unsigned>(leaves: usize) -> usize {\n    // base layer\n    let a = graph_height::<A>(leaves) - 1;\n\n    // sub tree layer\n    let b = if B::to_usize() > 0 {\n        B::to_usize() - 1\n    } else {\n        0\n    };\n\n    // top tree layer\n    let c = if C::to_usize() > 0 {\n        C::to_usize() - 1\n    } else {\n        0\n    };\n\n    a + b + c\n}\n\nmacro_rules! forward_method {\n    ($caller:expr, $name:ident) => {\n        match $caller {\n            ProofData::Single(ref proof) => proof.$name(),\n            ProofData::Sub(ref proof) => proof.$name(),\n            ProofData::Top(ref proof) => proof.$name(),\n        }\n    };\n    ($caller:expr, $name:ident, $( $args:expr ),+) => {\n        match $caller {\n            ProofData::Single(ref proof) => proof.$name($($args),+),\n            ProofData::Sub(ref proof) => proof.$name($($args),+),\n            ProofData::Top(ref proof) => proof.$name($($args),+),\n        }\n    };\n}\n\n#[derive(Debug, Default, Clone, Serialize, Deserialize)]\npub struct InclusionPath<H: Hasher, Arity: PoseidonArity> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    path: Vec<PathElement<H, Arity>>,\n}\n\nimpl<H: Hasher, Arity: PoseidonArity> From<Vec<PathElement<H, Arity>>> for InclusionPath<H, Arity> {\n    fn from(path: Vec<PathElement<H, Arity>>) -> Self {\n        Self { path }\n    }\n}\n\nimpl<H: Hasher, Arity: PoseidonArity> InclusionPath<H, Arity> {\n    /// Calculate the root of this path, given the leaf as input.\n    pub fn root(&self, leaf: H::Domain) -> H::Domain {\n        let mut a = H::Function::default();\n        (0..self.path.len()).fold(leaf, |h, height| {\n            a.reset();\n\n            let index = self.path[height].index;\n            let mut nodes = self.path[height].hashes.clone();\n            nodes.insert(index, h);\n\n            a.multi_node(&nodes, height)\n        })\n    }\n\n    pub fn len(&self) -> usize {\n        self.path.len()\n    }\n\n    pub fn is_empty(&self) -> bool {\n        self.path.is_empty()\n    }\n\n    pub fn iter(&self) -> std::slice::Iter<PathElement<H, Arity>> {\n        self.path.iter()\n    }\n\n    pub fn path_index(&self) -> usize {\n        self.path\n            .iter()\n            .rev()\n            .fold(0, |acc, p| (acc * Arity::to_usize()) + p.index)\n    }\n}\n\n#[derive(Debug, Default, Clone, Serialize, Deserialize)]\npub struct PathElement<H: Hasher, Arity: PoseidonArity> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    hashes: Vec<H::Domain>,\n    index: usize,\n    #[serde(skip)]\n    _arity: PhantomData<Arity>,\n}\n\n/// Representation of a merkle proof.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct MerkleProof<\n    H: Hasher,\n    BaseArity: PoseidonArity,\n    SubTreeArity: PoseidonArity = U0,\n    TopTreeArity: 
PoseidonArity = U0,\n> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    data: ProofData<H, BaseArity, SubTreeArity, TopTreeArity>,\n}\n\nimpl<\n        H: Hasher,\n        Arity: 'static + PoseidonArity,\n        SubTreeArity: 'static + PoseidonArity,\n        TopTreeArity: 'static + PoseidonArity,\n    > MerkleProofTrait for MerkleProof<H, Arity, SubTreeArity, TopTreeArity>\n{\n    type Hasher = H;\n    type Arity = Arity;\n    type SubTreeArity = SubTreeArity;\n    type TopTreeArity = TopTreeArity;\n\n    fn try_from_proof(\n        p: proof::Proof<<Self::Hasher as Hasher>::Domain, Self::Arity>,\n    ) -> Result<Self> {\n        if p.top_layer_nodes() > 0 {\n            Ok(MerkleProof {\n                data: ProofData::Top(TopProof::try_from_proof(p)?),\n            })\n        } else if p.sub_layer_nodes() > 0 {\n            Ok(MerkleProof {\n                data: ProofData::Sub(SubProof::try_from_proof(p)?),\n            })\n        } else {\n            Ok(MerkleProof {\n                data: ProofData::Single(SingleProof::try_from_proof(p)?),\n            })\n        }\n    }\n\n    fn verify(&self) -> bool {\n        forward_method!(self.data, verify)\n    }\n\n    fn leaf(&self) -> H::Domain {\n        forward_method!(self.data, leaf)\n    }\n\n    fn root(&self) -> H::Domain {\n        forward_method!(self.data, root)\n    }\n\n    fn len(&self) -> usize {\n        forward_method!(self.data, len)\n    }\n\n    fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {\n        forward_method!(self.data, path)\n    }\n    fn path_index(&self) -> usize {\n        forward_method!(self.data, path_index)\n    }\n\n    /// Test-only method to break a valid proof.\n    #[cfg(test)]\n    fn break_me(&mut self, leaf: H::Domain) {\n        match self.data {\n            ProofData::Single(ref mut proof) => {\n                proof.leaf = leaf;\n            }\n            ProofData::Sub(ref mut proof) => {\n                proof.leaf = leaf;\n            }\n            ProofData::Top(ref mut proof) => {\n                proof.leaf = leaf;\n            }\n        }\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\nenum ProofData<\n    H: Hasher,\n    BaseArity: PoseidonArity,\n    SubTreeArity: PoseidonArity,\n    TopTreeArity: PoseidonArity,\n> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    Single(SingleProof<H, BaseArity>),\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    Sub(SubProof<H, BaseArity, SubTreeArity>),\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    Top(TopProof<H, BaseArity, SubTreeArity, TopTreeArity>),\n}\n\n#[derive(Debug, Default, Clone, Serialize, Deserialize)]\nstruct SingleProof<H: Hasher, Arity: PoseidonArity> {\n    /// Root of the merkle tree.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    root: H::Domain,\n    /// The original leaf data for this proof.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    leaf: H::Domain,\n    /// The path from leaf to root.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = 
\"H::Domain: Deserialize<'de>\"\n    ))]\n    path: InclusionPath<H, Arity>,\n}\n\nimpl<H: Hasher, Arity: PoseidonArity> SingleProof<H, Arity> {\n    pub fn new(path: InclusionPath<H, Arity>, root: H::Domain, leaf: H::Domain) -> Self {\n        SingleProof { root, leaf, path }\n    }\n}\n\n#[derive(Debug, Default, Clone, Serialize, Deserialize)]\nstruct SubProof<H: Hasher, BaseArity: PoseidonArity, SubTreeArity: PoseidonArity> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    base_proof: InclusionPath<H, BaseArity>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    sub_proof: InclusionPath<H, SubTreeArity>,\n    /// Root of the merkle tree.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    root: H::Domain,\n    /// The original leaf data for this prof.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    leaf: H::Domain,\n}\n\nimpl<H: Hasher, BaseArity: PoseidonArity, SubTreeArity: PoseidonArity>\n    SubProof<H, BaseArity, SubTreeArity>\n{\n    pub fn new(\n        base_proof: InclusionPath<H, BaseArity>,\n        sub_proof: InclusionPath<H, SubTreeArity>,\n        root: H::Domain,\n        leaf: H::Domain,\n    ) -> Self {\n        Self {\n            base_proof,\n            sub_proof,\n            root,\n            leaf,\n        }\n    }\n}\n\n#[derive(Debug, Default, Clone, Serialize, Deserialize)]\nstruct TopProof<\n    H: Hasher,\n    BaseArity: PoseidonArity,\n    SubTreeArity: PoseidonArity,\n    TopTreeArity: PoseidonArity,\n> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    base_proof: InclusionPath<H, BaseArity>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    sub_proof: InclusionPath<H, SubTreeArity>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    top_proof: InclusionPath<H, TopTreeArity>,\n    /// Root of the merkle tree.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    root: H::Domain,\n    /// The original leaf data for this prof.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    leaf: H::Domain,\n}\n\nimpl<\n        H: Hasher,\n        BaseArity: PoseidonArity,\n        SubTreeArity: PoseidonArity,\n        TopTreeArity: PoseidonArity,\n    > TopProof<H, BaseArity, SubTreeArity, TopTreeArity>\n{\n    pub fn new(\n        base_proof: InclusionPath<H, BaseArity>,\n        sub_proof: InclusionPath<H, SubTreeArity>,\n        top_proof: InclusionPath<H, TopTreeArity>,\n        root: H::Domain,\n        leaf: H::Domain,\n    ) -> Self {\n        Self {\n            base_proof,\n            sub_proof,\n            top_proof,\n            root,\n            leaf,\n        }\n    }\n}\n\nimpl<\n        H: Hasher,\n        BaseArity: PoseidonArity,\n        SubTreeArity: PoseidonArity,\n        TopTreeArity: PoseidonArity,\n    > MerkleProof<H, BaseArity, SubTreeArity, TopTreeArity>\n{\n    pub fn new(n: usize) -> 
Self {\n        let root = Default::default();\n        let leaf = Default::default();\n        let path_elem = PathElement {\n            hashes: vec![Default::default(); BaseArity::to_usize()],\n            index: 0,\n            _arity: Default::default(),\n        };\n        let path = vec![path_elem; n];\n        MerkleProof {\n            data: ProofData::Single(SingleProof::new(path.into(), root, leaf)),\n        }\n    }\n}\n\n/// Converts a merkle_light proof to a SingleProof\nfn proof_to_single<H: Hasher, Arity: PoseidonArity, TargetArity: PoseidonArity>(\n    proof: &proof::Proof<H::Domain, Arity>,\n    lemma_start_index: usize,\n    sub_root: Option<H::Domain>,\n) -> SingleProof<H, TargetArity> {\n    let root = proof.root();\n    let leaf = if let Some(sub_root) = sub_root {\n        sub_root\n    } else {\n        proof.item()\n    };\n    let path = extract_path::<H, TargetArity>(proof.lemma(), proof.path(), lemma_start_index);\n\n    SingleProof::new(path, root, leaf)\n}\n\n/// 'lemma_start_index' is required because sub/top proofs start at\n/// index 0 and base proofs start at index 1 (skipping the leaf at the\n/// front)\nfn extract_path<H: Hasher, Arity: PoseidonArity>(\n    lemma: &[H::Domain],\n    path: &[usize],\n    lemma_start_index: usize,\n) -> InclusionPath<H, Arity> {\n    let path = lemma[lemma_start_index..lemma.len() - 1]\n        .chunks(Arity::to_usize() - 1)\n        .zip(path.iter())\n        .map(|(hashes, index)| PathElement {\n            hashes: hashes.to_vec(),\n            index: *index,\n            _arity: Default::default(),\n        })\n        .collect::<Vec<_>>();\n\n    path.into()\n}\n\nimpl<H: Hasher, Arity: 'static + PoseidonArity> SingleProof<H, Arity> {\n    fn try_from_proof(p: proof::Proof<<H as Hasher>::Domain, Arity>) -> Result<Self> {\n        Ok(proof_to_single(&p, 1, None))\n    }\n\n    fn verify(&self) -> bool {\n        let calculated_root = self.path.root(self.leaf);\n        self.root == calculated_root\n    }\n\n    fn leaf(&self) -> H::Domain {\n        self.leaf\n    }\n\n    fn root(&self) -> H::Domain {\n        self.root\n    }\n\n    fn len(&self) -> usize {\n        self.path.len() * (Arity::to_usize() - 1) + 2\n    }\n\n    fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {\n        self.path\n            .iter()\n            .map(|x| (x.hashes.clone(), x.index))\n            .collect::<Vec<_>>()\n    }\n\n    fn path_index(&self) -> usize {\n        self.path.path_index()\n    }\n}\n\nimpl<H: Hasher, Arity: 'static + PoseidonArity, SubTreeArity: 'static + PoseidonArity>\n    SubProof<H, Arity, SubTreeArity>\n{\n    fn try_from_proof(p: proof::Proof<<H as Hasher>::Domain, Arity>) -> Result<Self> {\n        ensure!(\n            p.sub_layer_nodes() == SubTreeArity::to_usize(),\n            \"sub arity mismatch\"\n        );\n        ensure!(\n            p.sub_tree_proof.is_some(),\n            \"Cannot generate sub proof without a base-proof\"\n        );\n        let base_p = p.sub_tree_proof.as_ref().unwrap();\n\n        // Generate SubProof\n        let root = p.root();\n        let leaf = base_p.item();\n        let base_proof = extract_path::<H, Arity>(base_p.lemma(), base_p.path(), 1);\n        let sub_proof = extract_path::<H, SubTreeArity>(p.lemma(), p.path(), 0);\n\n        Ok(SubProof::new(base_proof, sub_proof, root, leaf))\n    }\n\n    fn verify(&self) -> bool {\n        let sub_leaf = self.base_proof.root(self.leaf);\n        let calculated_root = self.sub_proof.root(sub_leaf);\n\n        self.root == 
calculated_root\n    }\n\n    fn leaf(&self) -> H::Domain {\n        self.leaf\n    }\n\n    fn root(&self) -> H::Domain {\n        self.root\n    }\n\n    fn len(&self) -> usize {\n        SubTreeArity::to_usize()\n    }\n\n    fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {\n        self.base_proof\n            .iter()\n            .map(|x| (x.hashes.clone(), x.index))\n            .chain(self.sub_proof.iter().map(|x| (x.hashes.clone(), x.index)))\n            .collect()\n    }\n\n    fn path_index(&self) -> usize {\n        let mut base_proof_leaves = 1;\n        for _i in 0..self.base_proof.len() {\n            base_proof_leaves *= Arity::to_usize()\n        }\n\n        let sub_proof_index = self.sub_proof.path_index();\n\n        (sub_proof_index * base_proof_leaves) + self.base_proof.path_index()\n    }\n}\n\nimpl<\n        H: Hasher,\n        Arity: 'static + PoseidonArity,\n        SubTreeArity: 'static + PoseidonArity,\n        TopTreeArity: 'static + PoseidonArity,\n    > TopProof<H, Arity, SubTreeArity, TopTreeArity>\n{\n    fn try_from_proof(p: proof::Proof<<H as Hasher>::Domain, Arity>) -> Result<Self> {\n        ensure!(\n            p.top_layer_nodes() == TopTreeArity::to_usize(),\n            \"top arity mismatch\"\n        );\n        ensure!(\n            p.sub_layer_nodes() == SubTreeArity::to_usize(),\n            \"sub arity mismatch\"\n        );\n\n        ensure!(\n            p.sub_tree_proof.is_some(),\n            \"Cannot generate top proof without a sub-proof\"\n        );\n        let sub_p = p.sub_tree_proof.as_ref().unwrap();\n\n        ensure!(\n            sub_p.sub_tree_proof.is_some(),\n            \"Cannot generate top proof without a base-proof\"\n        );\n        let base_p = sub_p.sub_tree_proof.as_ref().unwrap();\n\n        let root = p.root();\n        let leaf = base_p.item();\n\n        let base_proof = extract_path::<H, Arity>(base_p.lemma(), base_p.path(), 1);\n        let sub_proof = extract_path::<H, SubTreeArity>(sub_p.lemma(), sub_p.path(), 0);\n        let top_proof = extract_path::<H, TopTreeArity>(p.lemma(), p.path(), 0);\n\n        Ok(TopProof::new(base_proof, sub_proof, top_proof, root, leaf))\n    }\n\n    fn verify(&self) -> bool {\n        let sub_leaf = self.base_proof.root(self.leaf);\n        let top_leaf = self.sub_proof.root(sub_leaf);\n        let calculated_root = self.top_proof.root(top_leaf);\n\n        self.root == calculated_root\n    }\n\n    fn leaf(&self) -> H::Domain {\n        self.leaf\n    }\n\n    fn root(&self) -> H::Domain {\n        self.root\n    }\n\n    fn len(&self) -> usize {\n        TopTreeArity::to_usize()\n    }\n\n    fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {\n        self.base_proof\n            .iter()\n            .map(|x| (x.hashes.clone(), x.index))\n            .chain(self.sub_proof.iter().map(|x| (x.hashes.clone(), x.index)))\n            .chain(self.top_proof.iter().map(|x| (x.hashes.clone(), x.index)))\n            .collect()\n    }\n\n    fn path_index(&self) -> usize {\n        let mut base_proof_leaves = 1;\n        for _i in 0..self.base_proof.len() {\n            base_proof_leaves *= Arity::to_usize()\n        }\n\n        let sub_proof_leaves = base_proof_leaves * SubTreeArity::to_usize();\n\n        let sub_proof_index = self.sub_proof.path_index();\n        let top_proof_index = self.top_proof.path_index();\n\n        (sub_proof_index * base_proof_leaves)\n            + (top_proof_index * sub_proof_leaves)\n            + self.base_proof.path_index()\n    
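    // Worked example (sketch): with Arity = U8 and a 2-element base path,\n        // base_proof_leaves = 8 * 8 = 64; with SubTreeArity = U4,\n        // sub_proof_leaves = 64 * 4 = 256. A proof with base index 5, sub index 2 and\n        // top index 1 then points at leaf 1 * 256 + 2 * 64 + 5 = 389.\n    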
}\n}\n\n#[cfg(test)]\nmod tests {\n    use super::super::*;\n\n    use generic_array::typenum;\n    use rand;\n\n    use crate::hasher::{Blake2sHasher, Domain, PedersenHasher, PoseidonHasher, Sha256Hasher};\n    use crate::merkle::{generate_tree, MerkleProofTrait};\n\n    fn merklepath<Tree: 'static + MerkleTreeTrait>() {\n        let node_size = 32;\n        let nodes = 64 * get_base_tree_count::<Tree>();\n\n        let mut rng = rand::thread_rng();\n        let (data, tree) = generate_tree::<Tree, _>(&mut rng, nodes, None);\n\n        for i in 0..nodes {\n            let proof = tree.gen_proof(i).unwrap();\n\n            assert!(proof.verify(), \"failed to validate\");\n\n            assert!(proof.validate(i), \"failed to validate valid merkle path\");\n            let data_slice = &data[i * node_size..(i + 1) * node_size].to_vec();\n            assert!(\n                proof.validate_data(\n                    <Tree::Hasher as Hasher>::Domain::try_from_bytes(data_slice).unwrap()\n                ),\n                \"failed to validate valid data\"\n            );\n        }\n    }\n\n    #[test]\n    fn merklepath_pedersen_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PedersenHasher,\n                DiskStore<<PedersenHasher as Hasher>::Domain>,\n                typenum::U2,\n                typenum::U0,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_pedersen_4() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PedersenHasher,\n                DiskStore<<PedersenHasher as Hasher>::Domain>,\n                typenum::U4,\n                typenum::U0,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_pedersen_8() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PedersenHasher,\n                DiskStore<<PedersenHasher as Hasher>::Domain>,\n                typenum::U8,\n                typenum::U0,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_pedersen_2_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PedersenHasher,\n                DiskStore<<PedersenHasher as Hasher>::Domain>,\n                typenum::U2,\n                typenum::U2,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_pedersen_2_2_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PedersenHasher,\n                DiskStore<<PedersenHasher as Hasher>::Domain>,\n                typenum::U2,\n                typenum::U2,\n                typenum::U2,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_poseidon_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n                DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                typenum::U2,\n                typenum::U0,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_poseidon_4() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n                DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                typenum::U4,\n                typenum::U0,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_poseidon_8() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n            
    DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                typenum::U8,\n                typenum::U0,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_poseidon_8_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n                DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                typenum::U8,\n                typenum::U2,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_poseidon_8_4() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n                DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                typenum::U8,\n                typenum::U4,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_poseidon_8_4_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n                DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                typenum::U8,\n                typenum::U4,\n                typenum::U2,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_sha256_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Sha256Hasher,\n                DiskStore<<Sha256Hasher as Hasher>::Domain>,\n                typenum::U2,\n                typenum::U0,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_sha256_4() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Sha256Hasher,\n                DiskStore<<Sha256Hasher as Hasher>::Domain>,\n                typenum::U4,\n                typenum::U0,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_sha256_2_4() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Sha256Hasher,\n                DiskStore<<Sha256Hasher as Hasher>::Domain>,\n                typenum::U2,\n                typenum::U4,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_sha256_top_2_4_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Sha256Hasher,\n                DiskStore<<Sha256Hasher as Hasher>::Domain>,\n                typenum::U2,\n                typenum::U4,\n                typenum::U2,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_blake2s_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Blake2sHasher,\n                DiskStore<<Blake2sHasher as Hasher>::Domain>,\n                typenum::U2,\n                typenum::U0,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_blake2s_4() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Blake2sHasher,\n                DiskStore<<Blake2sHasher as Hasher>::Domain>,\n                typenum::U4,\n                typenum::U0,\n                typenum::U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_blake2s_8_4_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Blake2sHasher,\n                DiskStore<<Blake2sHasher as Hasher>::Domain>,\n                typenum::U8,\n                typenum::U4,\n                typenum::U2,\n            >,\n        >();\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/merkle/tree.rs",
    "content": "#![allow(clippy::len_without_is_empty)]\n\nuse std::marker::PhantomData;\n\nuse anyhow::Result;\nuse generic_array::typenum::{self, U0};\nuse merkletree::hash::Hashable;\nuse merkletree::merkle;\nuse merkletree::merkle::FromIndexedParallelIterator;\nuse merkletree::store::{ReplicaConfig, StoreConfig};\nuse rayon::prelude::*;\n\nuse crate::hasher::{Hasher, PoseidonArity};\n\nuse super::*;\n\n/// Trait used to abstract over the way Merkle Trees are constructed and stored.\npub trait MerkleTreeTrait: Send + Sync + std::fmt::Debug {\n    type Arity: 'static + PoseidonArity;\n    type SubTreeArity: 'static + PoseidonArity;\n    type TopTreeArity: 'static + PoseidonArity;\n    type Hasher: 'static + Hasher;\n    type Store: Store<<Self::Hasher as Hasher>::Domain>;\n    type Proof: MerkleProofTrait<\n        Hasher = Self::Hasher,\n        Arity = Self::Arity,\n        SubTreeArity = Self::SubTreeArity,\n        TopTreeArity = Self::TopTreeArity,\n    >;\n\n    /// Print a unique name for this configuration.\n    fn display() -> String;\n    /// Returns the root hash of the tree.\n    fn root(&self) -> <Self::Hasher as Hasher>::Domain;\n    /// Creates a merkle proof of the node at the given index.\n    fn gen_proof(&self, index: usize) -> Result<Self::Proof>;\n    fn gen_cached_proof(&self, i: usize, rows_to_discard: Option<usize>) -> Result<Self::Proof>;\n    fn row_count(&self) -> usize;\n    fn leaves(&self) -> usize;\n    fn from_merkle(\n        tree: merkle::MerkleTree<\n            <Self::Hasher as Hasher>::Domain,\n            <Self::Hasher as Hasher>::Function,\n            Self::Store,\n            Self::Arity,\n            Self::SubTreeArity,\n            Self::TopTreeArity,\n        >,\n    ) -> Self;\n}\n\npub struct MerkleTreeWrapper<\n    H: Hasher,\n    S: Store<<H as Hasher>::Domain>,\n    U: PoseidonArity,\n    V: PoseidonArity = typenum::U0,\n    W: PoseidonArity = typenum::U0,\n> {\n    pub inner: merkle::MerkleTree<<H as Hasher>::Domain, <H as Hasher>::Function, S, U, V, W>,\n    pub h: PhantomData<H>,\n}\n\nimpl<\n        H: 'static + Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        U: 'static + PoseidonArity,\n        V: 'static + PoseidonArity,\n        W: 'static + PoseidonArity,\n    > MerkleTreeTrait for MerkleTreeWrapper<H, S, U, V, W>\n{\n    type Arity = U;\n    type SubTreeArity = V;\n    type TopTreeArity = W;\n    type Hasher = H;\n    type Store = S;\n    type Proof = MerkleProof<Self::Hasher, Self::Arity, Self::SubTreeArity, Self::TopTreeArity>;\n\n    fn display() -> String {\n        format!(\n            \"merkletree-{}-{}-{}-{}\",\n            H::name(),\n            U::to_usize(),\n            V::to_usize(),\n            W::to_usize()\n        )\n    }\n\n    fn root(&self) -> <Self::Hasher as Hasher>::Domain {\n        self.inner.root()\n    }\n\n    fn gen_proof(&self, i: usize) -> Result<Self::Proof> {\n        let proof = self.inner.gen_proof(i)?;\n\n        debug_assert!(proof.validate::<H::Function>().expect(\"validate failed\"));\n\n        MerkleProof::try_from_proof(proof)\n    }\n\n    fn gen_cached_proof(&self, i: usize, rows_to_discard: Option<usize>) -> Result<Self::Proof> {\n        if rows_to_discard.is_some() && rows_to_discard.unwrap() == 0 {\n            return self.gen_proof(i);\n        }\n\n        let proof = self.inner.gen_cached_proof(i, rows_to_discard)?;\n\n        debug_assert!(proof.validate::<H::Function>().expect(\"validate failed\"));\n\n        MerkleProof::try_from_proof(proof)\n    }\n\n    fn 
row_count(&self) -> usize {\n        self.inner.row_count()\n    }\n\n    fn leaves(&self) -> usize {\n        self.inner.leafs()\n    }\n\n    fn from_merkle(\n        tree: merkle::MerkleTree<\n            <Self::Hasher as Hasher>::Domain,\n            <Self::Hasher as Hasher>::Function,\n            Self::Store,\n            Self::Arity,\n            Self::SubTreeArity,\n            Self::TopTreeArity,\n        >,\n    ) -> Self {\n        tree.into()\n    }\n}\n\nimpl<\n        H: Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        U: PoseidonArity,\n        V: PoseidonArity,\n        W: PoseidonArity,\n    > From<merkle::MerkleTree<<H as Hasher>::Domain, <H as Hasher>::Function, S, U, V, W>>\n    for MerkleTreeWrapper<H, S, U, V, W>\n{\n    fn from(\n        tree: merkle::MerkleTree<<H as Hasher>::Domain, <H as Hasher>::Function, S, U, V, W>,\n    ) -> Self {\n        Self {\n            inner: tree,\n            h: Default::default(),\n        }\n    }\n}\n\nimpl<\n        H: Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        U: PoseidonArity,\n        V: PoseidonArity,\n        W: PoseidonArity,\n    > MerkleTreeWrapper<H, S, U, V, W>\n{\n    pub fn new<I: IntoIterator<Item = H::Domain>>(data: I) -> Result<Self> {\n        let tree = merkle::MerkleTree::new(data)?;\n        Ok(tree.into())\n    }\n\n    pub fn new_with_config<I: IntoIterator<Item = H::Domain>>(\n        data: I,\n        config: StoreConfig,\n    ) -> Result<Self> {\n        let tree = merkle::MerkleTree::new_with_config(data, config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_data_with_config<O: Hashable<H::Function>, I: IntoIterator<Item = O>>(\n        data: I,\n        config: StoreConfig,\n    ) -> Result<Self> {\n        let tree = merkle::MerkleTree::from_data_with_config(data, config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_data_store(data: S, leafs: usize) -> Result<Self> {\n        let tree = merkle::MerkleTree::from_data_store(data, leafs)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_byte_slice_with_config(data: &[u8], config: StoreConfig) -> Result<Self> {\n        let tree = merkle::MerkleTree::from_byte_slice_with_config(data, config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_tree_slice(data: &[u8], leafs: usize) -> Result<Self> {\n        let tree = merkle::MerkleTree::from_tree_slice(data, leafs)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_tree_slice_with_config(\n        data: &[u8],\n        leafs: usize,\n        config: StoreConfig,\n    ) -> Result<Self> {\n        let tree = merkle::MerkleTree::from_tree_slice_with_config(data, leafs, config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_trees(trees: Vec<MerkleTreeWrapper<H, S, U, U0, U0>>) -> Result<Self> {\n        let trees = trees.into_iter().map(|t| t.inner).collect();\n        let tree = merkle::MerkleTree::from_trees(trees)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_sub_trees(trees: Vec<MerkleTreeWrapper<H, S, U, V, U0>>) -> Result<Self> {\n        let trees = trees.into_iter().map(|t| t.inner).collect();\n        let tree = merkle::MerkleTree::from_sub_trees(trees)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_sub_trees_as_trees(trees: Vec<MerkleTreeWrapper<H, S, U, U0, U0>>) -> Result<Self> {\n        let trees = trees.into_iter().map(|t| t.inner).collect();\n        let tree = merkle::MerkleTree::from_sub_trees_as_trees(trees)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_slices(\n        tree_data: &[&[u8]],\n        leafs: usize,\n    ) 
-> Result<MerkleTreeWrapper<H, S, U, V, U0>> {\n        let tree = merkle::MerkleTree::<\n                <H as Hasher>::Domain, <H as Hasher>::Function, S, U, V, U0\n        >::from_slices(tree_data, leafs)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_slices_with_configs(\n        tree_data: &[&[u8]],\n        leafs: usize,\n        configs: &[StoreConfig],\n    ) -> Result<Self> {\n        let tree = merkle::MerkleTree::from_slices_with_configs(tree_data, leafs, configs)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_stores(leafs: usize, stores: Vec<S>) -> Result<Self> {\n        let tree = merkle::MerkleTree::from_stores(leafs, stores)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_store_configs(leafs: usize, configs: &[StoreConfig]) -> Result<Self> {\n        let tree = merkle::MerkleTree::from_store_configs(leafs, configs)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_store_configs_and_replica(\n        leafs: usize,\n        configs: &[StoreConfig],\n        replica_config: &ReplicaConfig,\n    ) -> Result<LCTree<H, U, V, W>> {\n        let tree =\n            merkle::MerkleTree::from_store_configs_and_replica(leafs, configs, replica_config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_sub_tree_store_configs(leafs: usize, configs: &[StoreConfig]) -> Result<Self> {\n        let tree = merkle::MerkleTree::from_sub_tree_store_configs(leafs, configs)?;\n        Ok(tree.into())\n    }\n\n    pub fn try_from_iter<I: IntoIterator<Item = Result<H::Domain>>>(into: I) -> Result<Self> {\n        let tree = merkle::MerkleTree::try_from_iter(into)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_sub_tree_store_configs_and_replica(\n        leafs: usize,\n        configs: &[StoreConfig],\n        replica_config: &ReplicaConfig,\n    ) -> Result<LCTree<H, U, V, W>> {\n        let tree = merkle::MerkleTree::from_sub_tree_store_configs_and_replica(\n            leafs,\n            configs,\n            replica_config,\n        )?;\n        Ok(tree.into())\n    }\n\n    pub fn try_from_iter_with_config<I: IntoIterator<Item = Result<H::Domain>>>(\n        into: I,\n        config: StoreConfig,\n    ) -> Result<Self> {\n        let tree = merkle::MerkleTree::try_from_iter_with_config(into, config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_par_iter<I>(par_iter: I) -> Result<Self>\n    where\n        I: IntoParallelIterator<Item = H::Domain>,\n        I::Iter: IndexedParallelIterator,\n    {\n        let tree = merkle::MerkleTree::from_par_iter(par_iter)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_par_iter_with_config<I>(par_iter: I, config: StoreConfig) -> Result<Self>\n    where\n        I: IntoParallelIterator<Item = H::Domain>,\n        I::Iter: IndexedParallelIterator,\n    {\n        let tree = merkle::MerkleTree::from_par_iter_with_config(par_iter, config)?;\n        Ok(tree.into())\n    }\n}\n\nimpl<\n        H: Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        BaseArity: PoseidonArity,\n        SubTreeArity: PoseidonArity,\n        TopTreeArity: PoseidonArity,\n    > std::fmt::Debug for MerkleTreeWrapper<H, S, BaseArity, SubTreeArity, TopTreeArity>\n{\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        f.debug_struct(\"MerkleTreeWrapper\")\n            .field(\"inner\", &self.inner)\n            .field(\"Hasher\", &H::name())\n            .finish()\n    }\n}\n\nimpl<\n        H: Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        BaseArity: PoseidonArity,\n        SubTreeArity: 
PoseidonArity,\n        TopTreeArity: PoseidonArity,\n    > std::ops::Deref for MerkleTreeWrapper<H, S, BaseArity, SubTreeArity, TopTreeArity>\n{\n    type Target =\n        merkle::MerkleTree<H::Domain, H::Function, S, BaseArity, SubTreeArity, TopTreeArity>;\n\n    fn deref(&self) -> &Self::Target {\n        &self.inner\n    }\n}\n\nimpl<\n        H: Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        BaseArity: PoseidonArity,\n        SubTreeArity: PoseidonArity,\n        TopTreeArity: PoseidonArity,\n    > std::ops::DerefMut for MerkleTreeWrapper<H, S, BaseArity, SubTreeArity, TopTreeArity>\n{\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        &mut self.inner\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/multi_proof.rs",
    "content": "use bellperson::groth16;\n\nuse crate::error::Result;\nuse anyhow::Context;\nuse paired::bls12_381::Bls12;\nuse std::io::{self, Read, Write};\n\npub struct MultiProof<'a> {\n    pub circuit_proofs: Vec<groth16::Proof<Bls12>>,\n    pub verifying_key: &'a groth16::VerifyingKey<Bls12>,\n}\n\nimpl<'a> MultiProof<'a> {\n    pub fn new(\n        groth_proofs: Vec<groth16::Proof<Bls12>>,\n        verifying_key: &'a groth16::VerifyingKey<Bls12>,\n    ) -> Self {\n        MultiProof {\n            circuit_proofs: groth_proofs,\n            verifying_key,\n        }\n    }\n\n    pub fn new_from_reader<R: Read>(\n        partitions: Option<usize>,\n        mut reader: R,\n        verifying_key: &'a groth16::VerifyingKey<Bls12>,\n    ) -> Result<Self> {\n        let num_proofs = match partitions {\n            Some(n) => n,\n            None => 1,\n        };\n        let proofs = (0..num_proofs)\n            .map(|_| groth16::Proof::read(&mut reader))\n            .collect::<io::Result<Vec<_>>>()?;\n\n        Ok(Self::new(proofs, verifying_key))\n    }\n\n    pub fn write<W: Write>(&self, mut writer: W) -> Result<()> {\n        for proof in &self.circuit_proofs {\n            proof.write(&mut writer)?\n        }\n        Ok(())\n    }\n\n    pub fn to_vec(&self) -> Result<Vec<u8>> {\n        let mut out = Vec::new();\n        self.write(&mut out).context(\"known allocation target\")?;\n        Ok(out)\n    }\n\n    pub fn len(&self) -> usize {\n        self.circuit_proofs.len()\n    }\n\n    pub fn is_empty(&self) -> bool {\n        self.circuit_proofs.is_empty()\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/parameter_cache.rs",
    "content": "use crate::error::*;\nuse anyhow::bail;\nuse bellperson::groth16::Parameters;\nuse bellperson::{groth16, Circuit};\nuse fs2::FileExt;\nuse itertools::Itertools;\nuse log::info;\nuse paired::bls12_381::Bls12;\nuse rand::RngCore;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\n\nuse std::env;\nuse std::fs::{self, create_dir_all, File};\nuse std::io::{self, SeekFrom};\nuse std::path::{Path, PathBuf};\n\n/// Bump this when circuits change to invalidate the cache.\npub const VERSION: usize = 26;\n\npub const PARAMETER_CACHE_ENV_VAR: &str = \"FIL_PROOFS_PARAMETER_CACHE\";\npub const PARAMETER_CACHE_DIR: &str = \"/var/tmp/filecoin-proof-parameters/\";\npub const GROTH_PARAMETER_EXT: &str = \"params\";\npub const PARAMETER_METADATA_EXT: &str = \"meta\";\npub const VERIFYING_KEY_EXT: &str = \"vk\";\n\n#[derive(Debug)]\nstruct LockedFile(File);\n\n// TODO: use in memory lock as well, as file locks do not guarantee exclusive access across OSes.\n\nimpl LockedFile {\n    pub fn open_exclusive_read<P: AsRef<Path>>(p: P) -> io::Result<Self> {\n        let f = fs::OpenOptions::new().read(true).open(p)?;\n        f.lock_exclusive()?;\n\n        Ok(LockedFile(f))\n    }\n\n    pub fn open_exclusive<P: AsRef<Path>>(p: P) -> io::Result<Self> {\n        let f = fs::OpenOptions::new()\n            .read(true)\n            .write(true)\n            .create(true)\n            .open(p)?;\n        f.lock_exclusive()?;\n\n        Ok(LockedFile(f))\n    }\n}\n\nimpl io::Write for LockedFile {\n    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {\n        self.0.write(buf)\n    }\n\n    fn flush(&mut self) -> io::Result<()> {\n        self.0.flush()\n    }\n}\n\nimpl io::Read for LockedFile {\n    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {\n        self.0.read(buf)\n    }\n}\n\nimpl io::Seek for LockedFile {\n    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {\n        self.0.seek(pos)\n    }\n}\n\nimpl Drop for LockedFile {\n    fn drop(&mut self) {\n        self.0\n            .unlock()\n            .unwrap_or_else(|e| panic!(\"{}: failed to {:?} unlock file safely\", e, &self.0));\n    }\n}\n\nfn parameter_cache_dir_name() -> String {\n    match env::var(PARAMETER_CACHE_ENV_VAR) {\n        Ok(dir) => dir,\n        Err(_) => String::from(PARAMETER_CACHE_DIR),\n    }\n}\n\npub fn parameter_cache_dir() -> PathBuf {\n    Path::new(&parameter_cache_dir_name()).to_path_buf()\n}\n\npub fn parameter_cache_params_path(parameter_set_identifier: &str) -> PathBuf {\n    let dir = Path::new(&parameter_cache_dir_name()).to_path_buf();\n    dir.join(format!(\n        \"v{}-{}.{}\",\n        VERSION, parameter_set_identifier, GROTH_PARAMETER_EXT\n    ))\n}\n\npub fn parameter_cache_metadata_path(parameter_set_identifier: &str) -> PathBuf {\n    let dir = Path::new(&parameter_cache_dir_name()).to_path_buf();\n    dir.join(format!(\n        \"v{}-{}.{}\",\n        VERSION, parameter_set_identifier, PARAMETER_METADATA_EXT\n    ))\n}\n\npub fn parameter_cache_verifying_key_path(parameter_set_identifier: &str) -> PathBuf {\n    let dir = Path::new(&parameter_cache_dir_name()).to_path_buf();\n    dir.join(format!(\n        \"v{}-{}.{}\",\n        VERSION, parameter_set_identifier, VERIFYING_KEY_EXT\n    ))\n}\n\nfn ensure_ancestor_dirs_exist(cache_entry_path: PathBuf) -> Result<PathBuf> {\n    info!(\n        \"ensuring that all ancestor directories for: {:?} exist\",\n        cache_entry_path\n    );\n\n    if let Some(parent_dir) = cache_entry_path.parent() {\n        if 
let Err(err) = create_dir_all(&parent_dir) {\n            match err.kind() {\n                io::ErrorKind::AlreadyExists => {}\n                _ => return Err(From::from(err)),\n            }\n        }\n    } else {\n        bail!(\"{:?} has no parent directory\", cache_entry_path);\n    }\n\n    Ok(cache_entry_path)\n}\n\npub trait ParameterSetMetadata: Clone {\n    fn identifier(&self) -> String;\n    fn sector_size(&self) -> u64;\n}\n\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct CacheEntryMetadata {\n    pub sector_size: u64,\n}\n\npub trait CacheableParameters<C, P>\nwhere\n    C: Circuit<Bls12>,\n    P: ParameterSetMetadata,\n{\n    fn cache_prefix() -> String;\n\n    fn cache_meta(pub_params: &P) -> CacheEntryMetadata {\n        CacheEntryMetadata {\n            sector_size: pub_params.sector_size(),\n        }\n    }\n\n    fn cache_identifier(pub_params: &P) -> String {\n        let param_identifier = pub_params.identifier();\n        info!(\"parameter set identifier for cache: {}\", param_identifier);\n        let mut hasher = Sha256::default();\n        hasher.input(&param_identifier.into_bytes());\n        let circuit_hash = hasher.result();\n        format!(\n            \"{}-{:02x}\",\n            Self::cache_prefix(),\n            circuit_hash.iter().format(\"\")\n        )\n    }\n\n    fn get_param_metadata(_circuit: C, pub_params: &P) -> Result<CacheEntryMetadata> {\n        let id = Self::cache_identifier(pub_params);\n\n        // generate (or load) metadata\n        let meta_path = ensure_ancestor_dirs_exist(parameter_cache_metadata_path(&id))?;\n        read_cached_metadata(&meta_path)\n            .or_else(|_| write_cached_metadata(&meta_path, Self::cache_meta(pub_params)))\n    }\n\n    /// If the rng option argument is set, parameters will be\n    /// generated using it.  This is used for testing only, or where\n    /// parameters are otherwise unavailable (e.g. benches).  If rng\n    /// is not set, an error will result if parameters are not\n    /// present.\n    fn get_groth_params<R: RngCore>(\n        rng: Option<&mut R>,\n        circuit: C,\n        pub_params: &P,\n    ) -> Result<groth16::MappedParameters<Bls12>> {\n        let id = Self::cache_identifier(pub_params);\n\n        let generate = || -> Result<_> {\n            if let Some(rng) = rng {\n                use std::time::Instant;\n\n                info!(\"Actually generating groth params. 
(id: {})\", &id);\n                let start = Instant::now();\n                let parameters = groth16::generate_random_parameters::<Bls12, _, _>(circuit, rng)?;\n                let generation_time = start.elapsed();\n                info!(\n                    \"groth_parameter_generation_time: {:?} (id: {})\",\n                    generation_time, &id\n                );\n                Ok(parameters)\n            } else {\n                bail!(\"No cached parameters found for {}\", id);\n            }\n        };\n\n        // load or generate Groth parameter mappings\n        let cache_path = ensure_ancestor_dirs_exist(parameter_cache_params_path(&id))?;\n        match read_cached_params(&cache_path) {\n            Ok(x) => Ok(x),\n            Err(_) => {\n                write_cached_params(&cache_path, generate()?).unwrap_or_else(|e| {\n                    panic!(\"{}: failed to write generated parameters to cache\", e)\n                });\n                Ok(read_cached_params(&cache_path)?)\n            }\n        }\n    }\n\n    /// If the rng option argument is set, parameters will be\n    /// generated using it.  This is used for testing only, or where\n    /// parameters are otherwise unavailable (e.g. benches).  If rng\n    /// is not set, an error will result if parameters are not\n    /// present.\n    fn get_verifying_key<R: RngCore>(\n        rng: Option<&mut R>,\n        circuit: C,\n        pub_params: &P,\n    ) -> Result<groth16::VerifyingKey<Bls12>> {\n        let id = Self::cache_identifier(pub_params);\n\n        let generate = || -> Result<groth16::VerifyingKey<Bls12>> {\n            let groth_params = Self::get_groth_params(rng, circuit, pub_params)?;\n            info!(\"Getting verifying key. (id: {})\", &id);\n            Ok(groth_params.vk)\n        };\n\n        // generate (or load) verifying key\n        let cache_path = ensure_ancestor_dirs_exist(parameter_cache_verifying_key_path(&id))?;\n        read_cached_verifying_key(&cache_path)\n            .or_else(|_| write_cached_verifying_key(&cache_path, generate()?))\n    }\n}\n\nfn ensure_parent(path: &PathBuf) -> Result<()> {\n    match path.parent() {\n        Some(dir) => {\n            create_dir_all(dir)?;\n            Ok(())\n        }\n        None => Ok(()),\n    }\n}\n\n// Reads parameter mappings using mmap so that they can be lazily\n// loaded later.\nfn read_cached_params(cache_entry_path: &PathBuf) -> Result<groth16::MappedParameters<Bls12>> {\n    info!(\"checking cache_path: {:?} for parameters\", cache_entry_path);\n    with_exclusive_read_lock(cache_entry_path, |_| {\n        let params = Parameters::build_mapped_parameters(cache_entry_path.to_path_buf(), false)?;\n        info!(\"read parameters from cache {:?} \", cache_entry_path);\n\n        Ok(params)\n    })\n}\n\nfn read_cached_verifying_key(cache_entry_path: &PathBuf) -> Result<groth16::VerifyingKey<Bls12>> {\n    info!(\n        \"checking cache_path: {:?} for verifying key\",\n        cache_entry_path\n    );\n    with_exclusive_read_lock(cache_entry_path, |mut file| {\n        let key = groth16::VerifyingKey::read(&mut file)?;\n        info!(\"read verifying key from cache {:?} \", cache_entry_path);\n\n        Ok(key)\n    })\n}\n\nfn read_cached_metadata(cache_entry_path: &PathBuf) -> Result<CacheEntryMetadata> {\n    info!(\"checking cache_path: {:?} for metadata\", cache_entry_path);\n    with_exclusive_read_lock(cache_entry_path, |file| {\n        let value = serde_json::from_reader(file)?;\n        info!(\"read 
metadata from cache {:?} \", cache_entry_path);\n\n        Ok(value)\n    })\n}\n\nfn write_cached_metadata(\n    cache_entry_path: &PathBuf,\n    value: CacheEntryMetadata,\n) -> Result<CacheEntryMetadata> {\n    with_exclusive_lock(cache_entry_path, |file| {\n        serde_json::to_writer(file, &value)?;\n        info!(\"wrote metadata to cache {:?} \", cache_entry_path);\n\n        Ok(value)\n    })\n}\n\nfn write_cached_verifying_key(\n    cache_entry_path: &PathBuf,\n    value: groth16::VerifyingKey<Bls12>,\n) -> Result<groth16::VerifyingKey<Bls12>> {\n    with_exclusive_lock(cache_entry_path, |file| {\n        value.write(file)?;\n        info!(\"wrote verifying key to cache {:?} \", cache_entry_path);\n\n        Ok(value)\n    })\n}\n\nfn write_cached_params(\n    cache_entry_path: &PathBuf,\n    value: groth16::Parameters<Bls12>,\n) -> Result<groth16::Parameters<Bls12>> {\n    with_exclusive_lock(cache_entry_path, |file| {\n        value.write(file)?;\n        info!(\"wrote groth parameters to cache {:?} \", cache_entry_path);\n\n        Ok(value)\n    })\n}\n\nfn with_exclusive_lock<T>(\n    file_path: &PathBuf,\n    f: impl FnOnce(&mut LockedFile) -> Result<T>,\n) -> Result<T> {\n    with_open_file(file_path, LockedFile::open_exclusive, f)\n}\n\nfn with_exclusive_read_lock<T>(\n    file_path: &PathBuf,\n    f: impl FnOnce(&mut LockedFile) -> Result<T>,\n) -> Result<T> {\n    with_open_file(file_path, LockedFile::open_exclusive_read, f)\n}\n\nfn with_open_file<'a, T>(\n    file_path: &'a PathBuf,\n    open_file: impl FnOnce(&'a PathBuf) -> io::Result<LockedFile>,\n    f: impl FnOnce(&mut LockedFile) -> Result<T>,\n) -> Result<T> {\n    ensure_parent(&file_path)?;\n    f(&mut open_file(&file_path)?)\n}\n"
  },
  {
    "path": "storage-proofs/core/src/partitions.rs",
    "content": "pub type Partitions = Option<usize>;\n\npub fn partition_count(partitions: Partitions) -> usize {\n    match partitions {\n        None => 1,\n        Some(0) => panic!(\"cannot specify zero partitions\"),\n        Some(k) => k,\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/pieces.rs",
    "content": "use std::io::Read;\n\nuse anyhow::{ensure, Context};\nuse merkletree::merkle::next_pow2;\n\nuse crate::error::*;\nuse crate::fr32::Fr32Ary;\nuse crate::hasher::{Domain, Hasher};\nuse crate::merkle::BinaryMerkleTree;\nuse crate::util::NODE_SIZE;\n\n/// `position`, `length` are in H::Domain units\n#[derive(Clone, Debug)]\npub struct PieceSpec {\n    pub comm_p: Fr32Ary,\n    pub position: usize,\n    pub number_of_leaves: usize,\n}\n\nimpl PieceSpec {\n    /// `compute_packing` returns a packing list and a proof size.\n    /// A packing list is a pair of (start, length) pairs, relative to the beginning of the piece,\n    /// in leaf units.\n    /// Proof size is a number of elements (size same as one leaf) provided in the variable part of a PieceInclusionProof.\n    pub fn compute_packing(&self, tree_len: usize) -> Result<(Vec<(usize, usize)>, usize)> {\n        ensure!(self.is_aligned(tree_len)?, Error::UnalignedPiece);\n\n        let packing_list = vec![(0, self.number_of_leaves)];\n        Ok((packing_list, self.proof_length(tree_len)))\n    }\n\n    pub fn is_aligned(&self, tree_len: usize) -> Result<bool> {\n        piece_is_aligned(self.position, self.number_of_leaves, tree_len)\n    }\n\n    fn height(&self) -> usize {\n        height_for_length(self.number_of_leaves)\n    }\n\n    // `proof_length` is length of proof that comm_p is in the containing root, excluding comm_p and root, which aren't needed for the proof itself.\n    fn proof_length(&self, tree_len: usize) -> usize {\n        height_for_length(tree_len) - self.height()\n    }\n}\n\n/// Generate `comm_p` from a source and return it as bytes.\npub fn generate_piece_commitment_bytes_from_source<H: Hasher>(\n    source: &mut dyn Read,\n    padded_piece_size: usize,\n) -> Result<Fr32Ary> {\n    ensure!(padded_piece_size > 32, \"piece is too small\");\n    ensure!(padded_piece_size % 32 == 0, \"piece is not valid size\");\n\n    let mut buf = [0; NODE_SIZE];\n\n    let parts = (padded_piece_size as f64 / NODE_SIZE as f64).ceil() as usize;\n\n    let tree = BinaryMerkleTree::<H>::try_from_iter((0..parts).map(|_| {\n        source.read_exact(&mut buf)?;\n        <H::Domain as Domain>::try_from_bytes(&buf).context(\"invalid Fr element\")\n    }))\n    .context(\"failed to build tree\")?;\n\n    let mut comm_p_bytes = [0; NODE_SIZE];\n    let comm_p = tree.root();\n    comm_p.write_bytes(&mut comm_p_bytes)?;\n\n    Ok(comm_p_bytes)\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Utility\n\npub fn piece_is_aligned(position: usize, length: usize, tree_len: usize) -> Result<bool> {\n    let capacity_at_pos = subtree_capacity(position, tree_len)?;\n\n    Ok(capacity_at_pos.is_power_of_two() && capacity_at_pos >= length)\n}\n\nfn height_for_length(n: usize) -> usize {\n    if n == 0 {\n        0\n    } else {\n        (n as f64).log2().ceil() as usize\n    }\n}\n\nfn subtree_capacity(pos: usize, total: usize) -> Result<usize> {\n    ensure!(pos < total, \"position must be less than tree capacity\");\n\n    let mut capacity = 1;\n    // If tree is not 'full', then pos 0 will have subtree_capacity greater than size of tree.\n    let mut cursor = pos + next_pow2(total);\n\n    while cursor & 1 == 0 {\n        capacity *= 2;\n        cursor >>= 1;\n    }\n    Ok(capacity)\n}\n////////////////////////////////////////////////////////////////////////////////\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::hasher::PedersenHasher;\n\n    #[test]\n    fn 
test_subtree_capacity() {\n        assert_eq!(subtree_capacity(0, 16).unwrap(), 16);\n        assert_eq!(subtree_capacity(1, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(2, 16).unwrap(), 2);\n        assert_eq!(subtree_capacity(3, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(4, 16).unwrap(), 4);\n        assert_eq!(subtree_capacity(5, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(6, 16).unwrap(), 2);\n        assert_eq!(subtree_capacity(7, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(8, 16).unwrap(), 8);\n        assert_eq!(subtree_capacity(9, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(10, 16).unwrap(), 2);\n        assert_eq!(subtree_capacity(11, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(12, 16).unwrap(), 4);\n        assert_eq!(subtree_capacity(13, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(14, 16).unwrap(), 2);\n        assert_eq!(subtree_capacity(15, 16).unwrap(), 1);\n    }\n\n    #[test]\n    fn test_generate_piece_commitment_bytes_from_source() -> Result<()> {\n        let some_bytes: Vec<u8> = vec![0; 64];\n        let mut some_bytes_slice: &[u8] = &some_bytes;\n        generate_piece_commitment_bytes_from_source::<PedersenHasher>(&mut some_bytes_slice, 64)\n            .expect(\"threshold for sufficient bytes is 32\");\n\n        let not_enough_bytes: Vec<u8> = vec![0; 7];\n        let mut not_enough_bytes_slice: &[u8] = &not_enough_bytes;\n        assert!(\n            generate_piece_commitment_bytes_from_source::<PedersenHasher>(\n                &mut not_enough_bytes_slice,\n                7\n            )\n            .is_err(),\n            \"insufficient bytes should error out\"\n        );\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/por.rs",
    "content": "use anyhow::ensure;\nuse serde::{Deserialize, Serialize};\nuse std::marker::PhantomData;\n\nuse crate::error::*;\nuse crate::hasher::{Domain, Hasher};\nuse crate::merkle::{MerkleProofTrait, MerkleTreeTrait};\nuse crate::parameter_cache::ParameterSetMetadata;\nuse crate::proof::{NoRequirements, ProofScheme};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct DataProof<Proof: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"<Proof::Hasher as Hasher>::Domain: Serialize\",\n        deserialize = \"<Proof::Hasher as Hasher>::Domain: Deserialize<'de>\"\n    ))]\n    pub proof: Proof,\n    #[serde(bound(\n        serialize = \"<Proof::Hasher as Hasher>::Domain: Serialize\",\n        deserialize = \"<Proof::Hasher as Hasher>::Domain: Deserialize<'de>\"\n    ))]\n    pub data: <Proof::Hasher as Hasher>::Domain,\n}\n\n/// The parameters shared between the prover and verifier.\n#[derive(Clone, Debug)]\npub struct PublicParams {\n    /// How many leaves the underlying merkle tree has.\n    pub leaves: usize,\n    pub private: bool,\n}\n\nimpl ParameterSetMetadata for PublicParams {\n    fn identifier(&self) -> String {\n        format!(\n            \"merklepor::PublicParams{{leaves: {}; private: {}}}\",\n            self.leaves, self.private\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        unimplemented!(\"required for parameter metadata file generation\")\n    }\n}\n\n/// The inputs that are necessary for the verifier to verify the proof.\n#[derive(Debug, Clone)]\npub struct PublicInputs<T: Domain> {\n    /// The root hash of the underlying merkle tree.\n    pub commitment: Option<T>,\n    /// The challenge, which leaf to prove.\n    pub challenge: usize,\n}\n\n/// The inputs that are only available to the prover.\n#[derive(Debug)]\npub struct PrivateInputs<'a, Tree: 'a + MerkleTreeTrait> {\n    /// The data of the leaf.\n    pub leaf: <Tree::Hasher as Hasher>::Domain,\n    /// The underlying merkle tree.\n    pub tree: &'a Tree,\n}\n\nimpl<'a, Tree: MerkleTreeTrait> PrivateInputs<'a, Tree> {\n    pub fn new(leaf: <Tree::Hasher as Hasher>::Domain, tree: &'a Tree) -> Self {\n        PrivateInputs { leaf, tree }\n    }\n}\n\n#[derive(Clone, Debug)]\npub struct SetupParams {\n    pub leaves: usize,\n    pub private: bool,\n}\n\n/// Merkle tree based proof of retrievability.\n#[derive(Debug, Default)]\npub struct PoR<Tree: MerkleTreeTrait> {\n    _tree: PhantomData<Tree>,\n}\n\nimpl<'a, Tree: 'a + MerkleTreeTrait> ProofScheme<'a> for PoR<Tree> {\n    type PublicParams = PublicParams;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<Tree::Hasher as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<'a, Tree>;\n    type Proof = DataProof<Tree::Proof>;\n    type Requirements = NoRequirements;\n\n    fn setup(sp: &SetupParams) -> Result<PublicParams> {\n        // atm only binary trees are implemented\n        Ok(PublicParams {\n            leaves: sp.leaves,\n            private: sp.private,\n        })\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        let challenge = pub_inputs.challenge % pub_params.leaves;\n        let tree = priv_inputs.tree;\n\n        if let Some(ref commitment) = pub_inputs.commitment {\n            ensure!(commitment == &tree.root(), Error::InvalidCommitment);\n        }\n        let proof = tree.gen_proof(challenge)?;\n        
Ok(Self::Proof {\n            proof,\n            data: priv_inputs.leaf,\n        })\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        {\n            // This was verify_proof_meta.\n            let commitments_match = match pub_inputs.commitment {\n                Some(ref commitment) => commitment == &proof.proof.root(),\n                None => true,\n            };\n\n            let expected_path_length = proof.proof.expected_len(pub_params.leaves);\n            let path_length_match = expected_path_length == proof.proof.path().len();\n\n            if !(commitments_match && path_length_match) {\n                dbg!(\n                    commitments_match,\n                    path_length_match,\n                    expected_path_length,\n                    proof.proof.path().len()\n                );\n                return Ok(false);\n            }\n        }\n\n        let data_valid = proof.proof.validate_data(proof.data);\n        let path_valid = proof.proof.validate(pub_inputs.challenge);\n\n        Ok(data_valid && path_valid)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use ff::Field;\n    use generic_array::typenum;\n    use paired::bls12_381::Fr;\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n\n    use crate::drgraph::{BucketGraph, Graph, BASE_DEGREE};\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::{Blake2sHasher, PedersenHasher, PoseidonHasher, Sha256Hasher};\n    use crate::merkle::{create_base_merkle_tree, DiskStore, MerkleProofTrait, MerkleTreeWrapper};\n    use crate::util::data_at_node;\n\n    fn test_merklepor<Tree: MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 16;\n        let pub_params = PublicParams {\n            leaves,\n            private: false,\n        };\n\n        let data: Vec<u8> = (0..leaves)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n        let porep_id = [3; 32];\n        let graph = BucketGraph::<Tree::Hasher>::new(leaves, BASE_DEGREE, 0, porep_id).unwrap();\n        let tree = create_base_merkle_tree::<Tree>(None, graph.size(), data.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs {\n            challenge: 3,\n            commitment: Some(tree.root()),\n        };\n\n        let leaf = <Tree::Hasher as Hasher>::Domain::try_from_bytes(\n            data_at_node(data.as_slice(), pub_inputs.challenge).unwrap(),\n        )\n        .unwrap();\n\n        let priv_inputs = PrivateInputs::new(leaf, &tree);\n\n        let proof =\n            PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs).expect(\"proving failed\");\n\n        let is_valid =\n            PoR::<Tree>::verify(&pub_params, &pub_inputs, &proof).expect(\"verification failed\");\n\n        assert!(is_valid);\n    }\n\n    type TestTree<H, U> =\n        MerkleTreeWrapper<H, DiskStore<<H as Hasher>::Domain>, U, typenum::U0, typenum::U0>;\n\n    #[test]\n    fn merklepor_pedersen_binary() {\n        test_merklepor::<TestTree<PedersenHasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_poseidon_binary() {\n        test_merklepor::<TestTree<PoseidonHasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_sha256_binary() {\n        test_merklepor::<TestTree<Sha256Hasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_blake2s_binary() {\n        
test_merklepor::<TestTree<Blake2sHasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_pedersen_quad() {\n        test_merklepor::<TestTree<PedersenHasher, typenum::U4>>();\n    }\n\n    #[test]\n    fn merklepor_poseidon_quad() {\n        test_merklepor::<TestTree<PoseidonHasher, typenum::U4>>();\n    }\n\n    #[test]\n    fn merklepor_sha256_quad() {\n        test_merklepor::<TestTree<Sha256Hasher, typenum::U4>>();\n    }\n\n    #[test]\n    fn merklepor_blake2s_quad() {\n        test_merklepor::<TestTree<Blake2sHasher, typenum::U4>>();\n    }\n\n    // Takes a valid proof and breaks it.\n    fn make_bogus_proof<Proof: MerkleProofTrait>(\n        rng: &mut XorShiftRng,\n        mut proof: DataProof<Proof>,\n    ) -> DataProof<Proof> {\n        let bogus_leaf = <Proof::Hasher as Hasher>::Domain::random(rng);\n        proof.proof.break_me(bogus_leaf);\n        proof\n    }\n\n    fn test_merklepor_validates<Tree: MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64;\n        let pub_params = PublicParams {\n            leaves,\n            private: false,\n        };\n\n        let data: Vec<u8> = (0..leaves)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        let porep_id = [99; 32];\n\n        let graph = BucketGraph::<Tree::Hasher>::new(leaves, BASE_DEGREE, 0, porep_id).unwrap();\n        let tree = create_base_merkle_tree::<Tree>(None, graph.size(), data.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs {\n            challenge: 3,\n            commitment: Some(tree.root()),\n        };\n\n        let leaf = <Tree::Hasher as Hasher>::Domain::try_from_bytes(\n            data_at_node(data.as_slice(), pub_inputs.challenge).unwrap(),\n        )\n        .unwrap();\n\n        let priv_inputs = PrivateInputs::<Tree>::new(leaf, &tree);\n\n        let good_proof =\n            PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs).expect(\"proving failed\");\n\n        let verified = PoR::<Tree>::verify(&pub_params, &pub_inputs, &good_proof)\n            .expect(\"verification failed\");\n        assert!(verified);\n\n        let bad_proof = make_bogus_proof::<Tree::Proof>(rng, good_proof);\n\n        let verified =\n            PoR::<Tree>::verify(&pub_params, &pub_inputs, &bad_proof).expect(\"verification failed\");\n\n        // A bad proof should not be verified!\n        assert!(!verified);\n    }\n\n    #[test]\n    fn merklepor_actually_validates_sha256_binary() {\n        test_merklepor_validates::<TestTree<Sha256Hasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_blake2s_binary() {\n        test_merklepor_validates::<TestTree<Blake2sHasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_pedersen_binary() {\n        test_merklepor_validates::<TestTree<PedersenHasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_poseidon_binary() {\n        test_merklepor_validates::<TestTree<PoseidonHasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_sha256_quad() {\n        test_merklepor_validates::<TestTree<Sha256Hasher, typenum::U4>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_blake2s_quad() {\n        test_merklepor_validates::<TestTree<Blake2sHasher, typenum::U4>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_pedersen_quad() {\n        test_merklepor_validates::<TestTree<PedersenHasher, typenum::U4>>();\n    
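}\n\n    // Editorial sketch, not part of the original suite: a proof must not\n    // verify against a different (here random, and therefore wrong) root commitment.\n    fn test_merklepor_rejects_wrong_root<Tree: MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 16;\n        let pub_params = PublicParams {\n            leaves,\n            private: false,\n        };\n\n        let data: Vec<u8> = (0..leaves)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n        let porep_id = [7; 32];\n        let graph = BucketGraph::<Tree::Hasher>::new(leaves, BASE_DEGREE, 0, porep_id).unwrap();\n        let tree = create_base_merkle_tree::<Tree>(None, graph.size(), data.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs {\n            challenge: 3,\n            commitment: Some(tree.root()),\n        };\n\n        let leaf = <Tree::Hasher as Hasher>::Domain::try_from_bytes(\n            data_at_node(data.as_slice(), pub_inputs.challenge).unwrap(),\n        )\n        .unwrap();\n\n        let priv_inputs = PrivateInputs::<Tree>::new(leaf, &tree);\n        let proof =\n            PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs).expect(\"proving failed\");\n\n        // Swap in a random root; verification must reject the proof.\n        let wrong_pub_inputs = PublicInputs {\n            challenge: 3,\n            commitment: Some(<Tree::Hasher as Hasher>::Domain::random(rng)),\n        };\n\n        let verified = PoR::<Tree>::verify(&pub_params, &wrong_pub_inputs, &proof)\n            .expect(\"verification failed\");\n        assert!(!verified);\n    }\n\n    #[test]\n    fn merklepor_rejects_wrong_root_pedersen_binary() {\n        test_merklepor_rejects_wrong_root::<TestTree<PedersenHasher, typenum::U2>>();\n    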
}\n\n    #[test]\n    fn merklepor_actually_validates_poseidon_quad() {\n        test_merklepor_validates::<TestTree<PoseidonHasher, typenum::U4>>();\n    }\n\n    fn test_merklepor_validates_challenge_identity<Tree: MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64;\n\n        let pub_params = PublicParams {\n            leaves,\n            private: false,\n        };\n\n        let data: Vec<u8> = (0..leaves)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        let porep_id = [32; 32];\n        let graph = BucketGraph::<Tree::Hasher>::new(leaves, BASE_DEGREE, 0, porep_id).unwrap();\n        let tree = create_base_merkle_tree::<Tree>(None, graph.size(), data.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs {\n            challenge: 3,\n            commitment: Some(tree.root()),\n        };\n\n        let leaf = <Tree::Hasher as Hasher>::Domain::try_from_bytes(\n            data_at_node(data.as_slice(), pub_inputs.challenge).unwrap(),\n        )\n        .unwrap();\n\n        let priv_inputs = PrivateInputs::<Tree>::new(leaf, &tree);\n\n        let proof =\n            PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs).expect(\"proving failed\");\n\n        let different_pub_inputs = PublicInputs {\n            challenge: 999,\n            commitment: Some(tree.root()),\n        };\n\n        let verified = PoR::<Tree>::verify(&pub_params, &different_pub_inputs, &proof)\n            .expect(\"verification failed\");\n\n        // A proof created with the wrong challenge must not be verified!\n        assert!(!verified);\n    }\n\n    #[test]\n    fn merklepor_actually_validates_challenge_identity_sha256_binary() {\n        test_merklepor_validates_challenge_identity::<TestTree<Sha256Hasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_challenge_identity_blake2s_binary() {\n        test_merklepor_validates_challenge_identity::<TestTree<Blake2sHasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_challenge_identity_pedersen_binary() {\n        test_merklepor_validates_challenge_identity::<TestTree<PedersenHasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_challenge_identity_poseidon_binary() {\n        test_merklepor_validates_challenge_identity::<TestTree<PoseidonHasher, typenum::U2>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_challenge_identity_sha256_quad() {\n        test_merklepor_validates_challenge_identity::<TestTree<Sha256Hasher, typenum::U4>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_challenge_identity_blake2s_quad() {\n        test_merklepor_validates_challenge_identity::<TestTree<Blake2sHasher, typenum::U4>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_challenge_identity_pedersen_quad() {\n        test_merklepor_validates_challenge_identity::<TestTree<PedersenHasher, typenum::U4>>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_challenge_identity_poseidon_quad() {\n        test_merklepor_validates_challenge_identity::<TestTree<PoseidonHasher, typenum::U4>>();\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/proof.rs",
    "content": "use std::time::Instant;\n\nuse log::info;\nuse serde::de::DeserializeOwned;\nuse serde::ser::Serialize;\n\nuse crate::error::Result;\n\n/// The ProofScheme trait provides the methods that any proof scheme needs to implement.\npub trait ProofScheme<'a> {\n    type PublicParams: Clone;\n    type SetupParams: Clone;\n    type PublicInputs: Clone;\n    type PrivateInputs;\n    type Proof: Clone + Serialize + DeserializeOwned;\n    type Requirements: Default;\n\n    /// setup is used to generate public parameters from setup parameters in order to specialize\n    /// a ProofScheme to the specific parameters required by a consumer.\n    fn setup(_: &Self::SetupParams) -> Result<Self::PublicParams>;\n\n    /// prove generates and returns a proof from public parameters, public inputs, and private inputs.\n    fn prove(\n        _: &Self::PublicParams,\n        _: &Self::PublicInputs,\n        _: &Self::PrivateInputs,\n    ) -> Result<Self::Proof>;\n\n    fn prove_all_partitions(\n        pub_params: &Self::PublicParams,\n        pub_in: &Self::PublicInputs,\n        priv_in: &Self::PrivateInputs,\n        partition_count: usize,\n    ) -> Result<Vec<Self::Proof>> {\n        info!(\"groth_proof_count: {}\", partition_count);\n        info!(\"generating {} groth proofs.\", partition_count);\n        let start = Instant::now();\n\n        let result = (0..partition_count)\n            .map(|k| {\n                info!(\"generating groth proof {}.\", k);\n                let start = Instant::now();\n\n                let partition_pub_in = Self::with_partition((*pub_in).clone(), Some(k));\n                let proof = Self::prove(pub_params, &partition_pub_in, priv_in);\n\n                let proof_time = start.elapsed();\n                info!(\"groth_proof_time: {:?}\", proof_time);\n\n                proof\n            })\n            .collect::<Result<Vec<Self::Proof>>>();\n\n        let total_proof_time = start.elapsed();\n        info!(\"total_groth_proof_time: {:?}\", total_proof_time);\n\n        result\n    }\n\n    /// verify returns true if the supplied proof is valid for the given public parameter and public inputs.\n    /// Note that verify does not have access to private inputs.\n    /// Remember that proof is untrusted, and any data it provides MUST be validated as corresponding\n    /// to the supplied public parameters and inputs.\n    fn verify(\n        _pub_params: &Self::PublicParams,\n        _pub_inputs: &Self::PublicInputs,\n        _proof: &Self::Proof,\n    ) -> Result<bool> {\n        unimplemented!();\n    }\n\n    fn verify_all_partitions(\n        pub_params: &Self::PublicParams,\n        pub_in: &Self::PublicInputs,\n        proofs: &[Self::Proof],\n    ) -> Result<bool> {\n        for (k, proof) in proofs.iter().enumerate() {\n            let partition_pub_in = Self::with_partition((*pub_in).clone(), Some(k)); //\n\n            if !Self::verify(pub_params, &partition_pub_in, proof)? {\n                return Ok(false);\n            }\n        }\n\n        Ok(true)\n    }\n\n    // This method must be specialized by concrete ProofScheme implementations which use partitions.\n    fn with_partition(pub_in: Self::PublicInputs, _k: Option<usize>) -> Self::PublicInputs {\n        pub_in\n    }\n\n    fn satisfies_requirements(\n        _pub_params: &Self::PublicParams,\n        _requirements: &Self::Requirements,\n        _partitions: usize,\n    ) -> bool {\n        true\n    }\n}\n\n#[derive(Default)]\npub struct NoRequirements;\n"
  },
  {
    "path": "storage-proofs/core/src/sector.rs",
    "content": "use std::collections::BTreeSet;\nuse std::fmt;\n\nuse byteorder::ByteOrder;\nuse ff::PrimeField;\nuse paired::bls12_381::{Fr, FrRepr};\nuse serde::{Deserialize, Serialize};\n\n/// An ordered set of `SectorId`s.\npub type OrderedSectorSet = BTreeSet<SectorId>;\n\n/// Identifier for a single sector.\n#[derive(\n    Default, Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize,\n)]\npub struct SectorId(u64);\n\nimpl From<u64> for SectorId {\n    fn from(n: u64) -> Self {\n        SectorId(n)\n    }\n}\n\nimpl From<SectorId> for u64 {\n    fn from(n: SectorId) -> Self {\n        n.0\n    }\n}\n\nimpl From<SectorId> for Fr {\n    fn from(n: SectorId) -> Self {\n        Fr::from_repr(FrRepr::from(n.0)).unwrap()\n    }\n}\n\nimpl fmt::Display for SectorId {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"SectorId({})\", self.0)\n    }\n}\n\nimpl SectorId {\n    pub fn as_fr_safe(self) -> [u8; 31] {\n        let mut buf: [u8; 31] = [0; 31];\n        byteorder::LittleEndian::write_u64(&mut buf[0..8], self.0);\n        buf\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/settings.rs",
    "content": "use std::sync::Mutex;\n\nuse config::{Config, ConfigError, Environment, File};\nuse lazy_static::lazy_static;\nuse serde::{Deserialize, Serialize};\n\nlazy_static! {\n    pub static ref SETTINGS: Mutex<Settings> =\n        Mutex::new(Settings::new().expect(\"invalid configuration\"));\n}\n\nconst SETTINGS_PATH: &str = \"./rust-fil-proofs.config.toml\";\n\n#[derive(Debug, Serialize, Deserialize)]\n#[serde(default)]\npub struct Settings {\n    pub maximize_caching: bool,\n    pub pedersen_hash_exp_window_size: u32,\n    pub use_gpu_column_builder: bool,\n    pub max_gpu_column_batch_size: u32,\n    pub max_gpu_tree_batch_size: u32,\n    pub rows_to_discard: u32,\n}\n\nimpl Default for Settings {\n    fn default() -> Self {\n        Settings {\n            maximize_caching: false,\n            pedersen_hash_exp_window_size: 16,\n            use_gpu_column_builder: false,\n            max_gpu_column_batch_size: 400_000,\n            max_gpu_tree_batch_size: 700_000,\n            rows_to_discard: 0,\n        }\n    }\n}\n\nimpl Settings {\n    fn new() -> Result<Settings, ConfigError> {\n        let mut s = Config::new();\n\n        s.merge(File::with_name(SETTINGS_PATH).required(false))?;\n        s.merge(Environment::with_prefix(\"FIL_PROOFS\"))?;\n\n        s.try_into()\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/test_helper.rs",
    "content": "use memmap::MmapMut;\nuse memmap::MmapOptions;\nuse std::fs::OpenOptions;\nuse std::io::Write;\nuse std::path::Path;\n\npub fn setup_replica(data: &[u8], replica_path: &Path) -> MmapMut {\n    let mut f = OpenOptions::new()\n        .read(true)\n        .write(true)\n        .create(true)\n        .open(replica_path)\n        .expect(\"Failed to create replica\");\n    f.write_all(data).expect(\"Failed to write data to replica\");\n\n    unsafe {\n        MmapOptions::new()\n            .map_mut(&f)\n            .expect(\"Failed to back memory map with tempfile\")\n    }\n}\n\n#[macro_export]\nmacro_rules! table_tests {\n    ($property_test_func:ident {\n        $( $(#[$attr:meta])* $test_name:ident( $( $param:expr ),* ); )+\n    }) => {\n        $(\n            $(#[$attr])*\n                #[test]\n            fn $test_name() {\n                $property_test_func($( $param ),* )\n            }\n        )+\n    }\n}\n"
  },
  {
    "path": "storage-proofs/core/src/util.rs",
    "content": "use crate::error;\nuse anyhow::ensure;\nuse bellperson::gadgets::boolean::{self, AllocatedBit, Boolean};\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse merkletree::merkle::get_merkle_tree_row_count;\nuse paired::Engine;\n\nuse super::settings;\n\npub const NODE_SIZE: usize = 32;\n\n/// Returns the start position of the data, 0-indexed.\npub fn data_at_node_offset(v: usize) -> usize {\n    v * NODE_SIZE\n}\n\n/// Returns the byte slice representing one node (of uniform size, NODE_SIZE) at position v in data.\npub fn data_at_node(data: &[u8], v: usize) -> error::Result<&[u8]> {\n    let offset = data_at_node_offset(v);\n\n    ensure!(\n        offset + NODE_SIZE <= data.len(),\n        error::Error::OutOfBounds(offset + NODE_SIZE, data.len())\n    );\n\n    Ok(&data[offset..offset + NODE_SIZE])\n}\n\n/// Converts bytes into their bit representation, in little endian format.\npub fn bytes_into_bits(bytes: &[u8]) -> Vec<bool> {\n    bytes\n        .iter()\n        .flat_map(|&byte| (0..8).map(move |i| (byte >> i) & 1u8 == 1u8))\n        .collect()\n}\n\n/// Converts bytes into their bit representation, in little endian format.\npub fn bytes_into_bits_opt(bytes: &[u8]) -> Vec<Option<bool>> {\n    bytes\n        .iter()\n        .flat_map(|&byte| (0..8).map(move |i| Some((byte >> i) & 1u8 == 1u8)))\n        .collect()\n}\n\n/// Converts bytes into their bit representation, in big endian format.\npub fn bytes_into_bits_be(bytes: &[u8]) -> Vec<bool> {\n    bytes\n        .iter()\n        .flat_map(|&byte| (0..8).rev().map(move |i| (byte >> i) & 1u8 == 1u8))\n        .collect()\n}\n\n/// Converts the bytes into a boolean vector, in little endian format.\npub fn bytes_into_boolean_vec<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    value: Option<&[u8]>,\n    size: usize,\n) -> Result<Vec<boolean::Boolean>, SynthesisError> {\n    let values = match value {\n        Some(value) => bytes_into_bits(value).into_iter().map(Some).collect(),\n        None => vec![None; size],\n    };\n\n    let bits = values\n        .into_iter()\n        .enumerate()\n        .map(|(i, b)| {\n            Ok(Boolean::from(AllocatedBit::alloc(\n                cs.namespace(|| format!(\"bit {}\", i)),\n                b,\n            )?))\n        })\n        .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n    Ok(bits)\n}\n\n/// Converts the bytes into a boolean vector, in big endian format.\npub fn bytes_into_boolean_vec_be<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    value: Option<&[u8]>,\n    size: usize,\n) -> Result<Vec<boolean::Boolean>, SynthesisError> {\n    let values = match value {\n        Some(value) => bytes_into_bits_be(value).into_iter().map(Some).collect(),\n        None => vec![None; size],\n    };\n\n    let bits = values\n        .into_iter()\n        .enumerate()\n        .map(|(i, b)| {\n            Ok(Boolean::from(AllocatedBit::alloc(\n                cs.namespace(|| format!(\"bit {}\", i)),\n                b,\n            )?))\n        })\n        .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n    Ok(bits)\n}\n\n#[allow(dead_code)]\n#[inline]\nfn bool_to_u8(bit: bool, offset: usize) -> u8 {\n    if bit {\n        1u8 << offset\n    } else {\n        0u8\n    }\n}\n\n/// Converts a slice of bools into their byte representation, in little endian.\n#[allow(dead_code)]\npub fn bits_to_bytes(bits: &[bool]) -> Vec<u8> {\n    bits.chunks(8)\n        .map(|bits| {\n            bool_to_u8(bits[7], 7)\n                | bool_to_u8(bits[6], 6)\n           
     | bool_to_u8(bits[5], 5)\n                | bool_to_u8(bits[4], 4)\n                | bool_to_u8(bits[3], 3)\n                | bool_to_u8(bits[2], 2)\n                | bool_to_u8(bits[1], 1)\n                | bool_to_u8(bits[0], 0)\n        })\n        .collect()\n}\n\n/// Reverses the order of bits within each byte (bit numbering) without altering the order of the bytes\n/// within the array (endianness), when the bit array is viewed as a flattened sequence of octets.\n/// Before the intra-byte bit reversal begins, zero-bit padding is added so that every byte is full.\npub fn reverse_bit_numbering(bits: Vec<boolean::Boolean>) -> Vec<boolean::Boolean> {\n    let mut padded_bits = bits;\n    // Pad partial bytes\n    while padded_bits.len() % 8 != 0 {\n        padded_bits.push(boolean::Boolean::Constant(false));\n    }\n\n    padded_bits\n        .chunks(8)\n        .map(|chunk| chunk.iter().rev())\n        .flatten()\n        .cloned()\n        .collect()\n}\n\n// If the tree is large enough to use the default value (per-arity), use it. If it's too small to cache anything (i.e. not enough rows), don't discard any.\npub fn default_rows_to_discard(leafs: usize, arity: usize) -> usize {\n    let row_count = get_merkle_tree_row_count(leafs, arity);\n    if row_count <= 2 {\n        // If a tree only has a root row and/or base, there is\n        // nothing to discard.\n        return 0;\n    } else if row_count == 3 {\n        // If a tree only has 1 row between the base and root,\n        // it's all that can be discarded.\n        return 1;\n    }\n\n    // row_count - 2 discounts the base layer (1) and root (1)\n    let max_rows_to_discard = row_count - 2;\n\n    // This configurable setting supplies the rows_to_discard value used for\n    // arities other than 2 and 4 (e.g. the default oct-tree arity).\n    let rows_to_discard = settings::SETTINGS.lock().unwrap().rows_to_discard as usize;\n\n    // Discard at most 'constant value' rows (coded below,\n    // differing by arity) while respecting the max number that\n    // the tree can support discarding.\n    match arity {\n        2 => std::cmp::min(max_rows_to_discard, 7),\n        4 => std::cmp::min(max_rows_to_discard, 5),\n        _ => std::cmp::min(max_rows_to_discard, rows_to_discard),\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use crate::fr32::fr_into_bytes;\n    use crate::gadgets::TestConstraintSystem;\n    use bellperson::gadgets::num;\n    use ff::Field;\n    use paired::bls12_381::*;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    #[test]\n    fn test_bytes_into_boolean_vec() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for i in 0..100 {\n            let data: Vec<u8> = (0..i + 10).map(|_| rng.gen()).collect();\n            let bools = {\n                let mut cs = cs.namespace(|| format!(\"round: {}\", i));\n                bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), 8).unwrap()\n            };\n\n            let bytes_actual: Vec<u8> = bits_to_bytes(\n                bools\n                    .iter()\n                    .map(|b| b.get_value().unwrap())\n                    .collect::<Vec<bool>>()\n                    .as_slice(),\n            );\n\n            assert_eq!(data, bytes_actual);\n        }\n    }\n\n    #[test]\n    fn test_bool_to_u8() {\n        assert_eq!(bool_to_u8(false, 2), 0b0000_0000);\n        assert_eq!(bool_to_u8(true, 0), 0b0000_0001);\n        assert_eq!(bool_to_u8(true, 1), 
0b0000_0010);\n        assert_eq!(bool_to_u8(true, 7), 0b1000_0000);\n    }\n\n    #[test]\n    fn test_bits_into_bytes() {\n        assert_eq!(\n            bits_to_bytes(&[true, false, false, false, false, false, false, false]),\n            vec![1]\n        );\n        assert_eq!(\n            bits_to_bytes(&[true, true, true, true, true, true, true, true]),\n            vec![255]\n        );\n    }\n\n    #[test]\n    fn test_bytes_into_bits() {\n        assert_eq!(\n            bytes_into_bits(&[1u8]),\n            vec![true, false, false, false, false, false, false, false]\n        );\n\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for i in 10..100 {\n            let bytes: Vec<u8> = (0..i).map(|_| rng.gen()).collect();\n\n            let bits = bytes_into_bits(bytes.as_slice());\n            assert_eq!(bits_to_bytes(bits.as_slice()), bytes);\n        }\n    }\n\n    #[test]\n    fn test_reverse_bit_numbering() {\n        for _ in 0..100 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n            let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n            let val_fr = Fr::random(rng);\n            let val_vec = fr_into_bytes(&val_fr);\n\n            let val_num =\n                num::AllocatedNum::alloc(cs.namespace(|| \"val_num\"), || Ok(val_fr.into())).unwrap();\n            let val_num_bits = val_num.to_bits_le(cs.namespace(|| \"val_bits\")).unwrap();\n\n            let bits =\n                bytes_into_boolean_vec_be(cs.namespace(|| \"val_bits_2\"), Some(&val_vec), 256)\n                    .unwrap();\n\n            let val_num_reversed_bit_numbering = reverse_bit_numbering(val_num_bits);\n\n            let a_values: Vec<bool> = val_num_reversed_bit_numbering\n                .iter()\n                .map(|v| v.get_value().unwrap())\n                .collect();\n\n            let b_values: Vec<bool> = bits.iter().map(|v| v.get_value().unwrap()).collect();\n            assert_eq!(&a_values[..], &b_values[..]);\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/parameters.json",
    "content": "{\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params\": {\n    \"cid\": \"QmVxjFRyhmyQaZEtCh7nk2abc7LhFkzhnRX4rcHqCCpikR\",\n    \"digest\": \"7610b9f82bfc88405b7a832b651ce2f6\",\n    \"sector_size\": 2048\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk\": {\n    \"cid\": \"QmcS5JZs8X3TdtkEBpHAdUYjdNDqcL7fWQFtQz69mpnu2X\",\n    \"digest\": \"0e0958009936b9d5e515ec97b8cb792d\",\n    \"sector_size\": 2048\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params\": {\n    \"cid\": \"QmUiRx71uxfmUE8V3H9sWAsAXoM88KR4eo1ByvvcFNeTLR\",\n    \"digest\": \"1a7d4a9c8a502a497ed92a54366af33f\",\n    \"sector_size\": 536870912\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk\": {\n    \"cid\": \"QmfCeddjFpWtavzfEzZpJfzSajGNwfL4RjFXWAvA9TSnTV\",\n    \"digest\": \"4dae975de4f011f101f5a2f86d1daaba\",\n    \"sector_size\": 536870912\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params\": {\n    \"cid\": \"QmcSTqDcFVLGGVYz1njhUZ7B6fkKtBumsLUwx4nkh22TzS\",\n    \"digest\": \"82c88066be968bb550a05e30ff6c2413\",\n    \"sector_size\": 2048\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk\": {\n    \"cid\": \"QmSTCXF2ipGA3f6muVo6kHc2URSx6PzZxGUqu7uykaH5KU\",\n    \"digest\": \"ffd79788d614d27919ae5bd2d94eacb6\",\n    \"sector_size\": 2048\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params\": {\n    \"cid\": \"QmU9SBzJNrcjRFDiFc4GcApqdApN6z9X7MpUr66mJ2kAJP\",\n    \"digest\": \"700171ecf7334e3199437c930676af82\",\n    \"sector_size\": 8388608\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk\": {\n    \"cid\": \"QmbmUMa3TbbW3X5kFhExs6WgC4KeWT18YivaVmXDkB6ANG\",\n    \"digest\": \"79ebb55f56fda427743e35053edad8fc\",\n    \"sector_size\": 8388608\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params\": {\n    \"cid\": \"QmdNEL2RtqL52GQNuj8uz6mVj5Z34NVnbaJ1yMyh1oXtBx\",\n    \"digest\": \"c49499bb76a0762884896f9683403f55\",\n    \"sector_size\": 8388608\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk\": {\n    \"cid\": \"QmUiVYCQUgr6Y13pZFr8acWpSM4xvTXUdcvGmxyuHbKhsc\",\n    \"digest\": \"34d4feeacd9abf788d69ef1bb4d8fd00\",\n    \"sector_size\": 8388608\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params\": {\n    \"cid\": \"QmVgCsJFRXKLuuUhT3aMYwKVGNA9rDeR6DCrs7cAe8riBT\",\n    \"digest\": \"827359440349fe8f5a016e7598993b79\",\n    \"sector_size\": 536870912\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk\": {\n    \"cid\": \"QmfA31fbCWojSmhSGvvfxmxaYCpMoXP95zEQ9sLvBGHNaN\",\n    
\"digest\": \"bd2cd62f65c1ab84f19ca27e97b7c731\",\n    \"sector_size\": 536870912\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params\": {\n    \"cid\": \"QmaUmfcJt6pozn8ndq1JVBzLRjRJdHMTPd4foa8iw5sjBZ\",\n    \"digest\": \"2cf49eb26f1fee94c85781a390ddb4c8\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk\": {\n    \"cid\": \"QmR9i9KL3vhhAqTBGj1bPPC7LvkptxrH9RvxJxLN1vvsBE\",\n    \"digest\": \"0f8ec542485568fa3468c066e9fed82b\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params\": {\n    \"cid\": \"Qmdtczp7p4wrbDofmHdGhiixn9irAcN77mV9AEHZBaTt1i\",\n    \"digest\": \"d84f79a16fe40e9e25a36e2107bb1ba0\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk\": {\n    \"cid\": \"QmZCvxKcKP97vDAk8Nxs9R1fWtqpjQrAhhfXPoCi1nkDoF\",\n    \"digest\": \"fc02943678dd119e69e7fab8420e8819\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params\": {\n    \"cid\": \"QmeAN4vuANhXsF8xP2Lx5j2L6yMSdogLzpcvqCJThRGK1V\",\n    \"digest\": \"3810b7780ac0e299b22ae70f1f94c9bc\",\n    \"sector_size\": 68719476736\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk\": {\n    \"cid\": \"QmWV8rqZLxs1oQN9jxNWmnT1YdgLwCcscv94VARrhHf1T7\",\n    \"digest\": \"59d2bf1857adc59a4f08fcf2afaa916b\",\n    \"sector_size\": 68719476736\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params\": {\n    \"cid\": \"QmVkrXc1SLcpgcudK5J25HH93QvR9tNsVhVTYHm5UymXAz\",\n    \"digest\": \"2170a91ad5bae22ea61f2ea766630322\",\n    \"sector_size\": 68719476736\n  },\n  \"v28-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk\": {\n    \"cid\": \"QmbfQjPD7EpzjhWGmvWAsyN2mAZ4PcYhsf3ujuhU9CSuBm\",\n    \"digest\": \"6d3789148fb6466d07ee1e24d6292fd6\",\n    \"sector_size\": 68719476736\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params\": {\n    \"cid\": \"QmWceMgnWYLopMuM4AoGMvGEau7tNe5UK83XFjH5V9B17h\",\n    \"digest\": \"434fb1338ecfaf0f59256f30dde4968f\",\n    \"sector_size\": 2048\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk\": {\n    \"cid\": \"QmamahpFCstMUqHi2qGtVoDnRrsXhid86qsfvoyCTKJqHr\",\n    \"digest\": \"dc1ade9929ade1708238f155343044ac\",\n    \"sector_size\": 2048\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params\": {\n    \"cid\": \"QmYBpTt7LWNAWr1JXThV5VxX7wsQFLd1PHrGYVbrU1EZjC\",\n    \"digest\": \"6c77597eb91ab936c1cef4cf19eba1b3\",\n    \"sector_size\": 536870912\n  },\n  
\"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk\": {\n    \"cid\": \"QmWionkqH2B6TXivzBSQeSyBxojaiAFbzhjtwYRrfwd8nH\",\n    \"digest\": \"065179da19fbe515507267677f02823e\",\n    \"sector_size\": 536870912\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params\": {\n    \"cid\": \"QmPXAPPuQtuQz7Zz3MHMAMEtsYwqM1o9H1csPLeiMUQwZH\",\n    \"digest\": \"09e612e4eeb7a0eb95679a88404f960c\",\n    \"sector_size\": 8388608\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk\": {\n    \"cid\": \"QmYCuipFyvVW1GojdMrjK1JnMobXtT4zRCZs1CGxjizs99\",\n    \"digest\": \"b687beb9adbd9dabe265a7e3620813e4\",\n    \"sector_size\": 8388608\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params\": {\n    \"cid\": \"QmengpM684XLQfG8754ToonszgEg2bQeAGUan5uXTHUQzJ\",\n    \"digest\": \"6a388072a518cf46ebd661f5cc46900a\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk\": {\n    \"cid\": \"Qmf93EMrADXAK6CyiSfE8xx45fkMfR3uzKEPCvZC1n2kzb\",\n    \"digest\": \"0c7b4aac1c40fdb7eb82bc355b41addf\",\n    \"sector_size\": 34359738368\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params\": {\n    \"cid\": \"QmS7ye6Ri2MfFzCkcUJ7FQ6zxDKuJ6J6B8k5PN7wzSR9sX\",\n    \"digest\": \"1801f8a6e1b00bceb00cc27314bb5ce3\",\n    \"sector_size\": 68719476736\n  },\n  \"v28-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk\": {\n    \"cid\": \"QmehSmC6BhrgRZakPDta2ewoH9nosNzdjCqQRXsNFNUkLN\",\n    \"digest\": \"a89884252c04c298d0b3c81bfd884164\",\n    \"sector_size\": 68719476736\n  }\n}"
  },
  {
    "path": "storage-proofs/porep/Cargo.toml",
    "content": "[package]\nname = \"storage-proofs-porep\"\nversion = \"1.0.0-alpha.0\"\nauthors = [\"dignifiedquire <me@dignifiedquire.com>\"]\nlicense = \"MIT OR Apache-2.0\"\nedition = \"2018\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\nreadme = \"README.md\"\n\n[dependencies]\nstorage-proofs-core = { path = \"../core\", version = \"1.0.0-alpha.0\" }\nrand = \"0.7\"\nmerkletree = \"0.18.0\"\nmemmap = \"0.7\"\nnum-bigint = \"0.2\"\nnum-traits = \"0.2\"\nsha2 = { version = \"0.8.3\", package = \"sha2ni\" }\nsha2raw = { version = \"0.1.0\", path = \"../../sha2raw\" }\nrayon = \"1.0.0\"\nserde = { version = \"1.0\", features = [\"derive\"]}\nff = { version = \"0.2.1\", package = \"fff\" }\nbellperson = \"0.7.0\"\npaired = { version = \"0.19.0\", features = [\"serde\"] }\nfil-sapling-crypto = \"0.5.1\"\nlog = \"0.4.7\"\npretty_assertions = \"0.6.1\"\ngeneric-array = \"0.13.2\"\nanyhow = \"1.0.23\"\nonce_cell = \"1.3.1\"\nneptune = { version = \"0.5.6\", features = [\"gpu\"] }\nnum_cpus = \"1.10.1\"\n\n[dev-dependencies]\ntempdir = \"0.3.7\"\ntempfile = \"3\"\nrand_xorshift = \"0.2.0\"\n\n[features]\ndefault = []\n"
  },
  {
    "path": "storage-proofs/porep/README.md",
    "content": "# Storage Proofs PoRep\n\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "storage-proofs/porep/benches/batch_hasher.rs",
    "content": "use criterion::{black_box, criterion_group, criterion_main, Criterion};\nuse ff::Field;\nuse paired::bls12_381::Fr;\nuse rand::{Rng, SeedableRng};\nuse rand_xorshift::XorShiftRng;\nuse sha2raw::Sha256;\nuse storage_proofs_core::fr32::fr_into_bytes;\nuse storage_proofs_porep::nse::*;\n\nfn bench_batch_hash(c: &mut Criterion) {\n    let rng = &mut XorShiftRng::seed_from_u64(5);\n\n    let window_size = 1024 * 1024 * 1024 * 4;\n    let config = Config {\n        k: 8,\n        num_nodes_window: window_size / 32,\n        degree_expander: 384,\n        degree_butterfly: 4,\n        num_expander_layers: 6,\n        num_butterfly_layers: 4,\n        sector_size: 1 * window_size,\n    };\n    let k = config.k;\n    let degree = config.degree_expander;\n\n    let data: Vec<u8> = (0..config.num_nodes_window)\n        .map(|_| fr_into_bytes(&Fr::random(rng)))\n        .flatten()\n        .collect();\n    let graph: ExpanderGraph = config.clone().into();\n    let mut parents = Vec::with_capacity(config.k as usize * config.degree_expander);\n\n    c.bench_function(\"batch-hash\", move |b| {\n        let mut i = 0;\n        b.iter(|| {\n            parents.clear();\n            parents.extend(graph.expanded_parents(i));\n\n            let mut hasher = Sha256::new();\n            hasher.input(&[&[0u8; 32][..], &[0u8; 32][..]]); // fake prefix\n            let res = batch_hash(k as usize, degree, hasher, &parents, &data);\n            black_box(res);\n            if i < config.num_nodes_window as u32 {\n                i += 1;\n            } else {\n                i = 0;\n            }\n        });\n    });\n}\n\ncriterion_group!(benches, bench_batch_hash);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/porep/benches/encode.rs",
    "content": "use criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput};\nuse ff::Field;\nuse paired::bls12_381::Fr;\nuse rand::thread_rng;\nuse storage_proofs_core::fr32::fr_into_bytes;\nuse storage_proofs_core::hasher::sha256::Sha256Hasher;\nuse storage_proofs_core::hasher::{Domain, Hasher};\nuse storage_proofs_porep::stacked::{create_label, create_label_exp, StackedBucketGraph};\n\nstruct Pregenerated<H: 'static + Hasher> {\n    data: Vec<u8>,\n    replica_id: H::Domain,\n    graph: StackedBucketGraph<H>,\n}\n\nfn pregenerate_data<H: Hasher>(degree: usize) -> Pregenerated<H> {\n    assert_eq!(degree, 6 + 8);\n    let mut rng = thread_rng();\n    let size = degree * 4 * 1024 * 1024;\n    let data: Vec<u8> = (0..size)\n        .flat_map(|_| fr_into_bytes(&Fr::random(&mut rng)))\n        .collect();\n    let replica_id: H::Domain = H::Domain::random(&mut rng);\n\n    let graph = StackedBucketGraph::<H>::new_stacked(size, 6, 8, [32; 32]).unwrap();\n\n    Pregenerated {\n        data,\n        replica_id,\n        graph,\n    }\n}\n\nfn kdf_benchmark(c: &mut Criterion) {\n    let degree = 14;\n    let Pregenerated {\n        data,\n        replica_id,\n        graph,\n    } = pregenerate_data::<Sha256Hasher>(degree);\n\n    let mut group = c.benchmark_group(\"kdf\");\n    group.sample_size(10);\n    group.throughput(Throughput::Bytes(\n        /* replica id + 37 parents + node id */ 39 * 32,\n    ));\n\n    group.bench_function(\"exp\", |b| {\n        let mut raw_data = data.clone();\n        raw_data.extend_from_slice(&data);\n        let (data, exp_data) = raw_data.split_at_mut(data.len());\n\n        let graph = &graph;\n        let replica_id = replica_id.clone();\n\n        b.iter(|| {\n            black_box(create_label_exp(\n                graph,\n                None,\n                &replica_id,\n                &*exp_data,\n                data,\n                1,\n                2,\n            ))\n        })\n    });\n\n    group.bench_function(\"non-exp\", |b| {\n        let mut data = data.clone();\n        let graph = &graph;\n        let replica_id = replica_id.clone();\n\n        b.iter(|| black_box(create_label(graph, None, &replica_id, &mut data, 1, 2)))\n    });\n\n    group.finish();\n}\n\ncriterion_group!(benches, kdf_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/porep/benches/parents.rs",
    "content": "use criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse storage_proofs_core::{\n    drgraph::{Graph, BASE_DEGREE},\n    hasher::blake2s::Blake2sHasher,\n    hasher::pedersen::PedersenHasher,\n    hasher::sha256::Sha256Hasher,\n    hasher::Hasher,\n};\nuse storage_proofs_porep::stacked::{StackedBucketGraph, EXP_DEGREE};\n\n#[cfg(feature = \"cpu-profile\")]\n#[inline(always)]\nfn start_profile(stage: &str) {\n    gperftools::profiler::PROFILER\n        .lock()\n        .unwrap()\n        .start(format!(\"./{}.profile\", stage))\n        .unwrap();\n}\n\n#[cfg(not(feature = \"cpu-profile\"))]\n#[inline(always)]\nfn start_profile(_stage: &str) {}\n\n#[cfg(feature = \"cpu-profile\")]\n#[inline(always)]\nfn stop_profile() {\n    gperftools::profiler::PROFILER\n        .lock()\n        .unwrap()\n        .stop()\n        .unwrap();\n}\n\n#[cfg(not(feature = \"cpu-profile\"))]\n#[inline(always)]\nfn stop_profile() {}\n\nfn pregenerate_graph<H: Hasher>(size: usize) -> StackedBucketGraph<H> {\n    let seed = [1u8; 28];\n    StackedBucketGraph::<H>::new_stacked(size, BASE_DEGREE, EXP_DEGREE, [32; 32]).unwrap()\n}\n\nfn parents_loop<H: Hasher, G: Graph<H>>(graph: &G, parents: &mut [u32]) {\n    (0..graph.size())\n        .map(|node| graph.parents(node, parents).unwrap())\n        .collect()\n}\n\nfn parents_loop_benchmark(cc: &mut Criterion) {\n    let sizes = vec![10, 50, 1000];\n\n    cc.bench(\n        \"parents in a loop\",\n        ParameterizedBenchmark::new(\n            \"Blake2s\",\n            |b, size| {\n                let graph = pregenerate_graph::<Blake2sHasher>(*size);\n                let mut parents = vec![0; graph.degree()];\n                start_profile(&format!(\"parents-blake2s-{}\", *size));\n                b.iter(|| black_box(parents_loop::<Blake2sHasher, _>(&graph, &mut parents)));\n                stop_profile();\n            },\n            sizes,\n        )\n        .with_function(\"Pedersen\", |b, degree| {\n            let graph = pregenerate_graph::<PedersenHasher>(*degree);\n            let mut parents = vec![0; graph.degree()];\n            b.iter(|| black_box(parents_loop::<PedersenHasher, _>(&graph, &mut parents)))\n        })\n        .with_function(\"Sha256\", |b, degree| {\n            let graph = pregenerate_graph::<Sha256Hasher>(*degree);\n            let mut parents = vec![0; graph.degree()];\n            b.iter(|| black_box(parents_loop::<Sha256Hasher, _>(&graph, &mut parents)))\n        }),\n    );\n}\n\ncriterion_group!(benches, parents_loop_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs/porep/parent_cache.json",
    "content": "{\n  \"v28-sdr-parent-2aa9c77c3e58259481351cc4be2079cc71e1c9af39700866545c043bfa30fb42\": {\n    \"sector_size\": 536870912,\n    \"digest\": \"3adcc092423aa76d6a7184016893406da44dd974b219a89cd3ece25e4e3018f5\"\n  },\n  \"v28-sdr-parent-3f0eef38bb48af1f48ad65e14eb85b4ebfc167cec18cd81764f6d998836c9899\": {\n    \"sector_size\": 2048,\n    \"digest\": \"3da49221e2ed55371b86d0bf3d6526fcf128af61bed904f966428db1b531750d\"\n  },\n  \"v28-sdr-parent-8a99e8d6b6be7ab87a56b632e6739ff201c23ea14e99737c74690f0e265574d6\": {\n    \"sector_size\": 68719476736,\n    \"digest\": \"2778a732ad46a7dc18e0564dfdf59fd321dcde74ab476fd6d3c4e6735d7cd89c\"\n  },\n  \"v28-sdr-parent-dcdabb0fbe4364bf0ac28b6a18c66de246409fa1a9020a00f33fb3e3053da6dc\": {\n    \"sector_size\": 8388608,\n    \"digest\": \"a76604f2f59f2744c7151653bbb1d8596b6b57d295e6fa6c1f0c41d725b502ce\"\n  },\n  \"v28-sdr-parent-e1fa5d5b811ddbd118be3412c4a8c329156b8b8acc72632bca459455b5a05a13\": {\n    \"sector_size\": 34359738368,\n    \"digest\": \"3c4f9841fcc75aed8c695800e58d08480629f25af3a2aefd81904181d75cc0b6\"\n  }\n}"
  },
  {
    "path": "storage-proofs/porep/src/drg/circuit.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::gadgets::{\n    boolean::Boolean,\n    sha256::sha256 as sha256_circuit,\n    {multipack, num},\n};\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse ff::PrimeField;\nuse fil_sapling_crypto::jubjub::JubjubEngine;\nuse paired::bls12_381::{Bls12, Fr};\n\nuse storage_proofs_core::{\n    compound_proof::CircuitComponent, error::Result, gadgets::constraint, gadgets::encode,\n    gadgets::por::PoRCircuit, gadgets::uint64, gadgets::variables::Root, hasher::Hasher,\n    merkle::BinaryMerkleTree, util::fixup_bits,\n};\n\n/// DRG based Proof of Replication.\n///\n/// # Fields\n///\n/// * `params` - parameters for the curve\n///\n/// ----> Private `replica_node` - The replica node being proven.\n///\n/// * `replica_node` - The replica node being proven.\n/// * `replica_node_path` - The path of the replica node being proven.\n/// * `replica_root` - The merkle root of the replica.\n///\n/// * `replica_parents` - A list of all parents in the replica, with their value.\n/// * `replica_parents_paths` - A list of all parents paths in the replica.\n///\n/// ----> Private `data_node` - The data node being proven.\n///\n/// * `data_node_path` - The path of the data node being proven.\n/// * `data_root` - The merkle root of the data.\n/// * `replica_id` - The id of the replica.\n///\n\npub struct DrgPoRepCircuit<'a, H: Hasher> {\n    pub replica_nodes: Vec<Option<Fr>>,\n    #[allow(clippy::type_complexity)]\n    pub replica_nodes_paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n    pub replica_root: Root<Bls12>,\n    pub replica_parents: Vec<Vec<Option<Fr>>>,\n    #[allow(clippy::type_complexity)]\n    pub replica_parents_paths: Vec<Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>>,\n    pub data_nodes: Vec<Option<Fr>>,\n    #[allow(clippy::type_complexity)]\n    pub data_nodes_paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n    pub data_root: Root<Bls12>,\n    pub replica_id: Option<Fr>,\n    pub private: bool,\n    pub _h: PhantomData<&'a H>,\n}\n\nimpl<'a, H: 'static + Hasher> DrgPoRepCircuit<'a, H> {\n    #[allow(clippy::type_complexity, clippy::too_many_arguments)]\n    pub fn synthesize<CS>(\n        mut cs: CS,\n        replica_nodes: Vec<Option<Fr>>,\n        replica_nodes_paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n        replica_root: Root<Bls12>,\n        replica_parents: Vec<Vec<Option<Fr>>>,\n        replica_parents_paths: Vec<Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>>,\n        data_nodes: Vec<Option<Fr>>,\n        data_nodes_paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n        data_root: Root<Bls12>,\n        replica_id: Option<Fr>,\n        private: bool,\n    ) -> Result<(), SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        DrgPoRepCircuit::<H> {\n            replica_nodes,\n            replica_nodes_paths,\n            replica_root,\n            replica_parents,\n            replica_parents_paths,\n            data_nodes,\n            data_nodes_paths,\n            data_root,\n            replica_id,\n            private,\n            _h: Default::default(),\n        }\n        .synthesize(&mut cs)\n    }\n}\n\n#[derive(Default, Clone)]\npub struct ComponentPrivateInputs {\n    pub comm_r: Option<Root<Bls12>>,\n    pub comm_d: Option<Root<Bls12>>,\n}\n\nimpl<'a, H: Hasher> CircuitComponent for DrgPoRepCircuit<'a, H> {\n    type ComponentPrivateInputs = ComponentPrivateInputs;\n}\n\n///\n/// # Public Inputs\n///\n/// * [0] replica_id/0\n/// * [1] replica_id/1\n/// 
* [2] replica auth_path_bits\n/// * [3] replica commitment (root hash)\n/// * for i in 0..replica_parents.len()\n///   * [ ] replica parent auth_path_bits\n///   * [ ] replica parent commitment (root hash) // Same for all.\n/// * [r + 1] data auth_path_bits\n/// * [r + 2] data commitment (root hash)\n///\n///  Total = 6 + (2 * replica_parents.len())\n/// # Private Inputs\n///\n/// * [ ] replica value/0\n/// * for i in 0..replica_parents.len()\n///  * [ ] replica parent value/0\n/// * [ ] data value/\n///\n/// Total = 2 + replica_parents.len()\n///\nimpl<'a, H: 'static + Hasher> Circuit<Bls12> for DrgPoRepCircuit<'a, H> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let replica_id = self.replica_id;\n        let replica_root = self.replica_root;\n        let data_root = self.data_root;\n\n        let nodes = self.data_nodes.len();\n\n        assert_eq!(self.replica_nodes.len(), nodes);\n        assert_eq!(self.replica_nodes_paths.len(), nodes);\n        assert_eq!(self.replica_parents.len(), nodes);\n        assert_eq!(self.replica_parents_paths.len(), nodes);\n        assert_eq!(self.data_nodes_paths.len(), nodes);\n\n        let replica_node_num = num::AllocatedNum::alloc(cs.namespace(|| \"replica_id_num\"), || {\n            replica_id.ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        replica_node_num.inputize(cs.namespace(|| \"replica_id\"))?;\n\n        // get the replica_id in bits\n        let replica_id_bits =\n            fixup_bits(replica_node_num.to_bits_le(cs.namespace(|| \"replica_id_bits\"))?);\n\n        let replica_root_var = Root::Var(replica_root.allocated(cs.namespace(|| \"replica_root\"))?);\n        let data_root_var = Root::Var(data_root.allocated(cs.namespace(|| \"data_root\"))?);\n\n        for i in 0..self.data_nodes.len() {\n            let mut cs = cs.namespace(|| format!(\"challenge_{}\", i));\n            // ensure that all inputs are well formed\n            let replica_node_path = &self.replica_nodes_paths[i];\n            let replica_parents_paths = &self.replica_parents_paths[i];\n            let data_node_path = &self.data_nodes_paths[i];\n\n            let replica_node = &self.replica_nodes[i];\n            let replica_parents = &self.replica_parents[i];\n            let data_node = &self.data_nodes[i];\n\n            assert_eq!(replica_parents.len(), replica_parents_paths.len());\n            assert_eq!(data_node_path.len(), replica_node_path.len());\n            assert_eq!(replica_node.is_some(), data_node.is_some());\n\n            // Inclusion checks\n            {\n                let mut cs = cs.namespace(|| \"inclusion_checks\");\n                PoRCircuit::<BinaryMerkleTree<H>>::synthesize(\n                    cs.namespace(|| \"replica_inclusion\"),\n                    Root::Val(*replica_node),\n                    replica_node_path.clone().into(),\n                    replica_root_var.clone(),\n                    self.private,\n                )?;\n\n                // validate each replica_parents merkle proof\n                for j in 0..replica_parents.len() {\n                    PoRCircuit::<BinaryMerkleTree<H>>::synthesize(\n                        cs.namespace(|| format!(\"parents_inclusion_{}\", j)),\n                        Root::Val(replica_parents[j]),\n                        replica_parents_paths[j].clone().into(),\n                        replica_root_var.clone(),\n                        self.private,\n                    )?;\n                
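// Editorial note: binding each parent's value to the replica tree here is\n                // what makes the KDF input assembled below trustworthy.\n                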
}\n\n                // validate data node commitment\n                PoRCircuit::<BinaryMerkleTree<H>>::synthesize(\n                    cs.namespace(|| \"data_inclusion\"),\n                    Root::Val(*data_node),\n                    data_node_path.clone().into(),\n                    data_root_var.clone(),\n                    self.private,\n                )?;\n            }\n\n            // Encoding checks\n            {\n                let mut cs = cs.namespace(|| \"encoding_checks\");\n                // get the parents into bits\n                let parents_bits: Vec<Vec<Boolean>> = replica_parents\n                    .iter()\n                    .enumerate()\n                    .map(|(i, val)| {\n                        let num = num::AllocatedNum::alloc(\n                            cs.namespace(|| format!(\"parents_{}_num\", i)),\n                            || {\n                                val.map(Into::into)\n                                    .ok_or_else(|| SynthesisError::AssignmentMissing)\n                            },\n                        )?;\n                        Ok(fixup_bits(num.to_bits_le(\n                            cs.namespace(|| format!(\"parents_{}_bits\", i)),\n                        )?))\n                    })\n                    .collect::<Result<Vec<Vec<Boolean>>, SynthesisError>>()?;\n\n                // generate the encoding key\n                let key = kdf(\n                    cs.namespace(|| \"kdf\"),\n                    &replica_id_bits,\n                    parents_bits,\n                    None,\n                    None,\n                )?;\n\n                let replica_node_num =\n                    num::AllocatedNum::alloc(cs.namespace(|| \"replica_node\"), || {\n                        (*replica_node).ok_or_else(|| SynthesisError::AssignmentMissing)\n                    })?;\n\n                let decoded = encode::decode(cs.namespace(|| \"decode\"), &key, &replica_node_num)?;\n\n                // TODO: this should not be here; instead, this should be the leaf Fr in the data_auth_path\n                // TODO: also note that we need to change/make sure that the leaves are the data, instead of hashes of the data\n                let expected = num::AllocatedNum::alloc(cs.namespace(|| \"data node\"), || {\n                    data_node.ok_or_else(|| SynthesisError::AssignmentMissing)\n                })?;\n\n                // ensure the decoded replica node and the data node match\n                constraint::equal(&mut cs, || \"equality\", &expected, &decoded);\n            }\n        }\n        // profit!\n        Ok(())\n    }\n}\n\n/// Key derivation function.\nfn kdf<E, CS>(\n    mut cs: CS,\n    id: &[Boolean],\n    parents: Vec<Vec<Boolean>>,\n    window_index: Option<uint64::UInt64>,\n    node: Option<uint64::UInt64>,\n) -> Result<num::AllocatedNum<E>, SynthesisError>\nwhere\n    E: JubjubEngine,\n    CS: ConstraintSystem<E>,\n{\n    // ciphertexts will become a buffer of the layout\n    // id | node | encodedParentNode1 | encodedParentNode2 | ...\n\n    let mut ciphertexts = id.to_vec();\n\n    if let Some(window_index) = window_index {\n        ciphertexts.extend_from_slice(&window_index.to_bits_be());\n    }\n\n    if let Some(node) = node {\n        ciphertexts.extend_from_slice(&node.to_bits_be());\n    }\n\n    for parent in parents.into_iter() {\n        ciphertexts.extend_from_slice(&parent);\n    }\n\n    let alloc_bits = sha256_circuit(cs.namespace(|| \"hash\"), &ciphertexts[..])?;\n    let fr = if 
alloc_bits[0].get_value().is_some() {\n        let be_bits = alloc_bits\n            .iter()\n            .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing))\n            .collect::<Result<Vec<bool>, SynthesisError>>()?;\n\n        let le_bits = be_bits\n            .chunks(8)\n            .flat_map(|chunk| chunk.iter().rev())\n            .copied()\n            .take(E::Fr::CAPACITY as usize)\n            .collect::<Vec<bool>>();\n\n        Ok(multipack::compute_multipacking::<E>(&le_bits)[0])\n    } else {\n        Err(SynthesisError::AssignmentMissing)\n    };\n\n    num::AllocatedNum::<E>::alloc(cs.namespace(|| \"result_num\"), || fr)\n}\n\n#[cfg(test)]\nmod tests {\n\n    use super::*;\n\n    use ff::Field;\n    use generic_array::typenum;\n    use merkletree::store::StoreConfig;\n    use pretty_assertions::assert_eq;\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        cache_key::CacheKey,\n        compound_proof,\n        drgraph::{graph_height, BucketGraph, BASE_DEGREE},\n        fr32::{bytes_into_fr, fr_into_bytes},\n        gadgets::TestConstraintSystem,\n        hasher::PedersenHasher,\n        merkle::MerkleProofTrait,\n        proof::ProofScheme,\n        util::{data_at_node, default_rows_to_discard},\n    };\n\n    use super::super::compound::DrgPoRepCompound;\n    use crate::drg;\n    use crate::stacked::BINARY_ARITY;\n    use crate::PoRep;\n\n    #[test]\n    fn drgporep_input_circuit_with_bls12_381() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let nodes = 16;\n        let degree = BASE_DEGREE;\n        let challenge = 2;\n\n        let replica_id: Fr = Fr::random(rng);\n\n        let mut data: Vec<u8> = (0..nodes)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        // TODO: don't clone everything\n        let original_data = data.clone();\n        let data_node: Option<Fr> = Some(\n            bytes_into_fr(\n                data_at_node(&original_data, challenge).expect(\"failed to read original data\"),\n            )\n            .unwrap(),\n        );\n\n        let sp = drg::SetupParams {\n            drg: drg::DrgParams {\n                nodes,\n                degree,\n                expansion_degree: 0,\n                porep_id: [32; 32],\n            },\n            private: false,\n            challenges_count: 1,\n        };\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempfile::tempdir().unwrap();\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            default_rows_to_discard(nodes, BINARY_ARITY),\n        );\n\n        // Generate a replica path.\n        let replica_path = cache_dir.path().join(\"replica-path\");\n\n        let pp = drg::DrgPoRep::<PedersenHasher, BucketGraph<_>>::setup(&sp)\n            .expect(\"failed to create drgporep setup\");\n        let (tau, aux) = drg::DrgPoRep::<PedersenHasher, _>::replicate(\n            &pp,\n            &replica_id.into(),\n            (&mut data[..]).into(),\n            None,\n            config,\n            replica_path.clone(),\n        )\n        .expect(\"failed to replicate\");\n\n        let pub_inputs = drg::PublicInputs {\n            
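// Public inputs for the vanilla proof: the replica id, the challenged\n            // node indices, and the commitments produced by replication (tau).\n            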
replica_id: Some(replica_id.into()),\n            challenges: vec![challenge],\n            tau: Some(tau.into()),\n        };\n\n        let priv_inputs = drg::PrivateInputs::<PedersenHasher> {\n            tree_d: &aux.tree_d,\n            tree_r: &aux.tree_r,\n            tree_r_config_rows_to_discard: default_rows_to_discard(nodes, BINARY_ARITY),\n        };\n\n        let proof_nc = drg::DrgPoRep::<PedersenHasher, _>::prove(&pp, &pub_inputs, &priv_inputs)\n            .expect(\"failed to prove\");\n\n        assert!(\n            drg::DrgPoRep::<PedersenHasher, _>::verify(&pp, &pub_inputs, &proof_nc)\n                .expect(\"failed to verify\"),\n            \"failed to verify (non circuit)\"\n        );\n\n        let replica_node: Option<Fr> = Some(proof_nc.replica_nodes[0].data.into());\n\n        let replica_node_path = proof_nc.replica_nodes[0].proof.as_options();\n        let replica_root = Root::Val(Some(proof_nc.replica_root.into()));\n        let replica_parents = proof_nc\n            .replica_parents\n            .iter()\n            .map(|v| {\n                v.iter()\n                    .map(|(_, parent)| Some(parent.data.into()))\n                    .collect()\n            })\n            .collect();\n        let replica_parents_paths: Vec<_> = proof_nc\n            .replica_parents\n            .iter()\n            .map(|v| {\n                v.iter()\n                    .map(|(_, parent)| parent.proof.as_options())\n                    .collect()\n            })\n            .collect();\n\n        let data_node_path = proof_nc.nodes[0].proof.as_options();\n        let data_root = Root::Val(Some(proof_nc.data_root.into()));\n        let replica_id = Some(replica_id);\n\n        assert!(\n            proof_nc.nodes[0].proof.validate(challenge),\n            \"failed to verify data commitment\"\n        );\n        assert!(\n            proof_nc.nodes[0]\n                .proof\n                .validate_data(data_node.unwrap().into()),\n            \"failed to verify data commitment with data\"\n        );\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        DrgPoRepCircuit::<PedersenHasher>::synthesize(\n            cs.namespace(|| \"drgporep\"),\n            vec![replica_node],\n            vec![replica_node_path],\n            replica_root,\n            replica_parents,\n            replica_parents_paths,\n            vec![data_node],\n            vec![data_node_path],\n            data_root,\n            replica_id,\n            false,\n        )\n        .expect(\"failed to synthesize circuit\");\n\n        if !cs.is_satisfied() {\n            println!(\n                \"failed to satisfy: {:?}\",\n                cs.which_is_unsatisfied().unwrap()\n            );\n        }\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n        assert_eq!(cs.num_inputs(), 18, \"wrong number of inputs\");\n        assert_eq!(cs.num_constraints(), 149_580, \"wrong number of constraints\");\n\n        assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n        assert_eq!(\n            cs.get_input(1, \"drgporep/replica_id/input variable\"),\n            replica_id.unwrap()\n        );\n\n        let generated_inputs =\n                <DrgPoRepCompound<_, _> as compound_proof::CompoundProof<_, _>>::generate_public_inputs(\n                    &pub_inputs,\n                    &pp,\n                    None,\n                )\n                .unwrap();\n        let expected_inputs = cs.get_inputs();\n\n        for ((input, 
label), generated_input) in\n            expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n        {\n            assert_eq!(input, generated_input, \"{}\", label);\n        }\n\n        assert_eq!(\n            generated_inputs.len(),\n            expected_inputs.len() - 1,\n            \"inputs are not the same length\"\n        );\n    }\n\n    #[test]\n    fn drgporep_input_circuit_num_constraints() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        // 1 GB\n        let n = (1 << 30) / 32;\n        let m = BASE_DEGREE;\n        let tree_depth = graph_height::<typenum::U2>(n);\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        DrgPoRepCircuit::<PedersenHasher>::synthesize(\n            cs.namespace(|| \"drgporep\"),\n            vec![Some(Fr::random(rng)); 1],\n            vec![vec![(vec![Some(Fr::random(rng))], Some(0)); tree_depth]; 1],\n            Root::Val(Some(Fr::random(rng))),\n            vec![vec![Some(Fr::random(rng)); m]; 1],\n            vec![vec![vec![(vec![Some(Fr::random(rng))], Some(0)); tree_depth]; m]; 1],\n            vec![Some(Fr::random(rng)); 1],\n            vec![vec![(vec![Some(Fr::random(rng))], Some(0)); tree_depth]; 1],\n            Root::Val(Some(Fr::random(rng))),\n            Some(Fr::random(rng)),\n            false,\n        )\n        .expect(\"failed to synthesize circuit\");\n\n        assert_eq!(cs.num_inputs(), 18, \"wrong number of inputs\");\n        assert_eq!(cs.num_constraints(), 391_404, \"wrong number of constraints\");\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/drg/compound.rs",
    "content": "use std::marker::PhantomData;\n\nuse anyhow::{ensure, Context};\nuse bellperson::Circuit;\nuse generic_array::typenum;\nuse paired::bls12_381::{Bls12, Fr};\n\nuse storage_proofs_core::{\n    compound_proof::{CircuitComponent, CompoundProof},\n    drgraph::Graph,\n    error::Result,\n    gadgets::por::PoRCompound,\n    gadgets::variables::Root,\n    hasher::Hasher,\n    merkle::{BinaryMerkleTree, MerkleProofTrait},\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    por,\n    proof::ProofScheme,\n};\n\nuse super::circuit::DrgPoRepCircuit;\nuse super::DrgPoRep;\n\n/// DRG based Proof of Replication.\n///\n/// # Fields\n///\n/// * `params` - parameters for the curve\n///\n/// ----> Private `replica_node` - The replica node being proven.\n///\n/// * `replica_node` - The replica node being proven.\n/// * `replica_node_path` - The path of the replica node being proven.\n/// * `replica_root` - The merkle root of the replica.\n///\n/// * `replica_parents` - A list of all parents in the replica, with their value.\n/// * `replica_parents_paths` - A list of all parents paths in the replica.\n///\n/// ----> Private `data_node` - The data node being proven.\n///\n/// * `data_node_path` - The path of the data node being proven.\n/// * `data_root` - The merkle root of the data.\n/// * `replica_id` - The id of the replica.\n///\n\npub struct DrgPoRepCompound<H, G>\nwhere\n    H: Hasher,\n    G::Key: AsRef<H::Domain>,\n    G: Graph<H>,\n{\n    // Sad phantom is sad\n    _h: PhantomData<H>,\n    _g: PhantomData<G>,\n}\n\nimpl<C: Circuit<Bls12>, H: Hasher, G: Graph<H>, P: ParameterSetMetadata> CacheableParameters<C, P>\n    for DrgPoRepCompound<H, G>\nwhere\n    G::Key: AsRef<H::Domain>,\n{\n    fn cache_prefix() -> String {\n        format!(\"drg-proof-of-replication-{}\", H::name())\n    }\n}\n\nimpl<'a, H, G> CompoundProof<'a, DrgPoRep<'a, H, G>, DrgPoRepCircuit<'a, H>>\n    for DrgPoRepCompound<H, G>\nwhere\n    H: 'static + Hasher,\n    G::Key: AsRef<<H as Hasher>::Domain>,\n    G: 'a + Graph<H> + ParameterSetMetadata + Sync + Send,\n{\n    fn generate_public_inputs(\n        pub_in: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicParams,\n        // We can ignore k because challenges are generated by caller and included\n        // in PublicInputs.\n        _k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        let replica_id = pub_in.replica_id.context(\"missing replica id\")?;\n        let challenges = &pub_in.challenges;\n\n        ensure!(\n            pub_in.tau.is_none() == pub_params.private,\n            \"Public input parameter tau must be unset\"\n        );\n\n        let (comm_r, comm_d) = match pub_in.tau {\n            None => (None, None),\n            Some(tau) => (Some(tau.comm_r), Some(tau.comm_d)),\n        };\n\n        let leaves = pub_params.graph.size();\n\n        let por_pub_params = por::PublicParams {\n            leaves,\n            private: pub_params.private,\n        };\n\n        let mut input: Vec<Fr> = Vec::new();\n        input.push(replica_id.into());\n\n        let mut parents = vec![0; pub_params.graph.degree()];\n        for challenge in challenges {\n            let mut por_nodes = vec![*challenge as u32];\n            pub_params.graph.parents(*challenge, &mut parents)?;\n            por_nodes.extend_from_slice(&parents);\n\n            for node in por_nodes {\n                let por_pub_inputs = por::PublicInputs {\n                    commitment: 
comm_r,\n                    challenge: node as usize,\n                };\n                let por_inputs = PoRCompound::<BinaryMerkleTree<H>>::generate_public_inputs(\n                    &por_pub_inputs,\n                    &por_pub_params,\n                    None,\n                )?;\n\n                input.extend(por_inputs);\n            }\n\n            let por_pub_inputs = por::PublicInputs {\n                commitment: comm_d,\n                challenge: *challenge,\n            };\n\n            let por_inputs = PoRCompound::<BinaryMerkleTree<H>>::generate_public_inputs(\n                &por_pub_inputs,\n                &por_pub_params,\n                None,\n            )?;\n            input.extend(por_inputs);\n        }\n        Ok(input)\n    }\n\n    fn circuit(\n        public_inputs: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicInputs,\n        component_private_inputs: <DrgPoRepCircuit<H> as CircuitComponent>::ComponentPrivateInputs,\n        proof: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::Proof,\n        public_params: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<DrgPoRepCircuit<'a, H>> {\n        let challenges = public_params.challenges_count;\n        let len = proof.nodes.len();\n\n        ensure!(len <= challenges, \"too many challenges\");\n        ensure!(\n            proof.replica_parents.len() == len,\n            \"Number of replica parents must match\"\n        );\n        ensure!(\n            proof.replica_nodes.len() == len,\n            \"Number of replica nodes must match\"\n        );\n\n        let replica_nodes: Vec<_> = proof\n            .replica_nodes\n            .iter()\n            .map(|node| Some(node.data.into()))\n            .collect();\n\n        let replica_nodes_paths: Vec<_> = proof\n            .replica_nodes\n            .iter()\n            .map(|node| node.proof.as_options())\n            .collect();\n\n        let is_private = public_params.private;\n\n        let (data_root, replica_root) = if is_private {\n            (\n                component_private_inputs.comm_d.context(\"is_private\")?,\n                component_private_inputs.comm_r.context(\"is_private\")?,\n            )\n        } else {\n            (\n                Root::Val(Some(proof.data_root.into())),\n                Root::Val(Some(proof.replica_root.into())),\n            )\n        };\n\n        let replica_id = public_inputs.replica_id;\n\n        let replica_parents: Vec<_> = proof\n            .replica_parents\n            .iter()\n            .map(|parents| {\n                parents\n                    .iter()\n                    .map(|(_, parent)| Some(parent.data.into()))\n                    .collect()\n            })\n            .collect();\n\n        let replica_parents_paths: Vec<Vec<_>> = proof\n            .replica_parents\n            .iter()\n            .map(|parents| {\n                let p: Vec<_> = parents\n                    .iter()\n                    .map(|(_, parent)| parent.proof.as_options())\n                    .collect();\n                p\n            })\n            .collect();\n\n        let data_nodes: Vec<_> = proof\n            .nodes\n            .iter()\n            .map(|node| Some(node.data.into()))\n            .collect();\n\n        let data_nodes_paths: Vec<_> = proof\n            .nodes\n            .iter()\n            .map(|node| node.proof.as_options())\n            .collect();\n\n        ensure!(\n            
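// A private setup carries no public commitments (tau is None), while a\n            // public one must provide them; reject inconsistent combinations.\n            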
public_inputs.tau.is_none() == public_params.private,\n            \"inconsistent private state\"\n        );\n\n        Ok(DrgPoRepCircuit {\n            replica_nodes,\n            replica_nodes_paths,\n            replica_root,\n            replica_parents,\n            replica_parents_paths,\n            data_nodes,\n            data_nodes_paths,\n            data_root,\n            replica_id: replica_id.map(Into::into),\n            private: public_params.private,\n            _h: Default::default(),\n        })\n    }\n\n    fn blank_circuit(\n        public_params: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicParams,\n    ) -> DrgPoRepCircuit<'a, H> {\n        let depth = public_params.graph.merkle_tree_depth::<typenum::U2>() as usize;\n        let degree = public_params.graph.degree();\n        let arity = 2;\n\n        let challenges_count = public_params.challenges_count;\n\n        let replica_nodes = vec![None; challenges_count];\n        let replica_nodes_paths =\n            vec![vec![(vec![None; arity - 1], None); depth - 1]; challenges_count];\n\n        let replica_root = Root::Val(None);\n        let replica_parents = vec![vec![None; degree]; challenges_count];\n        let replica_parents_paths =\n            vec![vec![vec![(vec![None; arity - 1], None); depth - 1]; degree]; challenges_count];\n        let data_nodes = vec![None; challenges_count];\n        let data_nodes_paths =\n            vec![vec![(vec![None; arity - 1], None); depth - 1]; challenges_count];\n        let data_root = Root::Val(None);\n\n        DrgPoRepCircuit {\n            replica_nodes,\n            replica_nodes_paths,\n            replica_root,\n            replica_parents,\n            replica_parents_paths,\n            data_nodes,\n            data_nodes_paths,\n            data_root,\n            replica_id: None,\n            private: public_params.private,\n            _h: Default::default(),\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use ff::Field;\n    use merkletree::store::StoreConfig;\n    use pretty_assertions::assert_eq;\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        cache_key::CacheKey,\n        compound_proof,\n        drgraph::{BucketGraph, BASE_DEGREE},\n        fr32::fr_into_bytes,\n        gadgets::{MetricCS, TestConstraintSystem},\n        hasher::{Hasher, PedersenHasher, PoseidonHasher},\n        merkle::{BinaryMerkleTree, MerkleTreeTrait},\n        proof::NoRequirements,\n        test_helper::setup_replica,\n        util::default_rows_to_discard,\n    };\n\n    use crate::stacked::BINARY_ARITY;\n    use crate::{drg, PoRep};\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn test_drgporep_compound_pedersen() {\n        drgporep_test_compound::<BinaryMerkleTree<PedersenHasher>>();\n    }\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn test_drgporep_compound_poseidon() {\n        drgporep_test_compound::<BinaryMerkleTree<PoseidonHasher>>();\n    }\n\n    fn drgporep_test_compound<Tree: 'static + MerkleTreeTrait>() {\n        // femme::pretty::Logger::new()\n        //     .start(log::LevelFilter::Trace)\n        //     .ok();\n\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let nodes = 8;\n        let degree = BASE_DEGREE;\n        let challenges = vec![1, 3];\n\n        let replica_id: Fr = Fr::random(rng);\n        let data: Vec<u8> = (0..nodes)\n            .flat_map(|_| 
fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempfile::tempdir().unwrap();\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            default_rows_to_discard(nodes, BINARY_ARITY),\n        );\n\n        // Generate a replica path.\n        let replica_path = cache_dir.path().join(\"replica-path\");\n        let mut mmapped_data = setup_replica(&data, &replica_path);\n\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: drg::SetupParams {\n                drg: drg::DrgParams {\n                    nodes,\n                    degree,\n                    expansion_degree: 0,\n                    porep_id: [32; 32],\n                },\n                private: false,\n                challenges_count: 2,\n            },\n            partitions: None,\n            priority: false,\n        };\n\n        let public_params =\n            DrgPoRepCompound::<Tree::Hasher, BucketGraph<Tree::Hasher>>::setup(&setup_params)\n                .expect(\"setup failed\");\n\n        let data_tree: Option<BinaryMerkleTree<Tree::Hasher>> = None;\n        let (tau, aux) = drg::DrgPoRep::<Tree::Hasher, BucketGraph<_>>::replicate(\n            &public_params.vanilla_params,\n            &replica_id.into(),\n            (mmapped_data.as_mut()).into(),\n            data_tree,\n            config,\n            replica_path.clone(),\n        )\n        .expect(\"failed to replicate\");\n\n        let public_inputs = drg::PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n            replica_id: Some(replica_id.into()),\n            challenges,\n            tau: Some(tau),\n        };\n        let private_inputs = drg::PrivateInputs {\n            tree_d: &aux.tree_d,\n            tree_r: &aux.tree_r,\n            tree_r_config_rows_to_discard: default_rows_to_discard(nodes, BINARY_ARITY),\n        };\n\n        // This duplication is necessary so public_params don't outlive public_inputs and private_inputs.\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: drg::SetupParams {\n                drg: drg::DrgParams {\n                    nodes,\n                    degree,\n                    expansion_degree: 0,\n                    porep_id: [32; 32],\n                },\n                private: false,\n                challenges_count: 2,\n            },\n            partitions: None,\n            priority: false,\n        };\n\n        let public_params =\n            DrgPoRepCompound::<Tree::Hasher, BucketGraph<Tree::Hasher>>::setup(&setup_params)\n                .expect(\"setup failed\");\n\n        {\n            let (circuit, inputs) = DrgPoRepCompound::<Tree::Hasher, _>::circuit_for_test(\n                &public_params,\n                &public_inputs,\n                &private_inputs,\n            )\n            .unwrap();\n\n            let mut cs = TestConstraintSystem::new();\n\n            circuit\n                .synthesize(&mut cs)\n                .expect(\"failed to synthesize test circuit\");\n            assert!(cs.is_satisfied());\n            assert!(cs.verify(&inputs));\n\n            let blank_circuit = <DrgPoRepCompound<_, _> as CompoundProof<_, _>>::blank_circuit(\n                &public_params.vanilla_params,\n            );\n\n            let mut cs_blank = MetricCS::new();\n       
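     // The blank circuit is synthesized from all-`None` witnesses; its\n            // constraint system must match the real circuit's exactly, which the\n            // chunked comparison below localizes to a specific region on failure.\n       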
     blank_circuit\n                .synthesize(&mut cs_blank)\n                .expect(\"failed to synthesize blank circuit\");\n\n            let a = cs_blank.pretty_print_list();\n            let b = cs.pretty_print_list();\n\n            for (i, (a, b)) in a.chunks(100).zip(b.chunks(100)).enumerate() {\n                assert_eq!(a, b, \"failed at chunk {}\", i);\n            }\n        }\n\n        {\n            let gparams = DrgPoRepCompound::<Tree::Hasher, _>::groth_params(\n                Some(rng),\n                &public_params.vanilla_params,\n            )\n            .expect(\"failed to get groth params\");\n\n            let proof = DrgPoRepCompound::<Tree::Hasher, _>::prove(\n                &public_params,\n                &public_inputs,\n                &private_inputs,\n                &gparams,\n            )\n            .expect(\"failed while proving\");\n\n            let verified = DrgPoRepCompound::<Tree::Hasher, _>::verify(\n                &public_params,\n                &public_inputs,\n                &proof,\n                &NoRequirements,\n            )\n            .expect(\"failed while verifying\");\n\n            assert!(verified);\n        }\n\n        cache_dir.close().expect(\"Failed to remove cache dir\");\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/drg/mod.rs",
    "content": "mod circuit;\nmod compound;\nmod vanilla;\n\npub use self::circuit::*;\npub use self::compound::*;\npub use self::vanilla::*;\n"
  },
  {
    "path": "storage-proofs/porep/src/drg/vanilla.rs",
    "content": "use std::marker::PhantomData;\nuse std::path::PathBuf;\n\nuse anyhow::{ensure, Context};\nuse generic_array::typenum;\nuse merkletree::store::{ReplicaConfig, StoreConfig};\nuse rayon::prelude::*;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\n\nuse storage_proofs_core::{\n    drgraph::Graph,\n    error::Result,\n    fr32::bytes_into_fr_repr_safe,\n    hasher::{Domain, HashFunction, Hasher, PoseidonArity},\n    merkle::{\n        create_base_lcmerkle_tree, create_base_merkle_tree, BinaryLCMerkleTree, BinaryMerkleTree,\n        LCMerkleTree, MerkleProof, MerkleProofTrait, MerkleTreeTrait,\n    },\n    parameter_cache::ParameterSetMetadata,\n    proof::{NoRequirements, ProofScheme},\n    util::{data_at_node, data_at_node_offset, NODE_SIZE},\n    Data,\n};\n\nuse crate::{encode, PoRep};\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub struct Tau<T> {\n    pub comm_r: T,\n    pub comm_d: T,\n}\n\nimpl<T: Domain> Tau<T> {\n    pub fn new(comm_d: T, comm_r: T) -> Self {\n        Tau { comm_d, comm_r }\n    }\n}\n\n#[derive(Debug)]\npub struct ProverAux<H: Hasher> {\n    pub tree_d: BinaryMerkleTree<H>,\n    pub tree_r: BinaryLCMerkleTree<H>,\n}\n\nimpl<H: Hasher> ProverAux<H> {\n    pub fn new(tree_d: BinaryMerkleTree<H>, tree_r: BinaryLCMerkleTree<H>) -> Self {\n        ProverAux { tree_d, tree_r }\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct PublicInputs<T: Domain> {\n    pub replica_id: Option<T>,\n    pub challenges: Vec<usize>,\n    pub tau: Option<Tau<T>>,\n}\n\n#[derive(Debug)]\npub struct PrivateInputs<'a, H: 'a + Hasher> {\n    pub tree_d: &'a BinaryMerkleTree<H>,\n    pub tree_r: &'a BinaryLCMerkleTree<H>,\n    pub tree_r_config_rows_to_discard: usize,\n}\n\n#[derive(Clone, Debug)]\npub struct SetupParams {\n    pub drg: DrgParams,\n    pub private: bool,\n    pub challenges_count: usize,\n}\n\n#[derive(Debug, Clone)]\npub struct DrgParams {\n    // Number of nodes\n    pub nodes: usize,\n\n    // Base degree of DRG\n    pub degree: usize,\n\n    pub expansion_degree: usize,\n\n    pub porep_id: [u8; 32],\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata,\n{\n    pub graph: G,\n    pub private: bool,\n    pub challenges_count: usize,\n\n    _h: PhantomData<H>,\n}\n\nimpl<H, G> PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata,\n{\n    pub fn new(graph: G, private: bool, challenges_count: usize) -> Self {\n        PublicParams {\n            graph,\n            private,\n            challenges_count,\n            _h: PhantomData,\n        }\n    }\n}\n\nimpl<H, G> ParameterSetMetadata for PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata,\n{\n    fn identifier(&self) -> String {\n        format!(\n            \"drgporep::PublicParams{{graph: {}}}\",\n            self.graph.identifier(),\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.graph.sector_size()\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct DataProof<H: Hasher, U: PoseidonArity> {\n    #[serde(bound(\n        serialize = \"MerkleProof<H, U>: Serialize\",\n        deserialize = \"MerkleProof<H, U>: Deserialize<'de>\"\n    ))]\n    pub proof: MerkleProof<H, U>,\n    pub data: H::Domain,\n}\n\nimpl<H: Hasher, U: 'static + PoseidonArity> DataProof<H, U> {\n    pub fn new(n: usize) -> Self {\n        DataProof {\n            proof: MerkleProof::new(n),\n            data: Default::default(),\n       
 }\n    }\n\n    /// proves_challenge returns true if this self.proof corresponds to challenge.\n    /// This is useful for verifying that a supplied proof is actually relevant to a given challenge.\n    pub fn proves_challenge(&self, challenge: usize) -> bool {\n        self.proof.proves_challenge(challenge)\n    }\n}\n\npub type ReplicaParents<H> = Vec<(u32, DataProof<H, typenum::U2>)>;\n\n#[derive(Default, Debug, Clone, Serialize, Deserialize)]\npub struct Proof<H: Hasher> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub data_root: H::Domain,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub replica_root: H::Domain,\n    #[serde(bound(\n        serialize = \"DataProof<H, typenum::U2>: Serialize\",\n        deserialize = \"DataProof<H, typenum::U2>: Deserialize<'de>\"\n    ))]\n    pub replica_nodes: Vec<DataProof<H, typenum::U2>>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub replica_parents: Vec<ReplicaParents<H>>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub nodes: Vec<DataProof<H, typenum::U2>>,\n}\n\nimpl<H: Hasher> Proof<H> {\n    pub fn new_empty(height: usize, degree: usize, challenges: usize) -> Proof<H> {\n        Proof {\n            data_root: Default::default(),\n            replica_root: Default::default(),\n            replica_nodes: vec![DataProof::new(height); challenges],\n            replica_parents: vec![vec![(0, DataProof::new(height)); degree]; challenges],\n            nodes: vec![DataProof::new(height); challenges],\n        }\n    }\n\n    pub fn new(\n        replica_nodes: Vec<DataProof<H, typenum::U2>>,\n        replica_parents: Vec<ReplicaParents<H>>,\n        nodes: Vec<DataProof<H, typenum::U2>>,\n    ) -> Proof<H> {\n        Proof {\n            data_root: nodes[0].proof.root(),\n            replica_root: replica_nodes[0].proof.root(),\n            replica_nodes,\n            replica_parents,\n            nodes,\n        }\n    }\n}\n\nimpl<'a, H: Hasher> From<&'a Proof<H>> for Proof<H> {\n    fn from(p: &Proof<H>) -> Proof<H> {\n        Proof {\n            data_root: p.nodes[0].proof.root(),\n            replica_root: p.replica_nodes[0].proof.root(),\n            replica_nodes: p.replica_nodes.clone(),\n            replica_parents: p.replica_parents.clone(),\n            nodes: p.nodes.clone(),\n        }\n    }\n}\n\n#[derive(Default)]\npub struct DrgPoRep<'a, H, G>\nwhere\n    H: 'a + Hasher,\n    G: 'a + Graph<H>,\n{\n    _h: PhantomData<&'a H>,\n    _g: PhantomData<G>,\n}\n\nimpl<'a, H, G> ProofScheme<'a> for DrgPoRep<'a, H, G>\nwhere\n    H: 'static + Hasher,\n    G: 'a + Graph<H> + ParameterSetMetadata,\n{\n    type PublicParams = PublicParams<H, G>;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<H as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<'a, H>;\n    type Proof = Proof<H>;\n    type Requirements = NoRequirements;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        let graph = G::new(\n            sp.drg.nodes,\n            sp.drg.degree,\n            sp.drg.expansion_degree,\n            sp.drg.porep_id,\n        )?;\n\n        Ok(PublicParams::new(graph, sp.private, sp.challenges_count))\n    }\n\n    fn 
prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        let len = pub_inputs.challenges.len();\n        ensure!(\n            len <= pub_params.challenges_count,\n            \"too many challenges {} > {}\",\n            len,\n            pub_params.challenges_count\n        );\n\n        let mut replica_nodes = Vec::with_capacity(len);\n        let mut replica_parents = Vec::with_capacity(len);\n        let mut data_nodes: Vec<DataProof<H, typenum::U2>> = Vec::with_capacity(len);\n\n        for i in 0..len {\n            let challenge = pub_inputs.challenges[i] % pub_params.graph.size();\n            ensure!(challenge != 0, \"cannot prove the first node\");\n\n            let tree_d = &priv_inputs.tree_d;\n            let tree_r = &priv_inputs.tree_r;\n            let tree_r_config_rows_to_discard = priv_inputs.tree_r_config_rows_to_discard;\n\n            let data = tree_r.read_at(challenge)?;\n            let tree_proof =\n                tree_r.gen_cached_proof(challenge, Some(tree_r_config_rows_to_discard))?;\n            replica_nodes.push(DataProof {\n                proof: tree_proof,\n                data,\n            });\n\n            let mut parents = vec![0; pub_params.graph.degree()];\n            pub_params.graph.parents(challenge, &mut parents)?;\n            let mut replica_parentsi = Vec::with_capacity(parents.len());\n\n            for p in &parents {\n                replica_parentsi.push((*p, {\n                    let proof = tree_r\n                        .gen_cached_proof(*p as usize, Some(tree_r_config_rows_to_discard))?;\n                    DataProof {\n                        proof,\n                        data: tree_r.read_at(*p as usize)?,\n                    }\n                }));\n            }\n\n            replica_parents.push(replica_parentsi);\n\n            let node_proof = tree_d.gen_proof(challenge)?;\n\n            {\n                // TODO: use this again, I can't make lifetimes work though atm and I do not know why\n                // let extracted = Self::extract(\n                //     pub_params,\n                //     &pub_inputs.replica_id.into_bytes(),\n                //     &replica,\n                //     challenge,\n                // )?;\n\n                let extracted = decode_domain_block::<H>(\n                    &pub_inputs.replica_id.context(\"missing replica_id\")?,\n                    tree_r,\n                    challenge,\n                    tree_r.read_at(challenge)?,\n                    &parents,\n                )?;\n                data_nodes.push(DataProof {\n                    data: extracted,\n                    proof: node_proof,\n                });\n            }\n        }\n\n        let proof = Proof::new(replica_nodes, replica_parents, data_nodes);\n\n        Ok(proof)\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        let mut hasher = Sha256::new();\n\n        for i in 0..pub_inputs.challenges.len() {\n            {\n                // This was verify_proof_meta.\n                if pub_inputs.challenges[i] >= pub_params.graph.size() {\n                    return Ok(false);\n                }\n\n                if !(proof.nodes[i].proves_challenge(pub_inputs.challenges[i])) {\n                    return Ok(false);\n                }\n\n             
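   // As with the data node above, make sure the replica node proof actually\n                // corresponds to the challenged index before validating it.\n             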
   if !(proof.replica_nodes[i].proves_challenge(pub_inputs.challenges[i])) {\n                    return Ok(false);\n                }\n\n                let mut expected_parents = vec![0; pub_params.graph.degree()];\n                pub_params\n                    .graph\n                    .parents(pub_inputs.challenges[i], &mut expected_parents)?;\n                if proof.replica_parents[i].len() != expected_parents.len() {\n                    println!(\n                        \"proof parents were not the same length as in public parameters: {} != {}\",\n                        proof.replica_parents[i].len(),\n                        expected_parents.len()\n                    );\n                    return Ok(false);\n                }\n\n                let parents_as_expected = proof.replica_parents[i]\n                    .iter()\n                    .zip(&expected_parents)\n                    .all(|(actual, expected)| actual.0 == *expected);\n\n                if !parents_as_expected {\n                    println!(\"proof parents were not those provided in public parameters\");\n                    return Ok(false);\n                }\n            }\n\n            let challenge = pub_inputs.challenges[i] % pub_params.graph.size();\n            ensure!(challenge != 0, \"cannot prove the first node\");\n\n            if !proof.replica_nodes[i].proof.validate(challenge) {\n                return Ok(false);\n            }\n\n            for (parent_node, p) in &proof.replica_parents[i] {\n                if !p.proof.validate(*parent_node as usize) {\n                    return Ok(false);\n                }\n            }\n\n            let key = {\n                let prover_bytes = pub_inputs.replica_id.context(\"missing replica_id\")?;\n                hasher.input(AsRef::<[u8]>::as_ref(&prover_bytes));\n\n                for p in proof.replica_parents[i].iter() {\n                    hasher.input(AsRef::<[u8]>::as_ref(&p.1.data));\n                }\n\n                let hash = hasher.result_reset();\n                bytes_into_fr_repr_safe(hash.as_ref()).into()\n            };\n\n            let unsealed = encode::decode(key, proof.replica_nodes[i].data);\n\n            if unsealed != proof.nodes[i].data {\n                return Ok(false);\n            }\n\n            if !proof.nodes[i].proof.validate_data(unsealed) {\n                println!(\"invalid data for merkle path {:?}\", unsealed);\n                return Ok(false);\n            }\n        }\n\n        Ok(true)\n    }\n}\n\nimpl<'a, H, G> PoRep<'a, H, H> for DrgPoRep<'a, H, G>\nwhere\n    H: 'static + Hasher,\n    G::Key: AsRef<<H as Hasher>::Domain>,\n    G: 'a + Graph<H> + ParameterSetMetadata + Sync + Send,\n{\n    type Tau = Tau<<H as Hasher>::Domain>;\n    type ProverAux = ProverAux<H>;\n\n    fn replicate(\n        pp: &Self::PublicParams,\n        replica_id: &<H as Hasher>::Domain,\n        mut data: Data<'a>,\n        data_tree: Option<BinaryMerkleTree<H>>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<(Self::Tau, Self::ProverAux)> {\n        use storage_proofs_core::cache_key::CacheKey;\n\n        let tree_d = match data_tree {\n            Some(tree) => tree,\n            None => create_base_merkle_tree::<BinaryMerkleTree<H>>(\n                Some(config.clone()),\n                pp.graph.size(),\n                data.as_ref(),\n            )?,\n        };\n\n        let graph = &pp.graph;\n        // encode(&pp.graph, replica_id, data, None)?;\n        // Because a 
node always follows all of its parents in the data,\n        // the nodes are by definition already topologically sorted.\n        // Therefore, if we simply traverse the data in order, encoding each node in place,\n        // we can always get each parent's encodings with a simple lookup --\n        // since we will already have encoded the parent earlier in the traversal.\n\n        let mut parents = vec![0; graph.degree()];\n        for node in 0..graph.size() {\n            graph.parents(node, &mut parents)?;\n            let key = graph.create_key(replica_id, node, &parents, data.as_ref(), None)?;\n            let start = data_at_node_offset(node);\n            let end = start + NODE_SIZE;\n\n            let node_data = <H as Hasher>::Domain::try_from_bytes(&data.as_ref()[start..end])?;\n            let encoded = H::sloth_encode(key.as_ref(), &node_data)?;\n\n            encoded.write_bytes(&mut data.as_mut()[start..end])?;\n        }\n\n        let replica_config = ReplicaConfig {\n            path: replica_path,\n            offsets: vec![0],\n        };\n        let tree_r_last_config =\n            StoreConfig::from_config(&config, CacheKey::CommRLastTree.to_string(), None);\n        let tree_r =\n            create_base_lcmerkle_tree::<H, <BinaryLCMerkleTree<H> as MerkleTreeTrait>::Arity>(\n                tree_r_last_config,\n                pp.graph.size(),\n                &data.as_ref(),\n                &replica_config,\n            )?;\n\n        let comm_d = tree_d.root();\n        let comm_r = tree_r.root();\n\n        Ok((Tau::new(comm_d, comm_r), ProverAux::new(tree_d, tree_r)))\n    }\n\n    fn extract_all<'b>(\n        pp: &'b Self::PublicParams,\n        replica_id: &'b <H as Hasher>::Domain,\n        data: &'b [u8],\n        _config: Option<StoreConfig>,\n    ) -> Result<Vec<u8>> {\n        decode(&pp.graph, replica_id, data, None)\n    }\n\n    fn extract(\n        pp: &Self::PublicParams,\n        replica_id: &<H as Hasher>::Domain,\n        data: &[u8],\n        node: usize,\n        _config: Option<StoreConfig>,\n    ) -> Result<Vec<u8>> {\n        Ok(decode_block(&pp.graph, replica_id, data, None, node)?.into_bytes())\n    }\n}\n\npub fn decode<'a, H, G>(\n    graph: &'a G,\n    replica_id: &'a <H as Hasher>::Domain,\n    data: &'a [u8],\n    exp_parents_data: Option<&'a [u8]>,\n) -> Result<Vec<u8>>\nwhere\n    H: Hasher,\n    G::Key: AsRef<H::Domain>,\n    G: Graph<H> + Sync,\n{\n    // TODO: proper error handling\n    let result = (0..graph.size())\n        .into_par_iter()\n        .flat_map(|i| {\n            decode_block::<H, G>(graph, replica_id, data, exp_parents_data, i)\n                .unwrap()\n                .into_bytes()\n        })\n        .collect();\n\n    Ok(result)\n}\n\npub fn decode_block<'a, H, G>(\n    graph: &'a G,\n    replica_id: &'a <H as Hasher>::Domain,\n    data: &'a [u8],\n    exp_parents_data: Option<&'a [u8]>,\n    v: usize,\n) -> Result<<H as Hasher>::Domain>\nwhere\n    H: Hasher,\n    G::Key: AsRef<H::Domain>,\n    G: Graph<H>,\n{\n    let mut parents = vec![0; graph.degree()];\n    graph.parents(v, &mut parents)?;\n    let key = graph.create_key(replica_id, v, &parents, &data, exp_parents_data)?;\n    let node_data = <H as Hasher>::Domain::try_from_bytes(&data_at_node(data, v)?)?;\n\n    Ok(encode::decode(*key.as_ref(), node_data))\n}\n\npub fn decode_domain_block<H: Hasher>(\n    replica_id: &H::Domain,\n    tree: &BinaryLCMerkleTree<H>,\n    node: usize,\n    node_data: H::Domain,\n    parents: &[u32],\n) -> 
Result<H::Domain> {\n    let key = create_key_from_tree::<H, _>(replica_id, node, parents, tree)?;\n\n    Ok(encode::decode(key, node_data))\n}\n\n/// Creates the encoding key from a `MerkleTree`.\n/// The algorithm for that is `Sha256(id | encodedParentNode1 | encodedParentNode2 | ...)`.\n/// It is only public so that it can be used for benchmarking.\npub fn create_key_from_tree<H: Hasher, U: 'static + PoseidonArity>(\n    id: &H::Domain,\n    node: usize,\n    parents: &[u32],\n    tree: &LCMerkleTree<H, U>,\n) -> Result<H::Domain> {\n    let mut hasher = Sha256::new();\n    hasher.input(AsRef::<[u8]>::as_ref(&id));\n\n    // The hash only covers the parents, so skip them when the node has none\n    // (by convention, such a node lists itself as its first parent).\n    if node != parents[0] as usize {\n        let mut scratch: [u8; NODE_SIZE] = [0; NODE_SIZE];\n        for parent in parents.iter() {\n            tree.read_into(*parent as usize, &mut scratch)?;\n            hasher.input(&scratch);\n        }\n    }\n\n    let hash = hasher.result();\n    Ok(bytes_into_fr_repr_safe(hash.as_ref()).into())\n}\n\npub fn replica_id<H: Hasher>(prover_id: [u8; 32], sector_id: [u8; 32]) -> H::Domain {\n    let mut to_hash = [0; 64];\n    to_hash[..32].copy_from_slice(&prover_id);\n    to_hash[32..].copy_from_slice(&sector_id);\n\n    H::Function::hash_leaf(&to_hash)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use ff::Field;\n    use paired::bls12_381::Fr;\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        cache_key::CacheKey,\n        drgraph::{BucketGraph, BASE_DEGREE},\n        fr32::fr_into_bytes,\n        hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher},\n        merkle::{BinaryMerkleTree, MerkleTreeTrait},\n        table_tests,\n        test_helper::setup_replica,\n        util::{data_at_node, default_rows_to_discard},\n    };\n    use tempfile;\n\n    use crate::stacked::BINARY_ARITY;\n\n    fn test_extract_all<Tree: MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let replica_id: <Tree::Hasher as Hasher>::Domain =\n            <Tree::Hasher as Hasher>::Domain::random(rng);\n        let nodes = 4;\n        let data = vec![2u8; 32 * nodes];\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempfile::tempdir().unwrap();\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            default_rows_to_discard(nodes, BINARY_ARITY),\n        );\n\n        // Generate a replica path.\n        let replica_path = cache_dir.path().join(\"replica-path\");\n        let mut mmapped_data = setup_replica(&data, &replica_path);\n\n        let sp = SetupParams {\n            drg: DrgParams {\n                nodes,\n                degree: BASE_DEGREE,\n                expansion_degree: 0,\n                porep_id: [32; 32],\n            },\n            private: false,\n            challenges_count: 1,\n        };\n\n        let pp: PublicParams<Tree::Hasher, BucketGraph<Tree::Hasher>> =\n            DrgPoRep::setup(&sp).expect(\"setup failed\");\n\n        DrgPoRep::replicate(\n            &pp,\n            &replica_id,\n            (mmapped_data.as_mut()).into(),\n            None,\n            config.clone(),\n            replica_path.clone(),\n        )\n        .expect(\"replication failed\");\n\n        let mut copied = vec![0; data.len()];\n        
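// Copy the replica out of the mmap so it can be compared with the original data.\n        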
copied.copy_from_slice(&mmapped_data);\n        assert_ne!(data, copied, \"replication did not change data\");\n\n        let decoded_data = DrgPoRep::<Tree::Hasher, _>::extract_all(\n            &pp,\n            &replica_id,\n            mmapped_data.as_mut(),\n            Some(config.clone()),\n        )\n        .unwrap_or_else(|e| {\n            panic!(\"Failed to extract data from `DrgPoRep`: {}\", e);\n        });\n\n        assert_eq!(data, decoded_data.as_slice(), \"failed to extract data\");\n\n        cache_dir.close().expect(\"Failed to remove cache dir\");\n    }\n\n    #[test]\n    fn extract_all_pedersen() {\n        test_extract_all::<BinaryMerkleTree<PedersenHasher>>();\n    }\n\n    #[test]\n    fn extract_all_sha256() {\n        test_extract_all::<BinaryMerkleTree<Sha256Hasher>>();\n    }\n\n    #[test]\n    fn extract_all_blake2s() {\n        test_extract_all::<BinaryMerkleTree<Blake2sHasher>>();\n    }\n\n    fn test_extract<Tree: MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let replica_id: <Tree::Hasher as Hasher>::Domain =\n            <Tree::Hasher as Hasher>::Domain::random(rng);\n        let nodes = 4;\n        let data = vec![2u8; 32 * nodes];\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempfile::tempdir().unwrap();\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            default_rows_to_discard(nodes, BINARY_ARITY),\n        );\n\n        // Generate a replica path.\n        let replica_path = cache_dir.path().join(\"replica-path\");\n        let mut mmapped_data = setup_replica(&data, &replica_path);\n\n        let sp = SetupParams {\n            drg: DrgParams {\n                nodes: data.len() / 32,\n                degree: BASE_DEGREE,\n                expansion_degree: 0,\n                porep_id: [32; 32],\n            },\n            private: false,\n            challenges_count: 1,\n        };\n\n        let pp =\n            DrgPoRep::<Tree::Hasher, BucketGraph<Tree::Hasher>>::setup(&sp).expect(\"setup failed\");\n\n        DrgPoRep::replicate(\n            &pp,\n            &replica_id,\n            (mmapped_data.as_mut()).into(),\n            None,\n            config.clone(),\n            replica_path.clone(),\n        )\n        .expect(\"replication failed\");\n\n        let mut copied = vec![0; data.len()];\n        copied.copy_from_slice(&mmapped_data);\n        assert_ne!(data, copied, \"replication did not change data\");\n\n        for i in 0..nodes {\n            let decoded_data =\n                DrgPoRep::extract(&pp, &replica_id, &mmapped_data, i, Some(config.clone()))\n                    .expect(\"failed to extract node data from PoRep\");\n\n            let original_data = data_at_node(&data, i).unwrap();\n\n            assert_eq!(\n                original_data,\n                decoded_data.as_slice(),\n                \"failed to extract data\"\n            );\n        }\n    }\n\n    #[test]\n    fn extract_pedersen() {\n        test_extract::<BinaryMerkleTree<PedersenHasher>>();\n    }\n\n    #[test]\n    fn extract_sha256() {\n        test_extract::<BinaryMerkleTree<Sha256Hasher>>();\n    }\n\n    #[test]\n    fn extract_blake2s() {\n        test_extract::<BinaryMerkleTree<Blake2sHasher>>();\n    }\n\n    fn prove_verify_aux<Tree: MerkleTreeTrait>(\n        nodes: usize,\n        i: usize,\n       
 use_wrong_challenge: bool,\n        use_wrong_parents: bool,\n    ) {\n        assert!(i < nodes);\n\n        // The loop is here in case we need to retry because of an edge case in the test design.\n        loop {\n            let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n            let degree = BASE_DEGREE;\n            let expansion_degree = 0;\n\n            let replica_id: <Tree::Hasher as Hasher>::Domain =\n                <Tree::Hasher as Hasher>::Domain::random(rng);\n            let data: Vec<u8> = (0..nodes)\n                .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n                .collect();\n\n            // MT for original data is always named tree-d, and it will be\n            // referenced later in the process as such.\n            let cache_dir = tempfile::tempdir().unwrap();\n            let config = StoreConfig::new(\n                cache_dir.path(),\n                CacheKey::CommDTree.to_string(),\n                default_rows_to_discard(nodes, BINARY_ARITY),\n            );\n\n            // Generate a replica path.\n            let replica_path = cache_dir.path().join(\"replica-path\");\n            let mut mmapped_data = setup_replica(&data, &replica_path);\n\n            let challenge = i;\n\n            let sp = SetupParams {\n                drg: DrgParams {\n                    nodes,\n                    degree,\n                    expansion_degree,\n                    porep_id: [32; 32],\n                },\n                private: false,\n                challenges_count: 2,\n            };\n\n            let pp = DrgPoRep::<Tree::Hasher, BucketGraph<_>>::setup(&sp).expect(\"setup failed\");\n\n            let (tau, aux) = DrgPoRep::<Tree::Hasher, _>::replicate(\n                &pp,\n                &replica_id,\n                (mmapped_data.as_mut()).into(),\n                None,\n                config,\n                replica_path.clone(),\n            )\n            .expect(\"replication failed\");\n\n            let mut copied = vec![0; data.len()];\n            copied.copy_from_slice(&mmapped_data);\n            assert_ne!(data, copied, \"replication did not change data\");\n\n            let pub_inputs = PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n                replica_id: Some(replica_id),\n                challenges: vec![challenge, challenge],\n                tau: Some(tau.clone().into()),\n            };\n\n            let priv_inputs = PrivateInputs::<Tree::Hasher> {\n                tree_d: &aux.tree_d,\n                tree_r: &aux.tree_r,\n                tree_r_config_rows_to_discard: default_rows_to_discard(nodes, BINARY_ARITY),\n            };\n\n            let real_proof = DrgPoRep::<Tree::Hasher, _>::prove(&pp, &pub_inputs, &priv_inputs)\n                .expect(\"proving failed\");\n\n            if use_wrong_parents {\n                // Only one 'wrong' option will be tested at a time.\n                assert!(!use_wrong_challenge);\n                let real_parents = real_proof.replica_parents;\n\n                // Parent vector claiming the wrong parents.\n                let fake_parents = vec![real_parents[0]\n                    .iter()\n                    // Incrementing each parent node will give us a different parent set.\n                    // It's fine to be out of range, since this only needs to fail.\n                    .map(|(i, data_proof)| (i + 1, data_proof.clone()))\n                    .collect::<Vec<_>>()];\n\n                let proof = Proof::new(\n                    
real_proof.replica_nodes.clone(),\n                    fake_parents,\n                    real_proof.nodes.clone(),\n                );\n\n                let is_valid =\n                    DrgPoRep::verify(&pp, &pub_inputs, &proof).expect(\"verification failed\");\n\n                assert!(!is_valid, \"verified in error -- with wrong parents\");\n\n                let mut all_same = true;\n                for (p, _) in &real_parents[0] {\n                    if *p != real_parents[0][0].0 {\n                        all_same = false;\n                    }\n                }\n\n                if all_same {\n                    println!(\"invalid test data: cannot scramble proofs when all parents are the same\");\n\n                    // If, for some reason, we hit this condition because of the data passed in,\n                    // try again.\n                    continue;\n                }\n\n                // Parent vector claiming the right parents but providing valid proofs for different\n                // parents.\n                let fake_proof_parents = vec![real_parents[0]\n                    .iter()\n                    .enumerate()\n                    .map(|(i, (p, _))| {\n                        // Rotate the real parent proofs.\n                        let x = (i + 1) % real_parents[0].len();\n                        let j = real_parents[0][x].0;\n                        (*p, real_parents[0][j as usize].1.clone())\n                    })\n                    .collect::<Vec<_>>()];\n\n                let proof2 = Proof::new(\n                    real_proof.replica_nodes,\n                    fake_proof_parents,\n                    real_proof.nodes,\n                );\n\n                assert!(\n                    !DrgPoRep::<Tree::Hasher, _>::verify(&pp, &pub_inputs, &proof2).unwrap_or_else(\n                        |e| {\n                            panic!(\"Verification failed: {}\", e);\n                        }\n                    ),\n                    \"verified in error -- with wrong parent proofs\"\n                );\n\n                return;\n            }\n\n            let proof = real_proof;\n\n            if use_wrong_challenge {\n                let pub_inputs_with_wrong_challenge_for_proof =\n                    PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n                        replica_id: Some(replica_id),\n                        challenges: vec![if challenge == 1 { 2 } else { 1 }],\n                        tau: Some(tau.into()),\n                    };\n                let verified = DrgPoRep::<Tree::Hasher, _>::verify(\n                    &pp,\n                    &pub_inputs_with_wrong_challenge_for_proof,\n                    &proof,\n                )\n                .expect(\"Verification failed\");\n                assert!(\n                    !verified,\n                    \"wrongly verified proof which does not match challenge in public input\"\n                );\n            } else {\n                assert!(\n                    DrgPoRep::<Tree::Hasher, _>::verify(&pp, &pub_inputs, &proof)\n                        .expect(\"verification failed\"),\n                    \"failed to verify\"\n                );\n            }\n\n            cache_dir.close().expect(\"Failed to remove cache dir\");\n\n            // Normally, just run once.\n            break;\n        }\n    }\n\n    fn prove_verify(n: usize, i: usize) {\n        prove_verify_aux::<BinaryMerkleTree<PedersenHasher>>(n, i, false, false);\n        
prove_verify_aux::<BinaryMerkleTree<Sha256Hasher>>(n, i, false, false);\n        prove_verify_aux::<BinaryMerkleTree<Blake2sHasher>>(n, i, false, false);\n    }\n\n    fn prove_verify_wrong_challenge(n: usize, i: usize) {\n        prove_verify_aux::<BinaryMerkleTree<PedersenHasher>>(n, i, true, false);\n        prove_verify_aux::<BinaryMerkleTree<Sha256Hasher>>(n, i, true, false);\n        prove_verify_aux::<BinaryMerkleTree<Blake2sHasher>>(n, i, true, false);\n    }\n\n    fn prove_verify_wrong_parents(n: usize, i: usize) {\n        prove_verify_aux::<BinaryMerkleTree<PedersenHasher>>(n, i, false, true);\n        prove_verify_aux::<BinaryMerkleTree<Sha256Hasher>>(n, i, false, true);\n        prove_verify_aux::<BinaryMerkleTree<Blake2sHasher>>(n, i, false, true);\n    }\n\n    table_tests! {\n        prove_verify {\n            prove_verify_32_16_1(16, 1);\n\n            prove_verify_32_64_1(64, 1);\n            prove_verify_32_64_2(64, 2);\n\n            prove_verify_32_256_1(256, 1);\n            prove_verify_32_256_2(256, 2);\n            prove_verify_32_256_3(256, 3);\n            prove_verify_32_256_4(256, 4);\n            prove_verify_32_256_5(256, 5);\n        }\n    }\n\n    #[test]\n    fn test_drgporep_verifies_using_challenge() {\n        prove_verify_wrong_challenge(8, 1);\n    }\n\n    #[test]\n    fn test_drgporep_verifies_parents() {\n        // Challenge a node (5) that doesn't have all the same parents.\n        prove_verify_wrong_parents(8, 5);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/encode.rs",
    "content": "use ff::Field;\nuse paired::bls12_381::Fr;\nuse storage_proofs_core::hasher::Domain;\n\npub fn encode<T: Domain>(key: T, value: T) -> T {\n    let mut result: Fr = value.into();\n    let key: Fr = key.into();\n\n    result.add_assign(&key);\n    result.into()\n}\n\npub fn decode<T: Domain>(key: T, value: T) -> T {\n    let mut result: Fr = value.into();\n    let key: Fr = key.into();\n\n    result.sub_assign(&key);\n    result.into()\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/lib.rs",
    "content": "#[macro_use]\nmod macros;\n\npub mod drg;\npub mod nse;\npub mod stacked;\n\nmod encode;\n\nuse std::path::PathBuf;\n\nuse merkletree::store::StoreConfig;\nuse storage_proofs_core::{\n    error::Result, hasher::Hasher, merkle::BinaryMerkleTree, proof::ProofScheme, Data,\n};\n\npub trait PoRep<'a, H: Hasher, G: Hasher>: ProofScheme<'a> {\n    type Tau;\n    type ProverAux;\n\n    fn replicate(\n        pub_params: &'a Self::PublicParams,\n        replica_id: &H::Domain,\n        data: Data<'a>,\n        data_tree: Option<BinaryMerkleTree<G>>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<(Self::Tau, Self::ProverAux)>;\n\n    fn extract_all(\n        pub_params: &'a Self::PublicParams,\n        replica_id: &H::Domain,\n        replica: &[u8],\n        config: Option<StoreConfig>,\n    ) -> Result<Vec<u8>>;\n\n    fn extract(\n        pub_params: &'a Self::PublicParams,\n        replica_id: &H::Domain,\n        replica: &[u8],\n        node: usize,\n        config: Option<StoreConfig>,\n    ) -> Result<Vec<u8>>;\n}\n\n#[cfg(test)]\npub(crate) const TEST_SEED: [u8; 16] = [\n    0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5,\n];\n"
  },
  {
    "path": "storage-proofs/porep/src/macros.rs",
    "content": "/// Checks that the two passed values are equal. If they are not equal it prints a debug and returns `false`.\nmacro_rules! check_eq {\n    ($left:expr , $right:expr,) => ({\n        check_eq!($left, $right)\n    });\n    ($left:expr , $right:expr) => ({\n        match (&($left), &($right)) {\n            (left_val, right_val) => {\n                if !(*left_val == *right_val) {\n                    log::debug!(\"check failed: `(left == right)`\\\n                          \\n\\\n                          \\n{}\\\n                          \\n\",\n                           pretty_assertions::Comparison::new(left_val, right_val));\n                    return false;\n                }\n            }\n        }\n    });\n    ($left:expr , $right:expr, $($arg:tt)*) => ({\n        match (&($left), &($right)) {\n            (left_val, right_val) => {\n                if !(*left_val == *right_val) {\n                    log::debug!(\"check failed: `(left == right)`: {}\\\n                          \\n\\\n                          \\n{}\\\n                          \\n\",\n                           format_args!($($arg)*),\n                           pretty_assertions::Comparison::new(left_val, right_val));\n                    return false;\n                }\n            }\n        }\n    });\n}\n\n/// Checks that the passed in value is true. If they are not equal it prints a debug and returns `false`.\nmacro_rules! check {\n    ($val:expr) => {\n        if !$val {\n            log::debug!(\"expected {:?} to be true\", dbg!($val));\n            return false;\n        }\n    };\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/nse/mod.rs",
    "content": "mod vanilla;\n\npub use self::vanilla::*;\n"
  },
  {
    "path": "storage-proofs/porep/src/nse/vanilla/batch_hasher.rs",
    "content": "//! Implementation of batched hashing using Sha256.\n\nuse byteorder::{ByteOrder, LittleEndian};\nuse ff::PrimeField;\nuse itertools::Itertools;\nuse paired::bls12_381::{Fr, FrRepr};\nuse sha2raw::Sha256;\nuse storage_proofs_core::{hasher::Domain, util::NODE_SIZE};\n\nuse super::Parent;\n\n/// Hashes the provided, non expanded, parents.\n///\n/// The provided data must be such that the parents expanded by `k` can not overreach\n/// and alread bit padded, such that each 32 byte chunk is a valid Fr.\npub fn batch_hash(\n    k: usize,\n    degree: usize,\n    mut hasher: Sha256,\n    parents: &[Parent],\n    data: &[u8],\n) -> [u8; 32] {\n    assert!(parents.len() % 2 == 0, \"number of parents must be even\");\n    assert_eq!(parents.len(), degree * k, \"invalid number of parents\");\n    let modulus = Fr::char();\n\n    for (i, j) in (0..degree).tuples() {\n        let k = k as u32;\n\n        let (el1, el2) = (0..k).fold(\n            (FrRepr::from(0), FrRepr::from(0)),\n            |(mut el1, mut el2), l| {\n                let y1 = i + (l as usize * degree as usize);\n                let parent1 = parents[y1 as usize];\n                let current1 = read_at(data, parent1 as usize);\n                let y2 = j + (l as usize * degree as usize);\n                let parent2 = parents[y2 as usize];\n                let current2 = read_at(data, parent2 as usize);\n\n                add_assign(&mut el1, &current1, &modulus);\n                add_assign(&mut el2, &current2, &modulus);\n\n                (el1, el2)\n            },\n        );\n\n        // hash two 32 byte chunks at once\n        hasher.input(&[&fr_repr_as_bytes(&el1), &fr_repr_as_bytes(&el2)]);\n    }\n\n    let mut hash = hasher.finish();\n    truncate_hash(&mut hash);\n\n    hash\n}\n\n/// Hashes the provided, expanded, parents.\npub fn batch_hash_expanded<D: Domain>(\n    k: usize,\n    degree: usize,\n    mut hasher: Sha256,\n    parents_data: &[D],\n) -> [u8; 32] {\n    assert!(\n        parents_data.len() % 2 == 0,\n        \"number of parents must be even\"\n    );\n    assert_eq!(parents_data.len(), degree * k, \"invalid number of parents\");\n    let modulus = Fr::char();\n\n    for (i, j) in (0..degree).tuples() {\n        let mut el1 = FrRepr::from(0);\n        let mut el2 = FrRepr::from(0);\n        let k = k as u32;\n\n        for l in 0..k {\n            let y1 = i + (l as usize * degree as usize);\n            let current1 = parents_data[y1].into_repr();\n            add_assign(&mut el1, &current1, &modulus);\n\n            let y2 = j + (l as usize * degree as usize);\n            let current2 = parents_data[y2].into_repr();\n            add_assign(&mut el2, &current2, &modulus);\n        }\n\n        // hash two 32 byte chunks at once\n        hasher.input(&[&fr_repr_as_bytes(&el1), &fr_repr_as_bytes(&el2)]);\n    }\n\n    let mut hash = hasher.finish();\n    truncate_hash(&mut hash);\n\n    hash\n}\n\n/// Truncate a given 32 byte hash, into a valid Fr bytes representation.\n#[inline]\npub fn truncate_hash(hash: &mut [u8]) {\n    assert_eq!(hash.len(), 32);\n    // strip last two bits, to ensure result is in Fr.\n    hash[31] &= 0b0011_1111;\n}\n\n/// Read an `FrRepr` at the given index.\n#[inline]\nfn read_at(data: &[u8], index: usize) -> FrRepr {\n    let slice = &data[index * NODE_SIZE..(index + 1) * NODE_SIZE];\n    fr_repr_from_slice(slice)\n}\n\n/// Reads the first 32 bytes from the given slice and\n/// processes them as `FrRepr`. 
Does not validate that\n/// they are valid FrReprs.\n#[inline]\nfn fr_repr_from_slice(r: &[u8]) -> FrRepr {\n    let repr = [\n        LittleEndian::read_u64(&r[..8]),\n        LittleEndian::read_u64(&r[8..16]),\n        LittleEndian::read_u64(&r[16..24]),\n        LittleEndian::read_u64(&r[24..]),\n    ];\n    FrRepr(repr)\n}\n\n/// Adds two `FrRepr`.\n/// This avoids converting to Montgomery form, which is only needed to do multiplications, and\n/// happens when converting into and from an `Fr`.\n#[inline]\n#[cfg(not(target_arch = \"x86_64\"))]\nfn add_assign(a: &mut FrRepr, b: &FrRepr, modulus: &FrRepr) {\n    a.add_nocarry(b);\n\n    // check if we need to reduce by the modulus\n    if &*a >= modulus {\n        a.sub_noborrow(modulus);\n    }\n}\n\n/// Adds two `FrRepr`.\n/// This avoids converting to Montgomery form, which is only needed to do multiplications, and\n/// happens when converting into and from an `Fr`.\n#[inline]\n#[cfg(target_arch = \"x86_64\")]\nfn add_assign(a: &mut FrRepr, b: &FrRepr, modulus: &FrRepr) {\n    use std::arch::x86_64::*;\n\n    unsafe {\n        let mut carry = _addcarry_u64(0, a.0[0], b.0[0], &mut a.0[0]);\n        carry = _addcarry_u64(carry, a.0[1], b.0[1], &mut a.0[1]);\n        carry = _addcarry_u64(carry, a.0[2], b.0[2], &mut a.0[2]);\n        _addcarry_u64(carry, a.0[3], b.0[3], &mut a.0[3]);\n\n        let mut s_sub = [0u64; 4];\n\n        carry = _subborrow_u64(0, a.0[0], modulus.0[0], &mut s_sub[0]);\n        carry = _subborrow_u64(carry, a.0[1], modulus.0[1], &mut s_sub[1]);\n        carry = _subborrow_u64(carry, a.0[2], modulus.0[2], &mut s_sub[2]);\n        carry = _subborrow_u64(carry, a.0[3], modulus.0[3], &mut s_sub[3]);\n\n        // no borrow means a >= modulus, so keep the reduced value\n        if carry == 0 {\n            a.0 = s_sub;\n        }\n    }\n}\n\n#[inline(always)]\nfn fr_repr_as_bytes(a: &FrRepr) -> [u8; 32] {\n    let mut res = [0u8; 32];\n\n    res[..8].copy_from_slice(&a.0[0].to_le_bytes()[..]);\n    res[8..16].copy_from_slice(&a.0[1].to_le_bytes()[..]);\n    res[16..24].copy_from_slice(&a.0[2].to_le_bytes()[..]);\n    res[24..].copy_from_slice(&a.0[3].to_le_bytes()[..]);\n\n    res\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use ff::Field;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::fr32::{bytes_into_fr, fr_into_bytes};\n\n    #[test]\n    fn test_read_at() {\n        let data = [0u8; 64];\n\n        let v0 = read_at(&data, 0);\n        assert_eq!(v0, FrRepr::from(0));\n        let v1 = read_at(&data, 1);\n        assert_eq!(v1, FrRepr::from(0));\n    }\n\n    #[test]\n    fn test_truncate_hash() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for _ in 0..1000 {\n            // random bytes\n            let mut input: [u8; 32] = rng.gen();\n            truncate_hash(&mut input);\n\n            // check for valid Fr\n            bytes_into_fr(&input).expect(\"invalid fr created\");\n        }\n    }\n\n    #[test]\n    fn test_add_assign() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n        let modulus = Fr::char();\n        for _ in 0..1000 {\n            let mut a = Fr::random(rng);\n            let b = Fr::random(rng);\n\n            let mut a_repr = a.clone().into_repr();\n            let b_repr = b.clone().into_repr();\n\n            add_assign(&mut a_repr, &b_repr, &modulus);\n            a.add_assign(&b);\n\n            let a_back = Fr::from_repr(a_repr).unwrap();\n            assert_eq!(a, a_back);\n        }\n    }\n\n    #[test]\n    fn 
test_fr_repr_from_slice() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for _ in 0..1000 {\n            let a = Fr::random(rng);\n            let a_repr = a.clone().into_repr();\n            let slice = fr_into_bytes(&a);\n\n            let a_repr_back = fr_repr_from_slice(&slice);\n            assert_eq!(a_repr, a_repr_back);\n        }\n    }\n\n    #[test]\n    fn test_fr_repr_as_bytes() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for _ in 0..1000 {\n            let a = Fr::random(rng);\n            let a_repr = a.clone().into_repr();\n            let slice = fr_repr_as_bytes(&a_repr);\n\n            let a_repr_back = fr_repr_from_slice(&slice);\n            assert_eq!(a_repr, a_repr_back);\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/nse/vanilla/butterfly_graph.rs",
    "content": "use super::config::Config;\nuse super::Parent;\n\n/// The butterfly graph which provides the parents for the butterfly layers.\n#[derive(Debug)]\npub struct ButterflyGraph {\n    /// The degree of the graph.\n    pub degree: usize,\n    /// The number of nodes in a window. Must be a power of 2.\n    pub num_nodes_window: u32,\n    /// Total number of layers.\n    pub num_layers: u32,\n    /// Number of butterfly layers.\n    pub num_butterfly_layers: u32,\n}\n\nimpl ButterflyGraph {\n    /// Calculates the parents for the node at the given `index`, at the\n    /// given `layer`.\n    pub fn parents(&self, index: u32, layer: u32) -> ButterflyGraphParentsIter {\n        assert!(layer <= self.num_layers);\n        assert!(layer >= self.num_layers - self.num_butterfly_layers);\n        ButterflyGraphParentsIter::new(self, index, layer)\n    }\n}\n\n/// Iterator created by the [`parents`] method.\n#[derive(Debug)]\npub struct ButterflyGraphParentsIter<'a> {\n    graph: &'a ButterflyGraph,\n    /// The index of the node.\n    node: u32,\n    /// The index of the parent to yield next.\n    pos: u32,\n    /// The constant factor of `butterfly_degree ** L - l`\n    factor: u32,\n}\n\nimpl<'a> ButterflyGraphParentsIter<'a> {\n    fn new(graph: &'a ButterflyGraph, node: u32, layer: u32) -> Self {\n        let factor = graph.degree.pow(graph.num_layers - layer) as u32;\n\n        ButterflyGraphParentsIter {\n            graph,\n            node,\n            pos: 0,\n            factor,\n        }\n    }\n}\n\nimpl<'a> Iterator for ButterflyGraphParentsIter<'a> {\n    type Item = Parent;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.pos >= self.graph.degree as u32 {\n            return None;\n        }\n\n        let parent_raw = self.node + self.pos * self.factor;\n        // mod N\n        let parent = parent_raw & (self.graph.num_nodes_window - 1);\n\n        self.pos += 1;\n        Some(parent)\n    }\n\n    fn size_hint(&self) -> (usize, Option<usize>) {\n        (self.graph.degree, Some(self.graph.degree))\n    }\n}\n\nimpl From<&Config> for ButterflyGraph {\n    fn from(config: &Config) -> Self {\n        assert!(config.num_nodes_window < std::u32::MAX as usize);\n        assert!(config.num_nodes_window.is_power_of_two());\n\n        let num_layers = config.num_butterfly_layers + config.num_expander_layers;\n        assert!(num_layers < std::u32::MAX as usize);\n        let num_butterfly_layers = config.num_butterfly_layers;\n        assert!(num_butterfly_layers < std::u32::MAX as usize);\n\n        Self {\n            degree: config.degree_butterfly,\n            num_nodes_window: config.num_nodes_window as u32,\n            num_layers: num_layers as u32,\n            num_butterfly_layers: num_butterfly_layers as u32,\n        }\n    }\n}\n\nimpl From<Config> for ButterflyGraph {\n    fn from(config: Config) -> Self {\n        (&config).into()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_from_config() {\n        let config = Config {\n            k: 8,\n            num_nodes_window: (4 * 1024 * 1024 * 1024) / 32,\n            degree_expander: 384,\n            degree_butterfly: 16,\n            num_expander_layers: 8,\n            num_butterfly_layers: 7,\n            sector_size: 1024 * 1024 * 1024 * 1024,\n        };\n\n        let graph: ButterflyGraph = config.into();\n\n        assert_eq!(graph.degree, 16, \"invalid degree\");\n        assert_eq!(graph.num_layers, 15, \"invalid degree\");\n    }\n\n    #[test]\n 
   fn test_parents() {\n        let config = Config {\n            k: 8,\n            num_nodes_window: (4 * 1024 * 1024 * 1024) / 32,\n            degree_expander: 384,\n            degree_butterfly: 16,\n            num_expander_layers: 8,\n            num_butterfly_layers: 7,\n            sector_size: 1024 * 1024 * 1024 * 1024,\n        };\n\n        let graph: ButterflyGraph = config.into();\n\n        let parents0_9: Vec<Parent> = graph.parents(0, 9).collect();\n        let parents1_9: Vec<Parent> = graph.parents(1, 9).collect();\n        let parents0_15: Vec<Parent> = graph.parents(0, 15).collect();\n\n        assert_eq!(parents0_9.len(), graph.degree);\n        assert_eq!(parents1_9.len(), graph.degree);\n        assert_eq!(parents0_15.len(), graph.degree);\n        assert_ne!(&parents0_9, &parents1_9, \"must not be equal\");\n        assert_ne!(&parents0_9, &parents0_15, \"must not be equal\");\n\n        for ((a, b), c) in parents0_9\n            .iter()\n            .zip(parents1_9.iter())\n            .zip(parents0_15.iter())\n        {\n            assert!(*a < graph.num_nodes_window);\n            assert!(*b < graph.num_nodes_window);\n            assert!(*c < graph.num_nodes_window);\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/nse/vanilla/challenges.rs",
    "content": "use num_bigint::BigUint;\nuse num_traits::cast::ToPrimitive;\nuse sha2::{Digest, Sha256};\nuse storage_proofs_core::hasher::Domain;\n\nuse super::Config;\n\n#[derive(Debug, Default)]\npub struct ChallengeRequirements {\n    pub minimum_challenges: usize,\n}\n\n/// An iterator which yields a fixed number of challenges over all windows.\n///\n/// Each challenge, challenges across all layers in the selected window.\n#[derive(Debug)]\npub struct Challenges<D: Domain> {\n    /// The number of layers.\n    num_layers: usize,\n    /// The number of windows.\n    num_windows: usize,\n    /// The number of challenges per window.\n    num_challenges_per_window: usize,\n    /// Number of nodes in a single window.\n    num_nodes_window: usize,\n    /// Replica ID, to make the challenges unique to the replica.\n    replica_id: D,\n    /// Randomness seed\n    seed: [u8; 32],\n    /// Currently challenged window index.\n    current_window: usize,\n    /// Currently challenged node index for the window.\n    current_challenge: usize,\n}\n\nimpl<D: Domain> Challenges<D> {\n    pub fn new(\n        config: &Config,\n        num_challenges_per_window: usize,\n        replica_id: &D,\n        seed: [u8; 32],\n    ) -> Self {\n        Self {\n            num_layers: config.num_layers(),\n            num_windows: config.num_windows(),\n            num_challenges_per_window,\n            num_nodes_window: config.num_nodes_window,\n            replica_id: *replica_id,\n            seed,\n            current_window: 0,\n            current_challenge: 0,\n        }\n    }\n}\n\n#[derive(Debug)]\npub struct Challenge {\n    /// Index for the challenged window.\n    pub window: usize,\n    /// Index for the challenge node.\n    pub node: usize,\n    /// Index for the challenged layer.\n    pub layer: usize,\n}\n\nimpl<D: Domain> Iterator for Challenges<D> {\n    type Item = Challenge;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.current_challenge == self.num_challenges_per_window\n            && self.current_window == self.num_windows - 1\n        {\n            return None;\n        }\n\n        if self.current_challenge == self.num_challenges_per_window\n            && self.current_window <= self.num_windows\n        {\n            self.current_challenge = 0;\n            self.current_window += 1;\n        }\n\n        // Generate a challenge into any layer of the current window.\n        let range = self.num_nodes_window * self.num_layers;\n        let challenge_index = self.current_window * self.num_nodes_window + self.current_challenge;\n        let hash = Sha256::new()\n            .chain(self.replica_id.into_bytes())\n            .chain(self.seed)\n            .chain(&(challenge_index as u64).to_le_bytes())\n            .result();\n\n        let big_challenge = BigUint::from_bytes_le(hash.as_ref());\n\n        // For now, we cannot try to prove the first or last node, so make sure the challenge can never be 0.\n        let big_mod_challenge = big_challenge % (range - 1);\n        let big_mod_challenge = big_mod_challenge\n            .to_usize()\n            .expect(\"`big_mod_challenge` exceeds size of `usize`\");\n        let challenged_node = big_mod_challenge + 1;\n        let layer = challenged_node / self.num_nodes_window;\n        let node = challenged_node % self.num_nodes_window;\n\n        self.current_challenge += 1;\n\n        Some(Challenge {\n            window: self.current_window,\n            node,\n            layer: layer + 1, // layers are 1-indexed\n  
      })\n    }\n\n    fn size_hint(&self) -> (usize, Option<usize>) {\n        let size = self.num_windows * self.num_challenges_per_window;\n        (size, Some(size))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::hasher::{Hasher, PoseidonHasher};\n\n    #[test]\n    fn test_challenges_smoke() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let config = Config {\n            k: 8,\n            num_nodes_window: 2048 / 32,\n            degree_expander: 12,\n            degree_butterfly: 4,\n            num_expander_layers: 6,\n            num_butterfly_layers: 4,\n            sector_size: 2048 * 8,\n        };\n\n        let replica_id = <PoseidonHasher as Hasher>::Domain::random(rng);\n        let seed = rng.gen();\n        let num_challenges_per_window = 2;\n        let challenges = Challenges::new(&config, num_challenges_per_window, &replica_id, seed);\n\n        let list: Vec<_> = challenges.collect();\n        assert_eq!(list.len(), num_challenges_per_window * config.num_windows());\n\n        for (window, chunk) in list.chunks(num_challenges_per_window).enumerate() {\n            for challenge in chunk {\n                assert_eq!(challenge.window, window, \"incorrect window\");\n                assert!(challenge.layer > 0, \"layers are 1-indexed\");\n                assert!(\n                    challenge.layer <= config.num_layers(),\n                    \"layer too large: {}, {}\",\n                    challenge.layer,\n                    config.num_layers()\n                );\n                assert!(challenge.node > 1, \"cannot challenge node 0\");\n                assert!(\n                    challenge.node < config.num_nodes_window,\n                    \"challenge too large\"\n                );\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/nse/vanilla/config.rs",
    "content": "use storage_proofs_core::util::NODE_SIZE;\n\n/// The configuration parameters for NSE.\n#[derive(Debug, Clone)]\npub struct Config {\n    /// Batch hashing factor.\n    pub k: u32,\n    /// Number of nodes per window.\n    pub num_nodes_window: usize,\n    /// Degree of the expander graph.\n    pub degree_expander: usize,\n    /// Degree of the butterfly graph.\n    pub degree_butterfly: usize,\n    /// Number of expander layers.\n    pub num_expander_layers: usize,\n    /// Number of butterfly layers.\n    pub num_butterfly_layers: usize,\n    /// Sector size in bytes.\n    pub sector_size: usize,\n}\n\nimpl Config {\n    /// Total number of layers.\n    pub fn num_layers(&self) -> usize {\n        self.num_expander_layers + self.num_butterfly_layers\n    }\n\n    /// Number of bytes in a single window.\n    pub fn window_size(&self) -> usize {\n        self.num_nodes_window * NODE_SIZE\n    }\n\n    /// Number of windows.\n    pub fn num_windows(&self) -> usize {\n        self.sector_size / self.window_size()\n    }\n\n    pub fn num_nodes_sector(&self) -> usize {\n        self.sector_size / NODE_SIZE\n    }\n\n    /// Returns `true` if the passed in layer index is an expander layer, `false` otherwise.\n    pub fn is_layer_expander(&self, layer: usize) -> bool {\n        layer <= self.num_expander_layers\n    }\n\n    /// Returns `true` if the passed in layer index is the last layer, `false` otherwise.\n    pub fn is_layer_replica(&self, layer: usize) -> bool {\n        layer == self.num_layers()\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/nse/vanilla/expander_graph.rs",
    "content": "use sha2raw::Sha256;\n\nuse super::config::Config;\nuse super::Parent;\n\n/// The expander graph which provides the parents for the expander layers.\n#[derive(Debug)]\npub struct ExpanderGraph {\n    /// The number of bits required to identify a single parent.\n    pub bits: u32,\n    /// Batching hashing factor.\n    pub k: u32,\n    /// The degree of the graph.\n    pub degree: usize,\n}\n\nimpl ExpanderGraph {\n    /// Calculates the parents for the node at the given `index`.\n    pub fn parents(&self, index: u32) -> ExpanderGraphParentsIter {\n        ExpanderGraphParentsIter::new(self, index)\n    }\n\n    /// Calculates the expanded parents for the node at the given `index`.\n    pub fn expanded_parents(&self, index: u32) -> impl Iterator<Item = Parent> + '_ {\n        let k = self.k;\n        let p = self.parents(index);\n        p.flat_map(move |parent| (0..k).map(move |i| parent * k + i))\n    }\n}\n\n/// Iterator created by the [`parents`] method.\n#[derive(Debug)]\npub struct ExpanderGraphParentsIter<'a> {\n    graph: &'a ExpanderGraph,\n    /// The index of the node.\n    node: u32,\n    /// The index of the parent to yield next.\n    pos: usize,\n    /// The current index into the stream.\n    counter: u32,\n    /// Index into the hash.\n    hash_index: usize,\n    /// The current hash.\n    hash: [u8; 32],\n}\n\nimpl<'a> ExpanderGraphParentsIter<'a> {\n    fn new(graph: &'a ExpanderGraph, node: u32) -> Self {\n        assert!(graph.bits < 32 * 8, \"too many btis in the requested graph\");\n\n        let mut iter = ExpanderGraphParentsIter {\n            graph,\n            node,\n            pos: 0,\n            counter: 0,\n            hash_index: 0,\n            hash: [0u8; 32],\n        };\n        iter.update_hash();\n        iter\n    }\n\n    /// Update the current hash value, based on the current `pos`.\n    fn update_hash(&mut self) {\n        // node index - 4 bytes\n        self.hash[..4].copy_from_slice(&self.node.to_be_bytes());\n        // counter - 4 bytes\n        self.hash[4..8].copy_from_slice(&self.counter.to_be_bytes());\n        // padding 0 - 24 bytes\n        for i in 8..32 {\n            self.hash[i] = 0;\n        }\n\n        let mut hasher = Sha256::new();\n        hasher.input(&[&self.hash[..], &[0u8; 32]]);\n        self.hash = hasher.finish();\n\n        // update inner counter\n        self.counter += 1;\n        self.hash_index = 0;\n    }\n}\n\nimpl<'a> Iterator for ExpanderGraphParentsIter<'a> {\n    type Item = Parent;\n\n    fn next(&mut self) -> Option<Self::Item> {\n        if self.pos >= self.graph.degree {\n            // The iterator yields exactly degree number parents.\n            return None;\n        }\n\n        const INDEX_BYTES: usize = 4;\n        const HASH_BYTES: usize = 32;\n        let parent_bytes = (self.graph.bits as usize / 8).max(1);\n        debug_assert!(parent_bytes > 0);\n        debug_assert!(parent_bytes <= INDEX_BYTES);\n\n        // Need more bits, all in the next hash.\n        if self.hash_index == HASH_BYTES {\n            self.update_hash();\n        }\n\n        let hash_end = HASH_BYTES - self.hash_index;\n\n        let mut parent = if parent_bytes <= hash_end {\n            // Enough bits in the current hash.\n            let mut parent = [0u8; INDEX_BYTES];\n\n            parent[..parent_bytes]\n                .copy_from_slice(&self.hash[self.hash_index..self.hash_index + parent_bytes]);\n            self.hash_index += parent_bytes;\n            u32::from_le_bytes(parent)\n        } else 
{\n            let mut parent = [0u8; INDEX_BYTES];\n            // Copy rest from the current value.\n            debug_assert!(hash_end > 0);\n            debug_assert!(hash_end <= INDEX_BYTES);\n\n            parent[..hash_end].copy_from_slice(&self.hash[self.hash_index..]);\n\n            self.update_hash();\n\n            // Copy the second part.\n            let len = parent_bytes - hash_end;\n            parent[hash_end..parent_bytes].copy_from_slice(&self.hash[..len]);\n            self.hash_index += len;\n            u32::from_le_bytes(parent)\n        };\n\n        if self.graph.bits < 8 {\n            // Smaller than 1 byte, need to manually mod\n            parent = parent % (2u64.pow(self.graph.bits) as u32);\n        }\n\n        // The parent should already be in the correct range based on the construction.\n        debug_assert!(\n            parent <= (2u64.pow(self.graph.bits) - 1) as u32,\n            \"{} <= {}\",\n            parent,\n            (2u64.pow(self.graph.bits) - 1) as u32\n        );\n\n        self.pos += 1;\n\n        Some(parent)\n    }\n\n    fn size_hint(&self) -> (usize, Option<usize>) {\n        (self.graph.degree, Some(self.graph.degree))\n    }\n}\n\nimpl From<&Config> for ExpanderGraph {\n    fn from(config: &Config) -> Self {\n        let bits = (config.num_nodes_window as f64 / config.k as f64).log2() as u32;\n        assert!(bits > 0);\n        Self {\n            bits,\n            k: config.k,\n            degree: config.degree_expander,\n        }\n    }\n}\n\nimpl From<Config> for ExpanderGraph {\n    fn from(config: Config) -> Self {\n        (&config).into()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_from_config() {\n        let config = Config {\n            k: 8,\n            num_nodes_window: (4 * 1024 * 1024 * 1024) / 32,\n            degree_expander: 384,\n            degree_butterfly: 16,\n            num_expander_layers: 8,\n            num_butterfly_layers: 7,\n            sector_size: 1024 * 1024 * 1024 * 1024,\n        };\n\n        let egraph: ExpanderGraph = config.into();\n\n        assert_eq!(egraph.k, 8, \"invalid k\");\n        assert_eq!(egraph.degree, 384, \"invalid degree\");\n        assert_eq!(egraph.bits, 24, \"invalid bits\");\n    }\n\n    #[test]\n    fn test_parents() {\n        let graph = ExpanderGraph {\n            k: 8,\n            bits: 24,\n            degree: 384,\n        };\n\n        let parents0: Vec<Parent> = graph.parents(0).collect();\n        let parents1: Vec<Parent> = graph.parents(1).collect();\n\n        assert_eq!(parents0.len(), graph.degree);\n        assert_eq!(parents1.len(), graph.degree);\n        assert_ne!(&parents0, &parents1, \"must not be equal\");\n\n        let l = (graph.bits as usize * graph.degree) / 32;\n        let expected_parents_hash: Vec<u8> = (0..l)\n            .flat_map(|i| {\n                let mut input = [0u8; 32];\n                input[..4].copy_from_slice(&0u32.to_be_bytes());\n                input[4..8].copy_from_slice(&(i as u32).to_be_bytes());\n                Sha256::digest(&[&input[..], &[0u8; 32][..]]).to_vec()\n            })\n            .collect();\n        assert_eq!(\n            expected_parents_hash.len(),\n            graph.degree * graph.bits as usize\n        );\n\n        for (actual_parent, expected_parent_hash) in parents0\n            .into_iter()\n            .zip(expected_parents_hash.chunks(24 / 8))\n        {\n            let mut raw = [0u8; 4];\n            
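// Each expected parent is the next 3 hash bytes (bits = 24), zero-extended to a\n            // little-endian u32, mirroring the iterator's own decoding.\n            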
raw[..3].copy_from_slice(expected_parent_hash);\n            let expected_parent = u32::from_le_bytes(raw);\n            assert_eq!(actual_parent, expected_parent);\n        }\n    }\n\n    #[test]\n    fn test_expanded_parents() {\n        let graph = ExpanderGraph {\n            k: 8,\n            bits: 24,\n            degree: 384,\n        };\n\n        let parents0: Vec<Parent> = graph.expanded_parents(0).collect();\n        let parents1: Vec<Parent> = graph.expanded_parents(1).collect();\n\n        assert_eq!(parents0.len(), graph.degree * graph.k as usize);\n        assert_eq!(parents1.len(), graph.degree * graph.k as usize);\n        assert_ne!(&parents0, &parents1, \"must not be equal\");\n    }\n\n    #[test]\n    fn test_expand_flatten() {\n        let graphs = [\n            ExpanderGraph {\n                k: 8,\n                bits: 24,\n                degree: 384,\n            },\n            ExpanderGraph {\n                k: 2,\n                bits: 8,\n                degree: 12,\n            },\n        ];\n        for graph in &graphs {\n            for i in 0..graph.degree {\n                let parents: Vec<Parent> = graph.parents(i as u32).collect();\n                let exp_parents: Vec<Parent> = graph.expanded_parents(i as u32).collect();\n\n                let m = graph.degree as usize;\n                let k = graph.k as usize;\n\n                for j in 0..k {\n                    let y = i + (j * m);\n                    assert_eq!(parents[y / k] as usize * k + y % k, exp_parents[y] as usize);\n                }\n\n                assert!(\n                    !parents.iter().all(|p| p == &0),\n                    \"parents must not be all 0\"\n                );\n                assert!(\n                    !graph.expanded_parents(i as u32).all(|p| p == 0),\n                    \"exp parents must not be all 0\"\n                );\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/nse/vanilla/labels.rs",
    "content": "use anyhow::{ensure, Context, Result};\nuse generic_array::typenum::U0;\nuse itertools::Itertools;\nuse log::debug;\nuse merkletree::store::{StoreConfig, StoreConfigDataVersion};\nuse rayon::prelude::*;\nuse sha2raw::Sha256;\nuse storage_proofs_core::{\n    hasher::{Domain, Hasher},\n    merkle::{DiskTree, LCTree, MerkleTreeTrait, MerkleTreeWrapper},\n    util::NODE_SIZE,\n};\n\nuse super::{\n    batch_hasher::{batch_hash, truncate_hash},\n    butterfly_graph::ButterflyGraph,\n    expander_graph::ExpanderGraph,\n    Config,\n};\nuse crate::encode;\n\npub type LCMerkleTree<Tree> =\n    LCTree<<Tree as MerkleTreeTrait>::Hasher, <Tree as MerkleTreeTrait>::Arity, U0, U0>;\npub type MerkleTree<Tree> =\n    DiskTree<<Tree as MerkleTreeTrait>::Hasher, <Tree as MerkleTreeTrait>::Arity, U0, U0>;\n\n/// Encodes the provided data and returns the replica and a list of merkle trees for each layer.\npub fn encode_with_trees<Tree: 'static + MerkleTreeTrait>(\n    config: &Config,\n    mut store_configs: Vec<StoreConfig>,\n    window_index: u32,\n    replica_id: &<Tree::Hasher as Hasher>::Domain,\n    data: &mut [u8],\n) -> Result<(Vec<MerkleTree<Tree>>, LCMerkleTree<Tree>)> {\n    let num_layers = config.num_layers();\n    let mut trees = Vec::with_capacity(num_layers);\n\n    assert_eq!(store_configs.len(), num_layers);\n    let mut previous_layer = vec![0u8; config.window_size()];\n    let mut current_layer = vec![0u8; config.window_size()];\n\n    // 1. Construct the mask\n    debug!(\"mask layer: {}\", 1);\n    mask_layer(config, window_index, replica_id, &mut previous_layer)\n        .context(\"failed to construct the mask layer\")?;\n\n    let mask_config = store_configs.remove(0);\n\n    debug!(\"mask layer tree\");\n    let mask_tree = tree_from_slice::<Tree>(&previous_layer, mask_config)\n        .context(\"failed to construct merkle tree for the mask layer\")?;\n    trees.push(mask_tree);\n\n    // 2. Construct expander layers\n    for layer_index in 2..=(config.num_expander_layers as u32) {\n        debug!(\"expander layer: {}\", layer_index);\n        expander_layer(\n            config,\n            window_index,\n            replica_id,\n            layer_index,\n            &previous_layer,\n            &mut current_layer,\n        )\n        .context(\"failed to construct expander layer\")?;\n\n        let store_config = store_configs.remove(0);\n        debug!(\"expander layer tree\");\n        let tree = tree_from_slice::<Tree>(&current_layer, store_config)\n            .context(\"failed to construct merkle tree for expander layer\")?;\n        trees.push(tree);\n\n        // swap layers to reuse memory\n        std::mem::swap(&mut previous_layer, &mut current_layer);\n    }\n\n    // 3. 
Construct butterfly layers\n    for layer_index in (1 + config.num_expander_layers as u32)..(num_layers as u32) {\n        debug!(\"butterfly layer: {}\", layer_index);\n        butterfly_layer(\n            config,\n            window_index,\n            replica_id,\n            layer_index,\n            &previous_layer,\n            &mut current_layer,\n        )\n        .context(\"failed to construct butterfly layer\")?;\n\n        let store_config = store_configs.remove(0);\n        debug!(\"butterfly layer tree\");\n        let tree = tree_from_slice::<Tree>(&current_layer, store_config)\n            .context(\"failed to construct merkle tree for butterfly layer\")?;\n        trees.push(tree);\n\n        // swap layers to reuse memory\n        std::mem::swap(&mut previous_layer, &mut current_layer);\n    }\n\n    // drop current, to reduce memory usage immediately\n    drop(current_layer);\n\n    // 4. Construct butterfly encoding layer\n    let layer_index = num_layers as u32;\n\n    debug!(\"replica layer: {}\", layer_index);\n\n    butterfly_encode_layer(\n        config,\n        window_index,\n        replica_id,\n        layer_index,\n        &previous_layer,\n        data,\n    )\n    .context(\"failed to construct butterfly encoding layer\")?;\n\n    // drop previous, to reduce memory usage immediately\n    drop(previous_layer);\n\n    let store_config = store_configs.remove(0);\n    debug!(\"replica layer tree\");\n    let replica_tree = lc_tree_from_slice::<Tree>(data, store_config)\n        .context(\"failed to construct merkle tree for butterfly encoding layer\")?;\n\n    Ok((trees, replica_tree))\n}\n\n/// Decodes the provided `encoded_data`, returning the decoded data.\npub fn decode<H: Hasher>(\n    config: &Config,\n    window_index: u32,\n    replica_id: &H::Domain,\n    encoded_data: &mut [u8],\n) -> Result<()> {\n    let num_layers = config.num_layers();\n\n    let mut previous_layer = vec![0u8; config.window_size()];\n    let mut current_layer = vec![0u8; config.window_size()];\n\n    // 1. Construct the mask\n    mask_layer(config, window_index, replica_id, &mut previous_layer)\n        .context(\"failed to construct mask\")?;\n\n    // 2. Construct expander layers\n    for layer_index in 2..=(config.num_expander_layers as u32) {\n        expander_layer(\n            config,\n            window_index,\n            replica_id,\n            layer_index,\n            &previous_layer,\n            &mut current_layer,\n        )\n        .context(\"failed to construct expander layer\")?;\n\n        // swap layers to reuse memory\n        std::mem::swap(&mut previous_layer, &mut current_layer);\n    }\n\n    // 3. Construct butterfly layers\n    for layer_index in (1 + config.num_expander_layers as u32)..(num_layers as u32) {\n        butterfly_layer(\n            config,\n            window_index,\n            replica_id,\n            layer_index,\n            &previous_layer,\n            &mut current_layer,\n        )\n        .context(\"failed to construct butterfly layer\")?;\n\n        // swap layers to reuse memory\n        std::mem::swap(&mut previous_layer, &mut current_layer);\n    }\n\n    // 4. 
Construct butterfly encoding layer\n    {\n        let layer_index = num_layers as u32;\n\n        butterfly_decode_layer(\n            config,\n            window_index,\n            replica_id,\n            layer_index,\n            &previous_layer,\n            encoded_data,\n        )\n        .context(\"failed to construct butterfly decoding layer\")?;\n    }\n\n    Ok(())\n}\n\n/// Generate the mask layer, for one window.\nfn mask_layer<D: Domain>(\n    config: &Config,\n    window_index: u32,\n    replica_id: &D,\n    layer_out: &mut [u8],\n) -> Result<()> {\n    ensure!(\n        layer_out.len() == config.window_size(),\n        \"layer_out must be of size {}, got {}\",\n        config.window_size(),\n        layer_out.len()\n    );\n\n    // The mask layer is always layer 1.\n    const LAYER_INDEX: u32 = 1;\n\n    // Construct the mask\n    layer_out\n        .par_chunks_mut(NODE_SIZE)\n        .enumerate()\n        .for_each(|(node_index, node)| {\n            let prefix = hash_prefix(LAYER_INDEX, node_index as u32, window_index);\n            let hash = Sha256::digest(&[&prefix[..], AsRef::<[u8]>::as_ref(replica_id)]);\n            node.copy_from_slice(&hash);\n            truncate_hash(node);\n        });\n\n    Ok(())\n}\n\n/// Generate a single expander layer, for one window.\npub fn expander_layer<D: Domain>(\n    config: &Config,\n    window_index: u32,\n    replica_id: &D,\n    layer_index: u32,\n    layer_in: &[u8],\n    layer_out: &mut [u8],\n) -> Result<()> {\n    ensure!(\n        layer_in.len() == layer_out.len(),\n        \"layer_in and layer_out must be of the same size\"\n    );\n    ensure!(\n        layer_out.len() == config.window_size(),\n        \"layer_out must be of size {}, got {}\",\n        config.window_size(),\n        layer_out.len()\n    );\n    ensure!(\n        layer_index > 1 && layer_index as usize <= config.num_expander_layers,\n        \"layer index must be in range (1, {}], got {}\",\n        config.num_expander_layers,\n        layer_index,\n    );\n\n    let graph: ExpanderGraph = config.into();\n\n    // Iterate over each node.\n    layer_out\n        .par_chunks_mut(NODE_SIZE)\n        .enumerate()\n        .for_each(|(node_index, node)| {\n            if node_index % (1024 * 1024) == 0 {\n                debug!(\n                    \"expander {} - {}/{}\",\n                    layer_index, node_index, config.num_nodes_window\n                );\n            }\n            let node_index = node_index as u32;\n\n            // Compute the parents for this node.\n            let parents: Vec<_> = graph.expanded_parents(node_index).collect();\n\n            let mut hasher = Sha256::new();\n\n            // Hash prefix + replica id, each 32 bytes.\n            let prefix = hash_prefix(layer_index, node_index, window_index);\n            hasher.input(&[&prefix[..], AsRef::<[u8]>::as_ref(replica_id)]);\n\n            // Compute batch hash of the parents.\n            let hash = batch_hash(\n                config.k as usize,\n                config.degree_expander,\n                hasher,\n                &parents,\n                layer_in,\n            );\n            node.copy_from_slice(&hash);\n        });\n\n    Ok(())\n}\n\n/// Generate a single butterfly layer.\npub fn butterfly_layer<D: Domain>(\n    config: &Config,\n    window_index: u32,\n    replica_id: &D,\n    layer_index: u32,\n    layer_in: &[u8],\n    layer_out: &mut [u8],\n) -> Result<()> {\n    ensure!(\n        layer_in.len() == layer_out.len(),\n        \"layer_in and 
layer_out must be of the same size\"\n    );\n    ensure!(\n        layer_out.len() == config.window_size(),\n        \"layer_out must be of size {}, got {}\",\n        config.window_size(),\n        layer_out.len()\n    );\n    ensure!(\n        layer_index as usize > config.num_expander_layers\n            && (layer_index as usize) < config.num_expander_layers + config.num_butterfly_layers,\n        \"layer index must be in range ({}, {}), got {}\",\n        config.num_expander_layers,\n        config.num_expander_layers + config.num_butterfly_layers,\n        layer_index,\n    );\n\n    let graph: ButterflyGraph = config.into();\n\n    // Iterate over each node.\n    layer_out\n        .par_chunks_mut(NODE_SIZE)\n        .enumerate()\n        .for_each(|(node_index, node)| {\n            let node_index = node_index as u32;\n\n            let mut hasher = Sha256::new();\n\n            // Hash prefix + replica id, each 32 bytes.\n            let prefix = hash_prefix(layer_index, node_index, window_index);\n            hasher.input(&[&prefix[..], AsRef::<[u8]>::as_ref(replica_id)]);\n\n            // Compute hash of the parents.\n            for (parent_a, parent_b) in graph.parents(node_index, layer_index).tuples() {\n                let parent_a = parent_a as usize;\n                let parent_b = parent_b as usize;\n                let parent_a_value = &layer_in[parent_a * NODE_SIZE..(parent_a + 1) * NODE_SIZE];\n                let parent_b_value = &layer_in[parent_b * NODE_SIZE..(parent_b + 1) * NODE_SIZE];\n\n                hasher.input(&[parent_a_value, parent_b_value]);\n            }\n\n            let hash = hasher.finish();\n            node.copy_from_slice(&hash);\n            truncate_hash(node);\n        });\n\n    Ok(())\n}\n\n/// Generate a butterfly layer which additionally encodes using the data.\npub fn butterfly_encode_layer<D: Domain>(\n    config: &Config,\n    window_index: u32,\n    replica_id: &D,\n    layer_index: u32,\n    layer_in: &[u8],\n    data: &mut [u8],\n) -> Result<()> {\n    butterfly_encode_decode_layer(\n        config,\n        window_index,\n        replica_id,\n        layer_index,\n        layer_in,\n        data,\n        encode::encode,\n    )\n}\n\n/// Generate a butterfly layer which additionally decodes using the data.\npub fn butterfly_decode_layer<D: Domain>(\n    config: &Config,\n    window_index: u32,\n    replica_id: &D,\n    layer_index: u32,\n    layer_in: &[u8],\n    data: &mut [u8],\n) -> Result<()> {\n    butterfly_encode_decode_layer(\n        config,\n        window_index,\n        replica_id,\n        layer_index,\n        layer_in,\n        data,\n        encode::decode,\n    )\n}\n\n/// Generate a butterfly layer which additionally encodes or decodes using the data.\nfn butterfly_encode_decode_layer<D: Domain, F: Fn(D, D) -> D>(\n    config: &Config,\n    window_index: u32,\n    replica_id: &D,\n    layer_index: u32,\n    layer_in: &[u8],\n    data: &mut [u8],\n    op: F,\n) -> Result<()> {\n    ensure!(\n        layer_in.len() == data.len(),\n        \"layer_in and data must be of the same size\"\n    );\n    ensure!(\n        layer_in.len() == config.window_size(),\n        \"layer_in must be of size {}, got {}\",\n        config.window_size(),\n        layer_in.len()\n    );\n    ensure!(\n        layer_index as usize == config.num_expander_layers + config.num_butterfly_layers,\n        \"encoding must be on the last layer\"\n    );\n\n    let graph: ButterflyGraph = config.into();\n\n    // Iterate over each node.\n    for 
(node_index, data_node) in data.chunks_mut(NODE_SIZE).enumerate() {\n        let node_index = node_index as u32;\n\n        let mut hasher = Sha256::new();\n\n        // Hash prefix + replica id, each 32 bytes.\n        let prefix = hash_prefix(layer_index, node_index, window_index);\n        hasher.input(&[&prefix[..], AsRef::<[u8]>::as_ref(replica_id)]);\n\n        // Compute hash of the parents.\n        for (parent_a, parent_b) in graph.parents(node_index, layer_index).tuples() {\n            let parent_a = parent_a as usize;\n            let parent_b = parent_b as usize;\n            let parent_a_value = &layer_in[parent_a * NODE_SIZE..(parent_a + 1) * NODE_SIZE];\n            let parent_b_value = &layer_in[parent_b * NODE_SIZE..(parent_b + 1) * NODE_SIZE];\n\n            hasher.input(&[parent_a_value, parent_b_value]);\n        }\n\n        let mut key = hasher.finish();\n        truncate_hash(&mut key);\n\n        // encode\n        let key = D::try_from_bytes(&key)?;\n        let data_node_fr = D::try_from_bytes(data_node)?;\n        let encoded_node = op(key, data_node_fr);\n\n        // write result\n        data_node.copy_from_slice(AsRef::<[u8]>::as_ref(&encoded_node));\n    }\n\n    Ok(())\n}\n\n/// Constructs the first 32 byte prefix for hashing any node.\npub fn hash_prefix(layer: u32, node_index: u32, window_index: u32) -> [u8; 32] {\n    let mut prefix = [0u8; 32];\n    // layer: 32bits\n    prefix[..4].copy_from_slice(&layer.to_be_bytes());\n    // node_index: 32bits\n    prefix[4..8].copy_from_slice(&node_index.to_be_bytes());\n    // window_index: 32bits\n    prefix[8..12].copy_from_slice(&window_index.to_be_bytes());\n    // 0 padding for the rest\n\n    prefix\n}\n\n/// Construct a tree from the given byte slice.\nfn lc_tree_from_slice<Tree: MerkleTreeTrait>(\n    data: &[u8],\n    config: StoreConfig,\n) -> Result<LCMerkleTree<Tree>> {\n    MerkleTreeWrapper::from_par_iter_with_config(\n        data.par_chunks(NODE_SIZE)\n            .map(|node| <Tree::Hasher as Hasher>::Domain::try_from_bytes(node).unwrap()),\n        config,\n    )\n}\n\n/// Construct a tree from the given byte slice.\nfn tree_from_slice<Tree: MerkleTreeTrait>(\n    data: &[u8],\n    config: StoreConfig,\n) -> Result<MerkleTree<Tree>> {\n    let mut tree = MerkleTreeWrapper::from_par_iter_with_config(\n        data.par_chunks(NODE_SIZE)\n            .map(|node| <Tree::Hasher as Hasher>::Domain::try_from_bytes(node).unwrap()),\n        config.clone(),\n    )?;\n\n    // compact the thing\n    tree.compact(config, StoreConfigDataVersion::One as u32)?;\n\n    Ok(tree)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use ff::Field;\n    use paired::bls12_381::Fr;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        cache_key::CacheKey,\n        fr32::fr_into_bytes,\n        hasher::{PoseidonDomain, PoseidonHasher, Sha256Domain},\n        merkle::{split_config, OctLCMerkleTree},\n    };\n\n    fn sample_config() -> Config {\n        Config {\n            k: 8,\n            num_nodes_window: 2048 / 32,\n            degree_expander: 12,\n            degree_butterfly: 4,\n            num_expander_layers: 6,\n            num_butterfly_layers: 4,\n            sector_size: 2048 * 8,\n        }\n    }\n\n    #[test]\n    fn test_mask_layer() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let config = sample_config();\n        let replica_id: Sha256Domain = Fr::random(rng).into();\n        let window_index = 
rng.gen();\n\n        let mut layer: Vec<u8> = (0..config.window_size()).map(|_| rng.gen()).collect();\n\n        mask_layer(&config, window_index, &replica_id, &mut layer).unwrap();\n\n        assert!(!layer.iter().all(|&byte| byte == 0), \"must not all be zero\");\n    }\n\n    #[test]\n    fn test_expander_layer() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let config = sample_config();\n        let replica_id: Sha256Domain = Fr::random(rng).into();\n        let window_index = rng.gen();\n        let layer_index = rng.gen_range(2, config.num_expander_layers as u32);\n\n        let layer_in: Vec<u8> = (0..config.num_nodes_window)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n        let mut layer_out = vec![0u8; config.window_size()];\n\n        expander_layer(\n            &config,\n            window_index,\n            &replica_id,\n            layer_index,\n            &layer_in,\n            &mut layer_out,\n        )\n        .unwrap();\n\n        assert!(\n            !layer_out.iter().all(|&byte| byte == 0),\n            \"must not all be zero\"\n        );\n    }\n\n    #[test]\n    fn test_butterfly_layer() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let config = sample_config();\n        let replica_id: Sha256Domain = Fr::random(rng).into();\n        let window_index = rng.gen();\n        let layer_index = rng.gen_range(\n            config.num_expander_layers,\n            config.num_expander_layers + config.num_butterfly_layers,\n        ) as u32;\n\n        let layer_in: Vec<u8> = (0..config.num_nodes_window)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n        let mut layer_out = vec![0u8; config.window_size()];\n\n        butterfly_layer(\n            &config,\n            window_index,\n            &replica_id,\n            layer_index,\n            &layer_in,\n            &mut layer_out,\n        )\n        .unwrap();\n\n        assert!(\n            !layer_out.iter().all(|&byte| byte == 0),\n            \"must not all be zero\"\n        );\n    }\n\n    #[test]\n    fn test_butterfly_encode_decode_layer() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let config = sample_config();\n        let replica_id: Sha256Domain = Fr::random(rng).into();\n        let window_index = rng.gen();\n        let layer_index = (config.num_expander_layers + config.num_butterfly_layers) as u32;\n\n        let data: Vec<u8> = (0..config.num_nodes_window)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        let layer_in: Vec<u8> = (0..config.num_nodes_window)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        let mut layer_out = data.clone();\n\n        butterfly_encode_layer(\n            &config,\n            window_index,\n            &replica_id,\n            layer_index,\n            &layer_in,\n            &mut layer_out,\n        )\n        .unwrap();\n\n        assert!(\n            !layer_out.iter().all(|&byte| byte == 0),\n            \"must not all be zero\"\n        );\n\n        butterfly_decode_layer(\n            &config,\n            window_index,\n            &replica_id,\n            layer_index,\n            &layer_in,\n            &mut layer_out,\n        )\n        .unwrap();\n        assert_eq!(data, layer_out, \"failed to decode\");\n    }\n\n    #[test]\n    fn test_encode_decode_layer() {\n        let 
rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let config = sample_config();\n        let replica_id: PoseidonDomain = Fr::random(rng).into();\n        let window_index = rng.gen();\n\n        let data: Vec<u8> = (0..config.num_nodes_window)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        let cache_dir = tempfile::tempdir().unwrap();\n        let store_config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            StoreConfig::default_rows_to_discard(config.num_nodes_window as usize, 8),\n        );\n        let mut encoded_data = data.clone();\n\n        let store_configs = split_config(store_config.clone(), config.num_layers()).unwrap();\n\n        let (trees, _replica_tree) = encode_with_trees::<OctLCMerkleTree<PoseidonHasher>>(\n            &config,\n            store_configs,\n            window_index,\n            &replica_id,\n            &mut encoded_data,\n        )\n        .unwrap();\n        assert_eq!(\n            trees.len(),\n            config.num_expander_layers + config.num_butterfly_layers - 1\n        );\n        assert_ne!(data, encoded_data, \"failed to encode\");\n\n        decode::<PoseidonHasher>(&config, window_index, &replica_id, &mut encoded_data).unwrap();\n        assert_eq!(data, encoded_data, \"failed to decode\");\n    }\n\n    #[test]\n    fn test_hash_prefix() {\n        assert_eq!(hash_prefix(0, 0, 0), [0u8; 32]);\n        assert_eq!(\n            hash_prefix(1, 2, 3),\n            [\n                0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n                0, 0, 0, 0\n            ]\n        );\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/nse/vanilla/mod.rs",
    "content": "mod batch_hasher;\nmod butterfly_graph;\nmod challenges;\nmod config;\nmod expander_graph;\nmod labels;\nmod nse;\nmod porep;\nmod proof_scheme;\n\n/// A single parent index.\npub type Parent = u32;\n\npub use self::batch_hasher::batch_hash;\npub use self::butterfly_graph::*;\npub use self::challenges::*;\npub use self::config::Config;\npub use self::expander_graph::*;\npub use self::labels::*;\npub use self::nse::*;\n"
  },
  {
    "path": "storage-proofs/porep/src/nse/vanilla/nse.rs",
    "content": "use std::marker::PhantomData;\nuse std::path::{Path, PathBuf};\n\nuse anyhow::{Context, Result};\nuse generic_array::typenum::{Unsigned, U2};\nuse merkletree::merkle::get_merkle_tree_leafs;\nuse merkletree::store::{Store, StoreConfig};\nuse paired::bls12_381::Fr;\nuse serde::{Deserialize, Serialize};\nuse storage_proofs_core::{\n    hasher::{Domain, HashFunction, Hasher, PoseidonDomain, PoseidonFunction, PoseidonMDArity},\n    merkle::{\n        split_config, split_config_and_replica, BinaryMerkleTree, DiskStore, LCStore, MerkleProof,\n        MerkleTreeTrait, MerkleTreeWrapper,\n    },\n    parameter_cache::ParameterSetMetadata,\n};\n\nuse super::Config;\n\n/// Implementation of  Narrow Stacked Expander PoRep (NSE).\n#[derive(Debug, Default)]\npub struct NarrowStackedExpander<'a, Tree: MerkleTreeTrait, G: Hasher> {\n    _tree: PhantomData<&'a Tree>,\n    _g: PhantomData<G>,\n}\n\n#[derive(Debug, Clone)]\npub struct SetupParams {\n    pub config: Config,\n    /// Number of challengs per window.\n    pub num_challenges_window: usize,\n}\n\n#[derive(Debug)]\npub struct PublicParams<Tree> {\n    pub config: Config,\n    /// Number of challengs per window.\n    pub num_challenges_window: usize,\n    _tree: PhantomData<Tree>,\n}\n\nimpl<Tree> Clone for PublicParams<Tree> {\n    fn clone(&self) -> Self {\n        Self {\n            config: self.config.clone(),\n            num_challenges_window: self.num_challenges_window,\n            _tree: Default::default(),\n        }\n    }\n}\n\nimpl<Tree> From<SetupParams> for PublicParams<Tree> {\n    fn from(setup_params: SetupParams) -> Self {\n        Self {\n            config: setup_params.config,\n            num_challenges_window: setup_params.num_challenges_window,\n            _tree: Default::default(),\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> ParameterSetMetadata for PublicParams<Tree> {\n    fn identifier(&self) -> String {\n        format!(\n            \"nse::PublicParams{{ config: {:?}, challenges/window {}, tree: {} }}\",\n            self.config,\n            self.num_challenges_window,\n            Tree::display()\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.config.sector_size as u64\n    }\n}\n\n/// Stored along side the sector on disk.\n#[derive(Default, Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]\npub struct PersistentAux<D> {\n    /// The commitments for the individual layers.\n    pub comm_layers: Vec<D>,\n    /// The commitment of the replica.\n    pub comm_replica: D,\n}\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct TemporaryAux<Tree: MerkleTreeTrait, G: Hasher> {\n    pub layer_config: StoreConfig,\n    /// Data tree config.\n    pub tree_d_config: StoreConfig,\n    _tree: PhantomData<Tree>,\n    _g: PhantomData<G>,\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> Clone for TemporaryAux<Tree, G> {\n    fn clone(&self) -> Self {\n        Self {\n            layer_config: self.layer_config.clone(),\n            tree_d_config: self.tree_d_config.clone(),\n            _tree: Default::default(),\n            _g: Default::default(),\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> TemporaryAux<Tree, G> {\n    /// Create a new TemporaryAux from the required store configs.\n    pub fn new(layer_config: StoreConfig, tree_d_config: StoreConfig) -> Self {\n        Self {\n            layer_config,\n            tree_d_config,\n            _tree: Default::default(),\n            _g: Default::default(),\n        }\n    }\n\n    // Discards all persisted 
merkle and layer data that is no longer required.\n    pub fn clear_temp(self) -> Result<()> {\n        let cached = |config: &StoreConfig| {\n            Path::new(&StoreConfig::data_path(&config.path, &config.id)).exists()\n        };\n\n        if cached(&self.tree_d_config) {\n            let tree_d_size = self\n                .tree_d_config\n                .size\n                .context(\"tree_d config has no size\")?;\n            let tree_d_store: DiskStore<G::Domain> =\n                DiskStore::new_from_disk(tree_d_size, U2::to_usize(), &self.tree_d_config)\n                    .context(\"tree_d\")?;\n            // Note: from_data_store requires the base tree leaf count\n            let tree_d = BinaryMerkleTree::<G>::from_data_store(\n                tree_d_store,\n                get_merkle_tree_leafs(tree_d_size, U2::to_usize())?,\n            )\n            .context(\"tree_d\")?;\n\n            tree_d.delete(self.tree_d_config).context(\"tree_d\")?;\n        }\n\n        // TODO: use split\n        // for configs in self.layer_configs.into_iter() {\n        //     for config in configs.into_iter() {\n        //         if cached(&config) {\n        //             DiskStore::<<Tree::Hasher as Hasher>::Domain>::delete(config)?;\n        //         }\n        //     }\n        // }\n\n        Ok(())\n    }\n}\n\n/// Tau: the public commitments to the original data and the replica.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct Tau<D: Domain, E: Domain> {\n    /// The commitment for the original data.\n    pub comm_d: E,\n    /// The commitment to the full replica.\n    pub comm_r: D,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicInputs<D: Domain, S: Domain> {\n    pub replica_id: D,\n    pub seed: [u8; 32],\n    pub tau: Option<Tau<D, S>>,\n    /// Partition index.\n    pub k: Option<usize>,\n}\n\n#[derive(Debug)]\npub struct PrivateInputs<Tree: MerkleTreeTrait, G: Hasher> {\n    pub p_aux: PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n    pub t_aux: TemporaryAuxCache<Tree, G>,\n}\n\n#[derive(Debug)]\npub struct TemporaryAuxCache<Tree: MerkleTreeTrait, G: Hasher> {\n    /// The merkle trees for each layer.\n    pub layers: Vec<\n        MerkleTreeWrapper<\n            Tree::Hasher,\n            LCStore<<Tree::Hasher as Hasher>::Domain>,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >,\n    >,\n\n    pub tree_replica: MerkleTreeWrapper<\n        Tree::Hasher,\n        LCStore<<Tree::Hasher as Hasher>::Domain>,\n        Tree::Arity,\n        Tree::SubTreeArity,\n        Tree::TopTreeArity,\n    >,\n\n    /// The merkle tree for the original data.\n    pub tree_d: BinaryMerkleTree<G>,\n\n    // Store the 'default_rows_to_discard' value from the `StoreConfig` for later use (i.e. 
proof generation).\n    pub tree_rows_to_discard: usize,\n\n    pub t_aux: TemporaryAux<Tree, G>,\n\n    /// The path to the replica.\n    pub replica_path: PathBuf,\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> TemporaryAuxCache<Tree, G> {\n    pub fn new(\n        config: &Config,\n        t_aux: &TemporaryAux<Tree, G>,\n        replica_path: PathBuf,\n    ) -> Result<Self> {\n        // tree_d_size stored in the config is the base tree size\n        let tree_d_size = t_aux.tree_d_config.size.unwrap();\n        let tree_d_leafs = get_merkle_tree_leafs(tree_d_size, U2::to_usize())?;\n\n        let tree_d_store: DiskStore<G::Domain> =\n            DiskStore::new_from_disk(tree_d_size, U2::to_usize(), &t_aux.tree_d_config)\n                .context(\"tree_d_store\")?;\n        let tree_d =\n            BinaryMerkleTree::<G>::from_data_store(tree_d_store, tree_d_leafs).context(\"tree_d\")?;\n\n        // split configs for layer level\n        let store_configs = split_config(t_aux.layer_config.clone(), config.num_layers())?;\n\n        let mut layers = store_configs\n            .into_iter()\n            .enumerate()\n            .map(|(layer_index, store_config)| {\n                if layer_index < config.num_layers() - 1 {\n                    // split for window level\n                    let store_configs = split_config(store_config, config.num_windows())?;\n\n                    // create tree for this layer\n                    MerkleTreeWrapper::<\n                        Tree::Hasher,\n                        LCStore<<Tree::Hasher as Hasher>::Domain>,\n                        Tree::Arity,\n                        Tree::SubTreeArity,\n                        Tree::TopTreeArity,\n                    >::from_store_configs(\n                        config.num_nodes_window, &store_configs\n                    )\n                } else {\n                    // with replica, last layer\n                    // split for window level\n                    let (store_configs, replica_config) = split_config_and_replica(\n                        store_config,\n                        replica_path.clone(),\n                        config.num_nodes_window,\n                        config.num_windows(),\n                    )?;\n\n                    // create tree for this layer\n                    MerkleTreeWrapper::<\n                        Tree::Hasher,\n                        LCStore<<Tree::Hasher as Hasher>::Domain>,\n                        Tree::Arity,\n                        Tree::SubTreeArity,\n                        Tree::TopTreeArity,\n                    >::from_store_configs_and_replica(\n                        config.num_nodes_window,\n                        &store_configs,\n                        &replica_config,\n                    )\n                }\n            })\n            .collect::<Result<Vec<_>>>()?;\n\n        let tree_replica = layers.pop().unwrap(); // replica tree is the last one\n        let tree_rows_to_discard = t_aux.layer_config.rows_to_discard;\n\n        Ok(TemporaryAuxCache {\n            layers,\n            tree_replica,\n            tree_d,\n            tree_rows_to_discard,\n            replica_path,\n            t_aux: t_aux.clone(),\n        })\n    }\n}\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct Proof<Tree: MerkleTreeTrait, G: Hasher> {\n    #[serde(bound(\n        serialize = \"MerkleProof<G, U2>: Serialize\",\n        deserialize = \"MerkleProof<G, U2>: Deserialize<'de>\"\n    ))]\n    pub data_proof: MerkleProof<G, U2>,\n    
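/// Inclusion proof for the challenged node in the tree of its layer (the replica tree for the last layer).\n    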
#[serde(bound(\n        serialize = \"MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>: Serialize\",\n        deserialize = \"MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>: Deserialize<'de>\"\n    ))]\n    pub layer_proof: MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n    #[serde(bound(\n        serialize = \"MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>: Serialize\",\n        deserialize = \"MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>: Deserialize<'de>\"\n    ))]\n    pub parents_proofs:\n        Vec<MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>,\n    /// The roots of the merkle tree layers, including the replica layer.\n    pub comm_layers: Vec<<Tree::Hasher as Hasher>::Domain>,\n    _tree: PhantomData<Tree>,\n    _g: PhantomData<G>,\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> Proof<Tree, G> {\n    pub fn new(\n        data_proof: MerkleProof<G, U2>,\n        layer_proof: MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n        parents_proofs: Vec<\n            MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n        >,\n        comm_layers: Vec<<Tree::Hasher as Hasher>::Domain>,\n    ) -> Self {\n        Self {\n            data_proof,\n            layer_proof,\n            parents_proofs,\n            comm_layers,\n            _tree: Default::default(),\n            _g: Default::default(),\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> Clone for Proof<Tree, G> {\n    fn clone(&self) -> Self {\n        Self {\n            data_proof: self.data_proof.clone(),\n            layer_proof: self.layer_proof.clone(),\n            parents_proofs: self.parents_proofs.clone(),\n            comm_layers: self.comm_layers.clone(),\n            _tree: Default::default(),\n            _g: Default::default(),\n        }\n    }\n}\n\n/// Calculate the comm_r hash.\npub fn hash_comm_r<D: Domain>(comm_layers: &[D], comm_replica: D) -> Fr {\n    let arity = PoseidonMDArity::to_usize();\n    let mut data: Vec<PoseidonDomain> = Vec::with_capacity(arity);\n    data.extend(comm_layers.iter().map(|v| {\n        let fr: Fr = (*v).into();\n        let d: PoseidonDomain = fr.into();\n        d\n    }));\n    let comm_replica_fr: Fr = comm_replica.into();\n    data.push(comm_replica_fr.into());\n\n    // pad for MD\n    while data.len() % arity != 0 {\n        data.push(PoseidonDomain::default());\n    }\n\n    PoseidonFunction::hash_md(&data).into()\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/nse/vanilla/porep.rs",
    "content": "use std::path::PathBuf;\n\nuse generic_array::typenum::{Unsigned, U2};\nuse merkletree::{merkle::get_merkle_tree_len, store::StoreConfig};\nuse rayon::prelude::*;\nuse storage_proofs_core::{\n    cache_key::CacheKey,\n    error::Result,\n    hasher::{Domain, Hasher},\n    merkle::{split_config, BinaryMerkleTree, MerkleTreeTrait, MerkleTreeWrapper},\n    util::NODE_SIZE,\n    Data,\n};\n\nuse super::{\n    hash_comm_r, labels, Config, NarrowStackedExpander, PersistentAux, PublicParams, Tau,\n    TemporaryAux,\n};\nuse crate::PoRep;\n\nimpl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> NarrowStackedExpander<'a, Tree, G> {\n    fn generate_data_tree_config(\n        config: &Config,\n        store_config: &StoreConfig,\n    ) -> Result<StoreConfig> {\n        let num_nodes_sector = config.num_nodes_sector();\n        let mut data_tree_config = StoreConfig::from_config(\n            &store_config,\n            CacheKey::CommDTree.to_string(),\n            Some(get_merkle_tree_len(num_nodes_sector, U2::to_usize())?),\n        );\n        data_tree_config.rows_to_discard =\n            StoreConfig::default_rows_to_discard(num_nodes_sector, U2::to_usize());\n\n        Ok(data_tree_config)\n    }\n\n    /// Construct the merkle tree for the orginal data, for the whole sector.\n    fn build_data_tree(data: &[u8], data_tree_config: StoreConfig) -> Result<BinaryMerkleTree<G>> {\n        BinaryMerkleTree::from_par_iter_with_config(\n            data.par_chunks(NODE_SIZE)\n                .map(|chunk| G::Domain::try_from_bytes(chunk).expect(\"invalid data\")),\n            data_tree_config,\n        )\n    }\n\n    fn generate_store_configs(\n        config: &Config,\n        store_config: &StoreConfig,\n    ) -> Result<(Vec<Vec<StoreConfig>>, StoreConfig)> {\n        let mut layer_store_config = StoreConfig::from_config(\n            &store_config,\n            CacheKey::LabelLayer.to_string(),\n            Some(get_merkle_tree_len(\n                config.num_nodes_window as usize,\n                Tree::Arity::to_usize(),\n            )?),\n        );\n        // Ensure the right levels are set for the config.\n        layer_store_config.rows_to_discard =\n            StoreConfig::default_rows_to_discard(config.num_nodes_window, Tree::Arity::to_usize());\n\n        let layered_store_configs = split_config(layer_store_config.clone(), config.num_layers())?;\n\n        let mut windowed_store_configs =\n            vec![Vec::with_capacity(config.num_layers()); config.num_windows()];\n\n        for store_config in layered_store_configs.into_iter() {\n            let configs = split_config(store_config, config.num_windows())?;\n            for (window_index, store_config) in configs.into_iter().enumerate() {\n                windowed_store_configs[window_index].push(store_config);\n            }\n        }\n\n        Ok((windowed_store_configs, layer_store_config))\n    }\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> PoRep<'a, Tree::Hasher, G>\n    for NarrowStackedExpander<'a, Tree, G>\n{\n    type Tau = Tau<<Tree::Hasher as Hasher>::Domain, <G as Hasher>::Domain>;\n    type ProverAux = (\n        PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n        TemporaryAux<Tree, G>,\n    );\n\n    fn replicate(\n        pp: &'a PublicParams<Tree>,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        mut data: Data<'a>,\n        data_tree: Option<BinaryMerkleTree<G>>,\n        store_config: StoreConfig,\n        _replica_path: PathBuf,\n    ) -> 
Result<(Self::Tau, Self::ProverAux)> {\n        let config = &pp.config;\n        let num_nodes_sector = config.num_nodes_sector();\n\n        assert_eq!(num_nodes_sector % config.num_nodes_window, 0);\n        assert_eq!(data.len(), config.sector_size);\n        assert_eq!(config.sector_size % config.window_size(), 0);\n\n        data.ensure_data()?;\n\n        // Construct the data tree.\n        let data_tree_config = Self::generate_data_tree_config(config, &store_config)?;\n        let data_tree = match data_tree {\n            Some(tree) => tree,\n            None => Self::build_data_tree(data.as_ref(), data_tree_config.clone())?,\n        };\n\n        // Generate labeling layers and encode the data.\n\n        let (windowed_store_configs, layer_store_config) =\n            Self::generate_store_configs(config, &store_config)?;\n\n        let trees = data\n            .as_mut()\n            .par_chunks_mut(config.window_size())\n            .enumerate()\n            .zip(windowed_store_configs.into_par_iter())\n            .map(|((window_index, window_data), store_configs)| {\n                let (trees, replica_tree) = labels::encode_with_trees::<Tree>(\n                    config,\n                    store_configs,\n                    window_index as u32,\n                    replica_id,\n                    window_data,\n                )?;\n                Ok((trees, replica_tree))\n            })\n            .collect::<Result<Vec<(Vec<_>, _)>>>()?;\n\n        debug_assert_eq!(trees.len(), config.num_windows());\n        data.drop_data();\n\n        let mut layered_trees: Vec<Vec<_>> = (0..config.num_layers() - 1)\n            .map(|_| Vec::with_capacity(config.num_windows()))\n            .collect();\n        let mut replica_trees = Vec::with_capacity(config.num_windows());\n\n        for (window_trees, replica_tree) in trees.into_iter() {\n            debug_assert_eq!(window_trees.len(), config.num_layers() - 1);\n            for (layer_index, trees) in window_trees.into_iter().enumerate() {\n                layered_trees[layer_index].push(trees);\n            }\n            replica_trees.push(replica_tree);\n        }\n\n        // Build main trees for each layer\n        let mut trees_layers = Vec::new();\n        for trees in layered_trees.into_iter() {\n            debug_assert_eq!(trees.len(), config.num_windows());\n            trees_layers.push(MerkleTreeWrapper::<\n                Tree::Hasher,\n                _,\n                Tree::Arity,\n                Tree::SubTreeArity,\n                Tree::TopTreeArity,\n            >::from_trees(trees)?);\n        }\n\n        // Build main tree for the replica layer\n        let replica_tree = MerkleTreeWrapper::<\n            Tree::Hasher,\n            _,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >::from_trees(replica_trees)?;\n\n        let p_aux = PersistentAux {\n            comm_layers: trees_layers.iter().map(|tree| tree.root()).collect(),\n            comm_replica: replica_tree.root(),\n        };\n\n        // Calculate comm_r\n        let comm_r = hash_comm_r(&p_aux.comm_layers, p_aux.comm_replica);\n\n        let tau = Tau::<<Tree::Hasher as Hasher>::Domain, G::Domain> {\n            comm_d: data_tree.root(),\n            comm_r: comm_r.into(),\n        };\n\n        let t_aux = TemporaryAux::new(layer_store_config, data_tree_config);\n\n        Ok((tau, (p_aux, t_aux)))\n    }\n\n    fn extract_all<'b>(\n        pp: &'b PublicParams<Tree>,\n        
replica_id: &'b <Tree::Hasher as Hasher>::Domain,\n        data: &'b [u8],\n        _config: Option<StoreConfig>,\n    ) -> Result<Vec<u8>> {\n        let config = &pp.config;\n        let mut result = data.to_vec();\n\n        result\n            .par_chunks_mut(config.window_size())\n            .enumerate()\n            .try_for_each(|(window_index, window_data)| {\n                labels::decode::<Tree::Hasher>(config, window_index as u32, replica_id, window_data)\n            })?;\n\n        Ok(result)\n    }\n\n    fn extract(\n        _pp: &PublicParams<Tree>,\n        _replica_id: &<Tree::Hasher as Hasher>::Domain,\n        _data: &[u8],\n        _node: usize,\n        _config: Option<StoreConfig>,\n    ) -> Result<Vec<u8>> {\n        todo!()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use generic_array::typenum::{U0, U8};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        hasher::{PoseidonHasher, Sha256Hasher},\n        merkle::LCTree,\n        proof::ProofScheme,\n        util::NODE_SIZE,\n    };\n\n    use super::super::{Config, SetupParams};\n\n    #[test]\n    #[ignore]\n    fn test_bench_encode() {\n        type Tree = LCTree<PoseidonHasher, U8, U8, U0>;\n        femme::start(log::LevelFilter::Debug).ok();\n\n        let sector_size = 1024 * 1024 * 1024 * 4;\n        let num_windows = 1;\n\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n        let replica_id = <PoseidonHasher as Hasher>::Domain::random(rng);\n        let config = Config {\n            k: 8,\n            num_nodes_window: (sector_size / num_windows) / NODE_SIZE,\n            degree_expander: 384,\n            degree_butterfly: 16,\n            num_expander_layers: 8,\n            num_butterfly_layers: 7,\n            sector_size,\n        };\n        assert_eq!(config.num_windows(), num_windows);\n\n        let mut data: Vec<u8> = (0..config.num_nodes_sector())\n            .flat_map(|_| {\n                let v = <PoseidonHasher as Hasher>::Domain::random(rng);\n                v.into_bytes()\n            })\n            .collect();\n        assert_eq!(config.sector_size, data.len());\n\n        let sp = SetupParams {\n            config: config.clone(),\n            num_challenges_window: 2,\n        };\n\n        let pp = NarrowStackedExpander::<Tree, Sha256Hasher>::setup(&sp).expect(\"setup failed\");\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempfile::tempdir().unwrap();\n        let store_config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            StoreConfig::default_rows_to_discard(config.num_nodes_sector(), U2::to_usize()),\n        );\n\n        // Generate a replica path.\n        let temp_dir = tempdir::TempDir::new(\"test-extract-all\").unwrap();\n        let temp_path = temp_dir.path();\n        let replica_path = temp_path.join(\"replica-path\");\n\n        println!(\n            \"replication start: {}GiB\",\n            config.sector_size / 1024 / 1024 / 1024\n        );\n        let now = std::time::Instant::now();\n        NarrowStackedExpander::<Tree, Sha256Hasher>::replicate(\n            &pp,\n            &replica_id,\n            (&mut data[..]).into(),\n            None,\n            store_config.clone(),\n            replica_path.clone(),\n        )\n        .expect(\"replication failed\");\n\n        println!(\n            
\"replicated {:02}GiB in {:04}s\",\n            config.sector_size as f64 / 1024. / 1024. / 1024.,\n            now.elapsed().as_millis() as f64 / 1000.\n        );\n    }\n\n    #[test]\n    fn test_extract_all() {\n        type Tree = LCTree<PoseidonHasher, U8, U8, U0>;\n        // femme::pretty::Logger::new()\n        //     .start(log::LevelFilter::Trace)\n        //     .ok();\n\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n        let replica_id = <PoseidonHasher as Hasher>::Domain::random(rng);\n        let config = Config {\n            k: 8,\n            num_nodes_window: 64,\n            degree_expander: 12,\n            degree_butterfly: 8,\n            num_expander_layers: 3,\n            num_butterfly_layers: 3,\n            sector_size: 64 * 32 * 8,\n        };\n\n        let data: Vec<u8> = (0..config.num_nodes_sector())\n            .flat_map(|_| {\n                let v = <PoseidonHasher as Hasher>::Domain::random(rng);\n                v.into_bytes()\n            })\n            .collect();\n\n        // create a copy, so we can compare roundtrips\n        let mut data_copy = data.clone();\n\n        let sp = SetupParams {\n            config: config.clone(),\n            num_challenges_window: 2,\n        };\n\n        let pp = NarrowStackedExpander::<Tree, Sha256Hasher>::setup(&sp).expect(\"setup failed\");\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempfile::tempdir().unwrap();\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            StoreConfig::default_rows_to_discard(config.num_nodes_sector(), U2::to_usize()),\n        );\n\n        // Generate a replica path.\n        let temp_dir = tempdir::TempDir::new(\"test-extract-all\").unwrap();\n        let temp_path = temp_dir.path();\n        let replica_path = temp_path.join(\"replica-path\");\n\n        NarrowStackedExpander::<Tree, Sha256Hasher>::replicate(\n            &pp,\n            &replica_id,\n            (&mut data_copy[..]).into(),\n            None,\n            config.clone(),\n            replica_path.clone(),\n        )\n        .expect(\"replication failed\");\n\n        assert_ne!(data, data_copy);\n\n        let decoded_data = NarrowStackedExpander::<Tree, Sha256Hasher>::extract_all(\n            &pp,\n            &replica_id,\n            data_copy.as_mut_slice(),\n            Some(config.clone()),\n        )\n        .expect(\"failed to extract data\");\n\n        assert_eq!(data, decoded_data);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/nse/vanilla/proof_scheme.rs",
    "content": "use anyhow::{ensure, Context};\nuse log::trace;\nuse paired::bls12_381::Fr;\nuse sha2raw::Sha256;\nuse storage_proofs_core::{\n    error::Result,\n    hasher::{Domain, Hasher},\n    merkle::{MerkleProofTrait, MerkleTreeTrait},\n    proof::ProofScheme,\n};\n\nuse super::{\n    batch_hasher::{batch_hash_expanded, truncate_hash},\n    hash_comm_r, hash_prefix, ChallengeRequirements, Challenges, NarrowStackedExpander, Parent,\n    PrivateInputs, Proof, PublicInputs, PublicParams, SetupParams,\n};\nuse crate::encode;\n\nimpl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> ProofScheme<'a>\n    for NarrowStackedExpander<'a, Tree, G>\n{\n    type PublicParams = PublicParams<Tree>;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<Tree::Hasher as Hasher>::Domain, <G as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<Tree, G>;\n    type Proof = Vec<Proof<Tree, G>>;\n    type Requirements = ChallengeRequirements;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        Ok(sp.clone().into())\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        let proofs = Self::prove_all_partitions(pub_params, pub_inputs, priv_inputs, 1)?;\n        let k = pub_inputs.k.unwrap_or_default();\n\n        // Because partition proofs require a common setup, the general ProofScheme implementation,\n        // which makes use of `ProofScheme::prove` cannot be used here. Instead, we need to prove all\n        // partitions in one pass, as implemented by `prove_all_partitions` below.\n        assert!(\n            k < 1,\n            \"It is a programmer error to call NarrowStackedExpander::prove with more than one partition.\"\n        );\n\n        Ok(proofs[k].to_owned())\n    }\n\n    fn prove_all_partitions<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n        partition_count: usize,\n    ) -> Result<Vec<Self::Proof>> {\n        ensure!(partition_count > 0, \"partitions must not be 0\");\n\n        let config = &pub_params.config;\n        let challenges = Challenges::new(\n            config,\n            pub_params.num_challenges_window,\n            &pub_inputs.replica_id,\n            pub_inputs.seed,\n        );\n\n        assert_eq!(\n            partition_count, 1,\n            \"multiple partitions are not implemented yet\"\n        );\n\n        let mut proofs = Vec::new();\n\n        let butterfly_parents = super::ButterflyGraph::from(config);\n        let exp_parents = super::ExpanderGraph::from(config);\n\n        for challenge in challenges {\n            // the index of the challenge is adjusted, as the trees span the whole sector, not just a single window.\n            let absolute_challenge = challenge.window * config.num_nodes_window + challenge.node;\n\n            // Data Inclusion Proof\n            let data_proof = priv_inputs\n                .t_aux\n                .tree_d\n                .gen_proof(absolute_challenge)\n                .context(\"failed to create data proof\")?;\n\n            // Layer Inclusion Proof\n            let layer_tree = if challenge.layer == config.num_layers() {\n                &priv_inputs.t_aux.tree_replica\n            } else {\n                &priv_inputs.t_aux.layers[challenge.layer - 1]\n            };\n            let 
rows_to_discard = priv_inputs.t_aux.tree_rows_to_discard;\n            let layer_proof = layer_tree\n                .gen_cached_proof(absolute_challenge, Some(rows_to_discard))\n                .context(\"failed to create layer proof\")?;\n\n            // Labeling Proofs\n            let parents: Vec<Parent> = if config.is_layer_expander(challenge.layer) {\n                exp_parents\n                    .expanded_parents(challenge.node as u32)\n                    .collect()\n            } else {\n                butterfly_parents\n                    .parents(challenge.node as u32, challenge.layer as u32)\n                    .collect()\n            };\n\n            let parents_proofs = if challenge.layer == 1 {\n                // no parents for layer 1\n                Vec::new()\n            } else {\n                let parents_tree = &priv_inputs.t_aux.layers[challenge.layer - 2];\n                parents\n                    .iter()\n                    .map(|parent| {\n                        // challenge is adjusted as the trees span all windows\n                        parents_tree\n                            .gen_cached_proof(\n                                challenge.window * config.num_nodes_window + *parent as usize,\n                                Some(rows_to_discard),\n                            )\n                            .context(\"failed to create parent proof\")\n                    })\n                    .collect::<Result<_>>()?\n            };\n\n            // roots for the layers\n            let mut comm_layers = priv_inputs.p_aux.comm_layers.clone();\n            comm_layers.push(priv_inputs.p_aux.comm_replica);\n\n            proofs.push(Proof::new(\n                data_proof,\n                layer_proof,\n                parents_proofs,\n                comm_layers,\n            ));\n        }\n\n        Ok(vec![proofs])\n    }\n\n    fn verify_all_partitions(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        partition_proofs: &[Self::Proof],\n    ) -> Result<bool> {\n        let config = &pub_params.config;\n\n        let butterfly_parents = super::ButterflyGraph::from(config);\n        let exp_parents = super::ExpanderGraph::from(config);\n\n        let is_valid = partition_proofs.iter().enumerate().all(|(k, proofs)| {\n            let pub_inputs = Self::with_partition(pub_inputs.clone(), Some(k));\n            let tau = pub_inputs.tau.as_ref().expect(\"missing tau\");\n\n            let challenges = Challenges::new(\n                config,\n                pub_params.num_challenges_window,\n                &pub_inputs.replica_id,\n                pub_inputs.seed,\n            );\n\n            for (proof, challenge) in proofs.iter().zip(challenges) {\n                trace!(\"verifying challenge {:?}\", challenge);\n\n                // verify comm_r\n                let last = proof.comm_layers.len() - 1;\n                let comm_r: <Tree::Hasher as Hasher>::Domain =\n                    hash_comm_r(&proof.comm_layers[..last], proof.comm_layers[last]).into();\n                check_eq!(comm_r, tau.comm_r);\n\n                // verify data inclusion\n                check!(proof.data_proof.verify());\n                check_eq!(proof.data_proof.root(), tau.comm_d);\n\n                // verify layer inclusion\n                check!(proof.layer_proof.verify());\n                check_eq!(\n                    proof.layer_proof.root(),\n                    proof.comm_layers[challenge.layer - 1]\n     
           );\n\n                // Verify labeling\n                for parent_proof in &proof.parents_proofs {\n                    check!(parent_proof.verify());\n                    check_eq!(parent_proof.root(), proof.comm_layers[challenge.layer - 2]);\n                }\n                let parent_indices = proof\n                    .parents_proofs\n                    .iter()\n                    .map(|p| p.path_index())\n                    .collect::<Vec<usize>>();\n\n                if challenge.layer == 1 {\n                    // no parents for the mask layer\n                    check_eq!(proof.parents_proofs.len(), 0, \"mask parents length\");\n                } else if config.is_layer_expander(challenge.layer) {\n                    check_eq!(\n                        proof.parents_proofs.len(),\n                        config.k as usize * config.degree_expander,\n                        \"expander parents length\"\n                    );\n                    check_eq!(\n                        &parent_indices,\n                        &exp_parents\n                            .expanded_parents(challenge.node as u32)\n                            .map(|p| challenge.window * config.num_nodes_window + p as usize)\n                            .collect::<Vec<_>>(),\n                        \"expander parent indices\"\n                    );\n                } else {\n                    check_eq!(\n                        proof.parents_proofs.len(),\n                        config.degree_butterfly,\n                        \"butterfly parents length\"\n                    );\n                    check_eq!(\n                        &parent_indices,\n                        &butterfly_parents\n                            .parents(challenge.node as u32, challenge.layer as u32)\n                            .map(|p| challenge.window * config.num_nodes_window + p as usize)\n                            .collect::<Vec<_>>(),\n                        \"butterfly parent indices\"\n                    );\n                }\n\n                // actual labeling\n                let data: Vec<_> = proof\n                    .parents_proofs\n                    .iter()\n                    .map(|parent_proof| parent_proof.leaf())\n                    .collect();\n                let prefix = hash_prefix(\n                    challenge.layer as u32,\n                    challenge.node as u32,\n                    challenge.window as u32,\n                );\n\n                let mut hasher = Sha256::new();\n                // Hash prefix + replica id, each 32 bytes.\n                hasher.input(&[&prefix[..], AsRef::<[u8]>::as_ref(&pub_inputs.replica_id)]);\n\n                let label = if challenge.layer == 1 {\n                    // Mask layer hashing\n                    let mut label = hasher.finish();\n                    truncate_hash(&mut label);\n                    label\n                } else if config.is_layer_expander(challenge.layer) {\n                    // Expander \"batch\" hashing\n                    batch_hash_expanded(config.k as usize, config.degree_expander, hasher, &data)\n                } else {\n                    // Butterfly hashing\n                    for chunk in data.chunks(2) {\n                        hasher.input(&[\n                            AsRef::<[u8]>::as_ref(&chunk[0]),\n                            AsRef::<[u8]>::as_ref(&chunk[1]),\n                        ]);\n                    }\n                    let mut label = hasher.finish();\n        
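            // As in the mask-layer branch above, the raw digest is truncated before it is used as a label.\n        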
            truncate_hash(&mut label);\n                    label\n                };\n\n                let expected_value = proof.layer_proof.leaf();\n                if config.is_layer_replica(challenge.layer) {\n                    let key = <Tree::Hasher as Hasher>::Domain::try_from_bytes(&label).unwrap();\n                    let data_node_fr: Fr = proof.data_proof.leaf().into();\n                    let data_node = data_node_fr.into();\n\n                    let encoded = encode::encode(key, data_node);\n                    check_eq!(\n                        AsRef::<[u8]>::as_ref(&encoded),\n                        AsRef::<[u8]>::as_ref(&expected_value),\n                        \"encoding check: {:?}\",\n                        challenge,\n                    );\n                } else {\n                    check_eq!(\n                        &label,\n                        AsRef::<[u8]>::as_ref(&expected_value),\n                        \"labeling check: {:?}\",\n                        challenge,\n                    );\n                }\n            }\n\n            true\n        });\n\n        Ok(is_valid)\n    }\n\n    fn with_partition(mut pub_in: Self::PublicInputs, k: Option<usize>) -> Self::PublicInputs {\n        pub_in.k = k;\n        pub_in\n    }\n\n    fn satisfies_requirements(\n        _public_params: &PublicParams<Tree>,\n        _requirements: &ChallengeRequirements,\n        _partitions: usize,\n    ) -> bool {\n        todo!()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use generic_array::typenum::{Unsigned, U0, U2, U8};\n    use merkletree::store::StoreConfig;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::cache_key::CacheKey;\n    use storage_proofs_core::{\n        hasher::{Domain, PoseidonHasher, Sha256Hasher},\n        merkle::LCTree,\n        proof::ProofScheme,\n    };\n\n    use super::super::{Config, TemporaryAuxCache};\n    use crate::PoRep;\n\n    #[test]\n    fn test_prove_verify() {\n        // femme::pretty::Logger::new()\n        //     .start(log::LevelFilter::Debug)\n        //     .ok();\n\n        type Tree = LCTree<PoseidonHasher, U8, U8, U0>;\n\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n        let replica_id = <PoseidonHasher as Hasher>::Domain::random(rng);\n        let config = Config {\n            k: 4,\n            num_nodes_window: 64,\n            degree_expander: 12,\n            degree_butterfly: 8,\n            num_expander_layers: 4,\n            num_butterfly_layers: 3,\n            sector_size: 64 * 32 * 8,\n        };\n\n        let data: Vec<u8> = (0..config.num_nodes_sector())\n            .flat_map(|_| {\n                let v = <PoseidonHasher as Hasher>::Domain::random(rng);\n                v.into_bytes()\n            })\n            .collect();\n\n        // create a copy, so we can compare roundtrips\n        let mut data_copy = data.clone();\n\n        let sp = SetupParams {\n            config: config.clone(),\n            num_challenges_window: 2,\n        };\n\n        let pp = NarrowStackedExpander::<Tree, Sha256Hasher>::setup(&sp).expect(\"setup failed\");\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempfile::tempdir().unwrap();\n        let store_config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            
StoreConfig::default_rows_to_discard(config.num_nodes_sector(), U2::to_usize()),\n        );\n\n        // Generate a replica path.\n        let temp_dir = tempdir::TempDir::new(\"test-extract-all\").unwrap();\n        let temp_path = temp_dir.path();\n        let replica_path = temp_path.join(\"replica-path\");\n\n        let (tau, (p_aux, t_aux)) = NarrowStackedExpander::<Tree, Sha256Hasher>::replicate(\n            &pp,\n            &replica_id,\n            (&mut data_copy[..]).into(),\n            None,\n            store_config.clone(),\n            replica_path.clone(),\n        )\n        .expect(\"replication failed\");\n        assert_ne!(data, data_copy);\n\n        std::fs::write(&replica_path, &data_copy).expect(\"failed to store replica\");\n\n        let seed = rng.gen();\n\n        let pub_inputs = PublicInputs::<\n            <<Tree as MerkleTreeTrait>::Hasher as Hasher>::Domain,\n            <Sha256Hasher as Hasher>::Domain,\n        > {\n            replica_id,\n            seed,\n            tau: Some(tau),\n            k: None,\n        };\n\n        // Store a copy of the t_aux for later resource deletion.\n        let t_aux_orig = t_aux.clone();\n\n        // Convert TemporaryAux to TemporaryAuxCache, which instantiates all\n        // elements based on the configs stored in TemporaryAux.\n        let t_aux = TemporaryAuxCache::<Tree, Sha256Hasher>::new(&config, &t_aux, replica_path)\n            .expect(\"failed to restore contents of t_aux\");\n\n        let priv_inputs = PrivateInputs { p_aux, t_aux };\n        let partitions = 1;\n\n        let all_partition_proofs =\n            &NarrowStackedExpander::<Tree, Sha256Hasher>::prove_all_partitions(\n                &pp,\n                &pub_inputs,\n                &priv_inputs,\n                partitions,\n            )\n            .expect(\"failed to generate partition proofs\");\n\n        let proofs_are_valid = NarrowStackedExpander::<Tree, Sha256Hasher>::verify_all_partitions(\n            &pp,\n            &pub_inputs,\n            all_partition_proofs,\n        )\n        .expect(\"failed to verify partition proofs\");\n\n        // Discard cached MTs that are no longer needed.\n        t_aux_orig.clear_temp().expect(\"t_aux delete failed\");\n\n        assert!(proofs_are_valid);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/circuit/column.rs",
    "content": "use bellperson::gadgets::num;\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse paired::bls12_381::{Bls12, Fr};\nuse storage_proofs_core::{hasher::Hasher, merkle::MerkleTreeTrait};\n\nuse super::hash::hash_single_column;\nuse crate::stacked::{Column as VanillaColumn, PublicParams};\n\n#[derive(Debug, Clone)]\npub struct Column {\n    rows: Vec<Option<Fr>>,\n}\n\n#[derive(Clone)]\npub struct AllocatedColumn {\n    rows: Vec<num::AllocatedNum<Bls12>>,\n}\n\nimpl<H: Hasher> From<VanillaColumn<H>> for Column {\n    fn from(other: VanillaColumn<H>) -> Self {\n        let VanillaColumn { rows, .. } = other;\n\n        Column {\n            rows: rows.into_iter().map(|r| Some(r.into())).collect(),\n        }\n    }\n}\n\nimpl Column {\n    /// Create an empty `Column`, used in `blank_circuit`s.\n    pub fn empty<Tree: MerkleTreeTrait>(params: &PublicParams<Tree>) -> Self {\n        Column {\n            rows: vec![None; params.layer_challenges.layers()],\n        }\n    }\n\n    /// Consume this column, and allocate its values in the circuit.\n    pub fn alloc<CS: ConstraintSystem<Bls12>>(\n        self,\n        mut cs: CS,\n    ) -> Result<AllocatedColumn, SynthesisError> {\n        let Self { rows } = self;\n\n        let rows = rows\n            .into_iter()\n            .enumerate()\n            .map(|(i, val)| {\n                num::AllocatedNum::alloc(cs.namespace(|| format!(\"column_num_row_{}\", i)), || {\n                    val.ok_or_else(|| SynthesisError::AssignmentMissing)\n                })\n            })\n            .collect::<Result<Vec<_>, _>>()?;\n\n        Ok(AllocatedColumn { rows })\n    }\n}\n\nimpl AllocatedColumn {\n    /// Creates the column hash of this column.\n    pub fn hash<CS: ConstraintSystem<Bls12>>(\n        &self,\n        cs: CS,\n    ) -> Result<num::AllocatedNum<Bls12>, SynthesisError> {\n        hash_single_column(cs, &self.rows)\n    }\n\n    pub fn get_value(&self, layer: usize) -> &num::AllocatedNum<Bls12> {\n        assert!(layer > 0, \"layers are 1 indexed\");\n        assert!(\n            layer <= self.rows.len(),\n            \"layer {} out of range: 1..={}\",\n            layer,\n            self.rows.len()\n        );\n        &self.rows[layer - 1]\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/circuit/column_proof.rs",
    "content": "use bellperson::{ConstraintSystem, SynthesisError};\nuse paired::bls12_381::Bls12;\nuse storage_proofs_core::{\n    drgraph::Graph,\n    gadgets::por::AuthPath,\n    hasher::{Hasher, PoseidonArity},\n    merkle::{MerkleProofTrait, MerkleTreeTrait, Store},\n};\n\nuse super::column::{AllocatedColumn, Column};\nuse crate::stacked::{ColumnProof as VanillaColumnProof, PublicParams};\n\n#[derive(Debug, Clone)]\npub struct ColumnProof<\n    H: Hasher,\n    U: 'static + PoseidonArity,\n    V: 'static + PoseidonArity,\n    W: 'static + PoseidonArity,\n> {\n    column: Column,\n    inclusion_path: AuthPath<H, U, V, W>,\n}\n\nimpl<\n        H: 'static + Hasher,\n        U: 'static + PoseidonArity,\n        V: 'static + PoseidonArity,\n        W: 'static + PoseidonArity,\n    > ColumnProof<H, U, V, W>\n{\n    /// Create an empty `ColumnProof`, used in `blank_circuit`s.\n    pub fn empty<\n        S: Store<H::Domain>,\n        Tree: MerkleTreeTrait<Hasher = H, Store = S, Arity = U, SubTreeArity = V, TopTreeArity = W>,\n    >(\n        params: &PublicParams<Tree>,\n    ) -> Self {\n        ColumnProof {\n            column: Column::empty(params),\n            inclusion_path: AuthPath::blank(params.graph.size()),\n        }\n    }\n\n    /// Allocate the private inputs for this column proof, and return the inclusion path for verification.\n    pub fn alloc<CS: ConstraintSystem<Bls12>>(\n        self,\n        mut cs: CS,\n    ) -> Result<(AllocatedColumn, AuthPath<H, U, V, W>), SynthesisError> {\n        let ColumnProof {\n            inclusion_path,\n            column,\n        } = self;\n\n        let column = column.alloc(cs.namespace(|| \"column\"))?;\n\n        Ok((column, inclusion_path))\n    }\n}\n\nimpl<Proof: MerkleProofTrait> From<VanillaColumnProof<Proof>>\n    for ColumnProof<Proof::Hasher, Proof::Arity, Proof::SubTreeArity, Proof::TopTreeArity>\n{\n    fn from(vanilla_proof: VanillaColumnProof<Proof>) -> Self {\n        let VanillaColumnProof {\n            column,\n            inclusion_proof,\n        } = vanilla_proof;\n\n        ColumnProof {\n            column: column.into(),\n            inclusion_path: inclusion_proof.as_options().into(),\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/circuit/create_label.rs",
    "content": "use bellperson::gadgets::{\n    boolean::Boolean,\n    sha256::sha256 as sha256_circuit,\n    {multipack, num},\n};\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse ff::PrimeField;\nuse fil_sapling_crypto::jubjub::JubjubEngine;\nuse storage_proofs_core::gadgets::uint64;\n\nuse super::super::vanilla::TOTAL_PARENTS;\n\n/// Compute a single label.\npub fn create_label_circuit<E, CS>(\n    mut cs: CS,\n    replica_id: &[Boolean],\n    parents: Vec<Vec<Boolean>>,\n    node: uint64::UInt64,\n) -> Result<num::AllocatedNum<E>, SynthesisError>\nwhere\n    E: JubjubEngine,\n    CS: ConstraintSystem<E>,\n{\n    assert!(replica_id.len() <= 256, \"replica id is too large\");\n    assert_eq!(parents.len(), TOTAL_PARENTS, \"invalid sized parents\");\n\n    // ciphertexts will become a buffer of the layout\n    // id | node | parent_node_0 | parent_node_1 | ...\n\n    let mut ciphertexts = replica_id.to_vec();\n\n    // pad to 32 bytes\n    while ciphertexts.len() < 256 {\n        ciphertexts.push(Boolean::constant(false));\n    }\n\n    ciphertexts.extend_from_slice(&node.to_bits_be());\n    // pad to 64 bytes\n    while ciphertexts.len() < 512 {\n        ciphertexts.push(Boolean::constant(false));\n    }\n\n    for parent in parents.iter() {\n        ciphertexts.extend_from_slice(parent);\n\n        // pad such that each parents take 32 bytes\n        while ciphertexts.len() % 256 != 0 {\n            ciphertexts.push(Boolean::constant(false));\n        }\n    }\n\n    // 32b replica id\n    // 32b node\n    // 37 * 32b  = 1184b parents\n    assert_eq!(ciphertexts.len(), (1 + 1 + TOTAL_PARENTS) * 32 * 8);\n\n    // Compute Sha256\n    let alloc_bits = sha256_circuit(cs.namespace(|| \"hash\"), &ciphertexts[..])?;\n\n    // Convert the hash result into a single Fr.\n    let fr = if alloc_bits[0].get_value().is_some() {\n        let be_bits = alloc_bits\n            .iter()\n            .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing))\n            .collect::<Result<Vec<bool>, SynthesisError>>()?;\n\n        let le_bits = be_bits\n            .chunks(8)\n            .flat_map(|chunk| chunk.iter().rev())\n            .copied()\n            .take(E::Fr::CAPACITY as usize)\n            .collect::<Vec<bool>>();\n\n        Ok(multipack::compute_multipacking::<E>(&le_bits)[0])\n    } else {\n        Err(SynthesisError::AssignmentMissing)\n    };\n\n    num::AllocatedNum::<E>::alloc(cs.namespace(|| \"result_num\"), || fr)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::gadgets::boolean::Boolean;\n    use bellperson::ConstraintSystem;\n    use ff::Field;\n    use paired::bls12_381::{Bls12, Fr};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        drgraph::{new_seed, Graph, BASE_DEGREE},\n        fr32::{bytes_into_fr, fr_into_bytes},\n        gadgets::TestConstraintSystem,\n        hasher::Sha256Hasher,\n        util::bytes_into_boolean_vec_be,\n        util::{data_at_node, NODE_SIZE},\n    };\n\n    use crate::stacked::vanilla::{StackedBucketGraph, EXP_DEGREE, TOTAL_PARENTS};\n\n    #[test]\n    fn test_create_label() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let size = 64;\n\n        let graph = StackedBucketGraph::<Sha256Hasher>::new_stacked(\n            size,\n            BASE_DEGREE,\n            EXP_DEGREE,\n            new_seed(),\n        )\n        .unwrap();\n\n        let id_fr = 
Fr::random(rng);\n        let id: Vec<u8> = fr_into_bytes(&id_fr);\n        let node = 22;\n\n        let mut data: Vec<u8> = (0..2 * size)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        let mut parents = vec![0; BASE_DEGREE + EXP_DEGREE];\n        graph.parents(node, &mut parents).unwrap();\n\n        let raw_parents_bytes: Vec<Vec<u8>> = parents\n            .iter()\n            .enumerate()\n            .map(|(i, p)| {\n                if i < BASE_DEGREE {\n                    // base\n                    data_at_node(&data[..size * NODE_SIZE], *p as usize)\n                        .unwrap()\n                        .to_vec()\n                } else {\n                    // exp\n                    data_at_node(&data[size * NODE_SIZE..], *p as usize)\n                        .unwrap()\n                        .to_vec()\n                }\n            })\n            .collect();\n\n        let mut parents_bytes = raw_parents_bytes.clone(); // 14\n        parents_bytes.extend_from_slice(&raw_parents_bytes); // 28\n        parents_bytes.extend_from_slice(&raw_parents_bytes[..9]); // 37\n\n        assert_eq!(parents_bytes.len(), TOTAL_PARENTS);\n        let parents_bits: Vec<Vec<Boolean>> = parents_bytes\n            .iter()\n            .enumerate()\n            .map(|(i, p)| {\n                let mut cs = cs.namespace(|| format!(\"parents {}\", i));\n                bytes_into_boolean_vec_be(&mut cs, Some(p), p.len()).unwrap()\n            })\n            .collect();\n\n        let id_bits: Vec<Boolean> = {\n            let mut cs = cs.namespace(|| \"id\");\n            bytes_into_boolean_vec_be(&mut cs, Some(id.as_slice()), id.len()).unwrap()\n        };\n\n        let node_alloc = uint64::UInt64::constant(node as u64);\n\n        let out = create_label_circuit(\n            cs.namespace(|| \"create_label\"),\n            &id_bits,\n            parents_bits.clone(),\n            node_alloc,\n        )\n        .expect(\"key derivation function failed\");\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n        assert_eq!(cs.num_constraints(), 532_024);\n\n        let (l1, l2) = data.split_at_mut(size * NODE_SIZE);\n        super::super::super::vanilla::create_label_exp(&graph, &id_fr.into(), &*l2, l1, node)\n            .unwrap();\n        let expected_raw = data_at_node(&l1, node).unwrap();\n        let expected = bytes_into_fr(expected_raw).unwrap();\n\n        assert_eq!(\n            expected,\n            out.get_value().unwrap(),\n            \"circuit and non circuit do not match\"\n        );\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/circuit/hash.rs",
    "content": "use bellperson::gadgets::num;\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse generic_array::typenum;\nuse neptune::circuit::poseidon_hash;\nuse paired::bls12_381::Bls12;\n\n/// Hash a list of bits.\npub fn hash_single_column<CS>(\n    cs: CS,\n    column: &[num::AllocatedNum<Bls12>],\n) -> Result<num::AllocatedNum<Bls12>, SynthesisError>\nwhere\n    CS: ConstraintSystem<Bls12>,\n{\n    match column.len() {\n        2 => poseidon_hash::<CS, Bls12, typenum::U2>(\n            cs,\n            column.to_vec(),\n            &*storage_proofs_core::hasher::types::POSEIDON_CONSTANTS_2,\n        ),\n        11 => poseidon_hash::<CS, Bls12, typenum::U11>(\n            cs,\n            column.to_vec(),\n            &*storage_proofs_core::hasher::types::POSEIDON_CONSTANTS_11,\n        ),\n        _ => panic!(\"unsupported column size: {}\", column.len()),\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::ConstraintSystem;\n    use ff::Field;\n    use paired::bls12_381::{Bls12, Fr};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::gadgets::TestConstraintSystem;\n    use storage_proofs_core::hasher::{HashFunction, Hasher, PedersenHasher};\n\n    use crate::stacked::vanilla::hash::hash_single_column as vanilla_hash_single_column;\n\n    #[test]\n    fn test_hash2_circuit() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for _ in 0..10 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let a = Fr::random(rng);\n            let b = Fr::random(rng);\n\n            let a_num = {\n                let mut cs = cs.namespace(|| \"a\");\n                num::AllocatedNum::alloc(&mut cs, || Ok(a)).unwrap()\n            };\n\n            let b_num = {\n                let mut cs = cs.namespace(|| \"b\");\n                num::AllocatedNum::alloc(&mut cs, || Ok(b)).unwrap()\n            };\n\n            let out = <PedersenHasher as Hasher>::Function::hash2_circuit(\n                cs.namespace(|| \"hash2\"),\n                &a_num,\n                &b_num,\n            )\n            .expect(\"hash2 function failed\");\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n            assert_eq!(cs.num_constraints(), 1_371);\n\n            let expected: Fr =\n                <PedersenHasher as Hasher>::Function::hash2(&a.into(), &b.into()).into();\n\n            assert_eq!(\n                expected,\n                out.get_value().unwrap(),\n                \"circuit and non circuit do not match\"\n            );\n        }\n    }\n\n    #[test]\n    fn test_hash_single_column_circuit() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        for _ in 0..1 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let vals = vec![Fr::random(rng); 11];\n            let vals_opt = vals\n                .iter()\n                .enumerate()\n                .map(|(i, v)| {\n                    num::AllocatedNum::alloc(cs.namespace(|| format!(\"num_{}\", i)), || Ok(*v))\n                        .unwrap()\n                })\n                .collect::<Vec<_>>();\n\n            let out = hash_single_column(cs.namespace(|| \"hash_single_column\"), &vals_opt)\n                .expect(\"hash_single_column function failed\");\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n            assert_eq!(cs.num_constraints(), 601);\n\n            let expected: Fr = 
vanilla_hash_single_column(&vals).into();\n\n            assert_eq!(\n                expected,\n                out.get_value().unwrap(),\n                \"circuit and non circuit do not match\"\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/circuit/mod.rs",
    "content": "mod column;\nmod column_proof;\nmod create_label;\nmod hash;\nmod params;\nmod proof;\n\npub use self::create_label::*;\npub use self::proof::{StackedCircuit, StackedCompound};\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/circuit/params.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::gadgets::{boolean::Boolean, num, uint32};\nuse bellperson::{ConstraintSystem, SynthesisError};\nuse generic_array::typenum::{U0, U2};\nuse paired::bls12_381::{Bls12, Fr};\nuse storage_proofs_core::{\n    drgraph::Graph,\n    gadgets::por::{AuthPath, PoRCircuit},\n    gadgets::{encode::encode, uint64, variables::Root},\n    hasher::{Hasher, PoseidonArity},\n    merkle::{DiskStore, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper},\n    util::reverse_bit_numbering,\n};\n\nuse super::{\n    column_proof::ColumnProof, create_label_circuit as create_label, hash::hash_single_column,\n};\nuse crate::stacked::{\n    Proof as VanillaProof, PublicParams, ReplicaColumnProof as VanillaReplicaColumnProof,\n};\n\ntype TreeAuthPath<T> = AuthPath<\n    <T as MerkleTreeTrait>::Hasher,\n    <T as MerkleTreeTrait>::Arity,\n    <T as MerkleTreeTrait>::SubTreeArity,\n    <T as MerkleTreeTrait>::TopTreeArity,\n>;\n\ntype TreeColumnProof<T> = ColumnProof<\n    <T as MerkleTreeTrait>::Hasher,\n    <T as MerkleTreeTrait>::Arity,\n    <T as MerkleTreeTrait>::SubTreeArity,\n    <T as MerkleTreeTrait>::TopTreeArity,\n>;\n\n/// Proof for a single challenge.\n#[derive(Debug)]\npub struct Proof<Tree: MerkleTreeTrait, G: Hasher> {\n    /// Inclusion path for the challenged data node in tree D.\n    pub comm_d_path: AuthPath<G, U2, U0, U0>,\n    /// The value of the challenged data node.\n    pub data_leaf: Option<Fr>,\n    /// The index of the challenged node.\n    pub challenge: Option<u64>,\n    /// Inclusion path of the challenged replica node in tree R.\n    pub comm_r_last_path: TreeAuthPath<Tree>,\n    /// Inclusion path of the column hash of the challenged node  in tree C.\n    pub comm_c_path: TreeAuthPath<Tree>,\n    /// Column proofs for the drg parents.\n    pub drg_parents_proofs: Vec<TreeColumnProof<Tree>>,\n    /// Column proofs for the expander parents.\n    pub exp_parents_proofs: Vec<TreeColumnProof<Tree>>,\n    _t: PhantomData<Tree>,\n}\n\nimpl<Tree: MerkleTreeTrait, G: 'static + Hasher> Proof<Tree, G> {\n    /// Create an empty proof, used in `blank_circuit`s.\n    pub fn empty(params: &PublicParams<Tree>) -> Self {\n        Proof {\n            comm_d_path: AuthPath::blank(params.graph.size()),\n            data_leaf: None,\n            challenge: None,\n            comm_r_last_path: AuthPath::blank(params.graph.size()),\n            comm_c_path: AuthPath::blank(params.graph.size()),\n            drg_parents_proofs: vec![\n                ColumnProof::empty(params);\n                params.graph.base_graph().degree()\n            ],\n            exp_parents_proofs: vec![ColumnProof::empty(params); params.graph.expansion_degree()],\n            _t: PhantomData,\n        }\n    }\n\n    /// Circuit synthesis.\n    #[allow(clippy::too_many_arguments)]\n    pub fn synthesize<CS: ConstraintSystem<Bls12>>(\n        self,\n        mut cs: CS,\n        layers: usize,\n        comm_d: &num::AllocatedNum<Bls12>,\n        comm_c: &num::AllocatedNum<Bls12>,\n        comm_r_last: &num::AllocatedNum<Bls12>,\n        replica_id: &[Boolean],\n    ) -> Result<(), SynthesisError> {\n        let Proof {\n            comm_d_path,\n            data_leaf,\n            challenge,\n            comm_r_last_path,\n            comm_c_path,\n            drg_parents_proofs,\n            exp_parents_proofs,\n            ..\n        } = self;\n\n        // -- verify initial data layer\n\n        // PrivateInput: data_leaf\n        let data_leaf_num = 
num::AllocatedNum::alloc(cs.namespace(|| \"data_leaf\"), || {\n            data_leaf.ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        // enforce inclusion of the data leaf in the tree D\n        enforce_inclusion(\n            cs.namespace(|| \"comm_d_inclusion\"),\n            comm_d_path,\n            comm_d,\n            &data_leaf_num,\n        )?;\n\n        // -- verify replica column openings\n\n        // Private Inputs for the DRG parent nodes.\n        let mut drg_parents = Vec::new();\n\n        for (i, parent) in drg_parents_proofs.into_iter().enumerate() {\n            let (parent_col, inclusion_path) =\n                parent.alloc(cs.namespace(|| format!(\"drg_parent_{}_num\", i)))?;\n\n            // calculate column hash\n            let val = parent_col.hash(cs.namespace(|| format!(\"drg_parent_{}_constraint\", i)))?;\n            // enforce inclusion of the column hash in the tree C\n            enforce_inclusion(\n                cs.namespace(|| format!(\"drg_parent_{}_inclusion\", i)),\n                inclusion_path,\n                comm_c,\n                &val,\n            )?;\n            drg_parents.push(parent_col);\n        }\n\n        // Private Inputs for the Expander parent nodes.\n        let mut exp_parents = Vec::new();\n\n        for (i, parent) in exp_parents_proofs.into_iter().enumerate() {\n            let (parent_col, inclusion_path) =\n                parent.alloc(cs.namespace(|| format!(\"exp_parent_{}_num\", i)))?;\n\n            // calculate column hash\n            let val = parent_col.hash(cs.namespace(|| format!(\"exp_parent_{}_constraint\", i)))?;\n            // enforce inclusion of the column hash in the tree C\n            enforce_inclusion(\n                cs.namespace(|| format!(\"exp_parent_{}_inclusion\", i)),\n                inclusion_path,\n                comm_c,\n                &val,\n            )?;\n            exp_parents.push(parent_col);\n        }\n\n        // -- Verify labeling and encoding\n\n        // stores the labels of the challenged column\n        let mut column_labels = Vec::new();\n\n        // PublicInput: challenge index\n        let challenge_num = uint64::UInt64::alloc(cs.namespace(|| \"challenge\"), challenge)?;\n        challenge_num.pack_into_input(cs.namespace(|| \"challenge input\"))?;\n\n        for layer in 1..=layers {\n            let layer_num = uint32::UInt32::constant(layer as u32);\n\n            let mut cs = cs.namespace(|| format!(\"labeling_{}\", layer));\n\n            // Collect the parents\n            let mut parents = Vec::new();\n\n            // all layers have drg parents\n            for parent_col in &drg_parents {\n                let parent_val_num = parent_col.get_value(layer);\n                let parent_val_bits =\n                    reverse_bit_numbering(parent_val_num.to_bits_le(\n                        cs.namespace(|| format!(\"drg_parent_{}_bits\", parents.len())),\n                    )?);\n                parents.push(parent_val_bits);\n            }\n\n            // the first layer does not contain expander parents\n            if layer > 1 {\n                for parent_col in &exp_parents {\n                    // subtract 1 from the layer index, as the exp parents, are shifted by one, as they\n                    // do not store a value for the first layer\n                    let parent_val_num = parent_col.get_value(layer - 1);\n                    let parent_val_bits = reverse_bit_numbering(parent_val_num.to_bits_le(\n                  
      cs.namespace(|| format!(\"exp_parent_{}_bits\", parents.len())),\n                    )?);\n                    parents.push(parent_val_bits);\n                }\n            }\n\n            // Duplicate parents, according to the hashing algorithm.\n            let mut expanded_parents = parents.clone();\n            if layer > 1 {\n                expanded_parents.extend_from_slice(&parents); // 28\n                expanded_parents.extend_from_slice(&parents[..9]); // 37\n            } else {\n                // layer 1 only has drg parents\n                expanded_parents.extend_from_slice(&parents); // 12\n                expanded_parents.extend_from_slice(&parents); // 18\n                expanded_parents.extend_from_slice(&parents); // 24\n                expanded_parents.extend_from_slice(&parents); // 30\n                expanded_parents.extend_from_slice(&parents); // 36\n                expanded_parents.push(parents[0].clone()); // 37\n            };\n\n            // Reconstruct the label\n            let label = create_label(\n                cs.namespace(|| \"create_label\"),\n                replica_id,\n                expanded_parents,\n                layer_num,\n                challenge_num.clone(),\n            )?;\n            column_labels.push(label);\n        }\n\n        // -- encoding node\n        {\n            // encode the node\n\n            // key is the last label\n            let key = &column_labels[column_labels.len() - 1];\n            let encoded_node = encode(cs.namespace(|| \"encode_node\"), key, &data_leaf_num)?;\n\n            // verify inclusion of the encoded node\n            enforce_inclusion(\n                cs.namespace(|| \"comm_r_last_data_inclusion\"),\n                comm_r_last_path,\n                comm_r_last,\n                &encoded_node,\n            )?;\n        }\n\n        // -- ensure the column hash of the labels is included\n        {\n            // calculate column_hash\n            let column_hash =\n                hash_single_column(cs.namespace(|| \"c_x_column_hash\"), &column_labels)?;\n\n            // enforce inclusion of the column hash in the tree C\n            enforce_inclusion(\n                cs.namespace(|| \"c_x_inclusion\"),\n                comm_c_path,\n                comm_c,\n                &column_hash,\n            )?;\n        }\n\n        Ok(())\n    }\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> From<VanillaProof<Tree, G>> for Proof<Tree, G>\nwhere\n    Tree::Hasher: 'static,\n{\n    fn from(vanilla_proof: VanillaProof<Tree, G>) -> Self {\n        let VanillaProof {\n            comm_d_proofs,\n            comm_r_last_proof,\n            replica_column_proofs,\n            labeling_proofs,\n            ..\n        } = vanilla_proof;\n        let VanillaReplicaColumnProof {\n            c_x,\n            drg_parents,\n            exp_parents,\n        } = replica_column_proofs;\n\n        let data_leaf = Some(comm_d_proofs.leaf().into());\n\n        Proof {\n            comm_d_path: comm_d_proofs.as_options().into(),\n            data_leaf,\n            challenge: Some(labeling_proofs[0].node),\n            comm_r_last_path: comm_r_last_proof.as_options().into(),\n            comm_c_path: c_x.inclusion_proof.as_options().into(),\n            drg_parents_proofs: drg_parents.into_iter().map(|p| p.into()).collect(),\n            exp_parents_proofs: exp_parents.into_iter().map(|p| p.into()).collect(),\n            _t: PhantomData,\n        }\n    }\n}\n\n/// Enforce the inclusion of the given 
leaf at the given path under the given root.\nfn enforce_inclusion<H, U, V, W, CS: ConstraintSystem<Bls12>>(\n    cs: CS,\n    path: AuthPath<H, U, V, W>,\n    root: &num::AllocatedNum<Bls12>,\n    leaf: &num::AllocatedNum<Bls12>,\n) -> Result<(), SynthesisError>\nwhere\n    H: 'static + Hasher,\n    U: 'static + PoseidonArity,\n    V: 'static + PoseidonArity,\n    W: 'static + PoseidonArity,\n{\n    let root = Root::from_allocated::<CS>(root.clone());\n    let leaf = Root::from_allocated::<CS>(leaf.clone());\n\n    PoRCircuit::<MerkleTreeWrapper<H, DiskStore<H::Domain>, U, V, W>>::synthesize(\n        cs, leaf, path, root, true,\n    )?;\n\n    Ok(())\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/circuit/proof.rs",
    "content": "use std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse bellperson::gadgets::num;\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse paired::bls12_381::{Bls12, Fr};\nuse storage_proofs_core::{\n    compound_proof::{CircuitComponent, CompoundProof},\n    drgraph::Graph,\n    error::Result,\n    fr32::u64_into_fr,\n    gadgets::constraint,\n    gadgets::por::PoRCompound,\n    hasher::{HashFunction, Hasher},\n    merkle::{BinaryMerkleTree, MerkleTreeTrait},\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    por,\n    proof::ProofScheme,\n    util::reverse_bit_numbering,\n};\n\nuse super::params::Proof;\nuse crate::stacked::StackedDrg;\n\n/// Stacked DRG based Proof of Replication.\n///\n/// # Fields\n///\n/// * `params` - parameters for the curve\n///\npub struct StackedCircuit<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> {\n    public_params: <StackedDrg<'a, Tree, G> as ProofScheme<'a>>::PublicParams,\n    replica_id: Option<<Tree::Hasher as Hasher>::Domain>,\n    comm_d: Option<G::Domain>,\n    comm_r: Option<<Tree::Hasher as Hasher>::Domain>,\n    comm_r_last: Option<<Tree::Hasher as Hasher>::Domain>,\n    comm_c: Option<<Tree::Hasher as Hasher>::Domain>,\n\n    // one proof per challenge\n    proofs: Vec<Proof<Tree, G>>,\n}\n\nimpl<'a, Tree: MerkleTreeTrait, G: Hasher> CircuitComponent for StackedCircuit<'a, Tree, G> {\n    type ComponentPrivateInputs = ();\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedCircuit<'a, Tree, G> {\n    #[allow(clippy::too_many_arguments)]\n    pub fn synthesize<CS>(\n        mut cs: CS,\n        public_params: <StackedDrg<'a, Tree, G> as ProofScheme<'a>>::PublicParams,\n        replica_id: Option<<Tree::Hasher as Hasher>::Domain>,\n        comm_d: Option<G::Domain>,\n        comm_r: Option<<Tree::Hasher as Hasher>::Domain>,\n        comm_r_last: Option<<Tree::Hasher as Hasher>::Domain>,\n        comm_c: Option<<Tree::Hasher as Hasher>::Domain>,\n        proofs: Vec<Proof<Tree, G>>,\n    ) -> Result<(), SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        let circuit = StackedCircuit::<'a, Tree, G> {\n            public_params,\n            replica_id,\n            comm_d,\n            comm_r,\n            comm_r_last,\n            comm_c,\n            proofs,\n        };\n\n        circuit.synthesize(&mut cs)\n    }\n}\n\nimpl<'a, Tree: MerkleTreeTrait, G: Hasher> Circuit<Bls12> for StackedCircuit<'a, Tree, G> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let StackedCircuit {\n            public_params,\n            proofs,\n            replica_id,\n            comm_r,\n            comm_d,\n            comm_r_last,\n            comm_c,\n            ..\n        } = self;\n\n        // Allocate replica_id\n        let replica_id_num = num::AllocatedNum::alloc(cs.namespace(|| \"replica_id\"), || {\n            replica_id\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        // make replica_id a public input\n        replica_id_num.inputize(cs.namespace(|| \"replica_id_input\"))?;\n\n        let replica_id_bits =\n            reverse_bit_numbering(replica_id_num.to_bits_le(cs.namespace(|| \"replica_id_bits\"))?);\n\n        // Allocate comm_d as Fr\n        let comm_d_num = num::AllocatedNum::alloc(cs.namespace(|| \"comm_d\"), || {\n            comm_d\n                .map(Into::into)\n                
.ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        // make comm_d a public input\n        comm_d_num.inputize(cs.namespace(|| \"comm_d_input\"))?;\n\n        // Allocate comm_r as Fr\n        let comm_r_num = num::AllocatedNum::alloc(cs.namespace(|| \"comm_r\"), || {\n            comm_r\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        // make comm_r a public input\n        comm_r_num.inputize(cs.namespace(|| \"comm_r_input\"))?;\n\n        // Allocate comm_r_last as Fr\n        let comm_r_last_num = num::AllocatedNum::alloc(cs.namespace(|| \"comm_r_last\"), || {\n            comm_r_last\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        // Allocate comm_c as Fr\n        let comm_c_num = num::AllocatedNum::alloc(cs.namespace(|| \"comm_c\"), || {\n            comm_c\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        // Verify comm_r = H(comm_c || comm_r_last)\n        {\n            let hash_num = <Tree::Hasher as Hasher>::Function::hash2_circuit(\n                cs.namespace(|| \"H_comm_c_comm_r_last\"),\n                &comm_c_num,\n                &comm_r_last_num,\n            )?;\n\n            // Check actual equality\n            constraint::equal(\n                cs,\n                || \"enforce comm_r = H(comm_c || comm_r_last)\",\n                &comm_r_num,\n                &hash_num,\n            );\n        }\n\n        for (i, proof) in proofs.into_iter().enumerate() {\n            proof.synthesize(\n                &mut cs.namespace(|| format!(\"challenge_{}\", i)),\n                public_params.layer_challenges.layers(),\n                &comm_d_num,\n                &comm_c_num,\n                &comm_r_last_num,\n                &replica_id_bits,\n            )?;\n        }\n\n        Ok(())\n    }\n}\n\n#[allow(dead_code)]\npub struct StackedCompound<Tree: MerkleTreeTrait, G: Hasher> {\n    partitions: Option<usize>,\n    _t: PhantomData<Tree>,\n    _g: PhantomData<G>,\n}\n\nimpl<C: Circuit<Bls12>, P: ParameterSetMetadata, Tree: MerkleTreeTrait, G: Hasher>\n    CacheableParameters<C, P> for StackedCompound<Tree, G>\n{\n    fn cache_prefix() -> String {\n        format!(\n            \"stacked-proof-of-replication-{}-{}\",\n            Tree::display(),\n            G::name()\n        )\n    }\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher>\n    CompoundProof<'a, StackedDrg<'a, Tree, G>, StackedCircuit<'a, Tree, G>>\n    for StackedCompound<Tree, G>\n{\n    fn generate_public_inputs(\n        pub_in: &<StackedDrg<Tree, G> as ProofScheme>::PublicInputs,\n        pub_params: &<StackedDrg<Tree, G> as ProofScheme>::PublicParams,\n        k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        let graph = &pub_params.graph;\n\n        let mut inputs = Vec::new();\n\n        let replica_id = pub_in.replica_id;\n        inputs.push(replica_id.into());\n\n        let comm_d = pub_in.tau.as_ref().expect(\"missing tau\").comm_d;\n        inputs.push(comm_d.into());\n\n        let comm_r = pub_in.tau.as_ref().expect(\"missing tau\").comm_r;\n        inputs.push(comm_r.into());\n\n        let por_setup_params = por::SetupParams {\n            leaves: graph.size(),\n            private: true,\n        };\n\n        let por_params = por::PoR::<Tree>::setup(&por_setup_params)?;\n        let por_params_d = 
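\n        // tree D over comm_d is always binary and uses hasher G, so it gets its\n        // own PoR parameters, separate from the Tree-shaped ones above\n        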
por::PoR::<BinaryMerkleTree<G>>::setup(&por_setup_params)?;\n\n        let all_challenges = pub_in.challenges(&pub_params.layer_challenges, graph.size(), k);\n\n        for challenge in all_challenges.into_iter() {\n            // comm_d inclusion proof for the data leaf\n            inputs.extend(generate_inclusion_inputs::<BinaryMerkleTree<G>>(\n                &por_params_d,\n                challenge,\n                k,\n            )?);\n\n            // drg parents\n            let mut drg_parents = vec![0; graph.base_graph().degree()];\n            graph.base_graph().parents(challenge, &mut drg_parents)?;\n\n            // Inclusion Proofs: drg parent node in comm_c\n            for parent in drg_parents.into_iter() {\n                inputs.extend(generate_inclusion_inputs::<Tree>(\n                    &por_params,\n                    parent as usize,\n                    k,\n                )?);\n            }\n\n            // exp parents\n            let mut exp_parents = vec![0; graph.expansion_degree()];\n            graph.expanded_parents(challenge, &mut exp_parents)?;\n\n            // Inclusion Proofs: expander parent node in comm_c\n            for parent in exp_parents.into_iter() {\n                inputs.extend(generate_inclusion_inputs::<Tree>(\n                    &por_params,\n                    parent as usize,\n                    k,\n                )?);\n            }\n\n            inputs.push(u64_into_fr(challenge as u64));\n\n            // Inclusion Proof: encoded node in comm_r_last\n            inputs.extend(generate_inclusion_inputs::<Tree>(\n                &por_params,\n                challenge,\n                k,\n            )?);\n\n            // Inclusion Proof: column hash of the challenged node in comm_c\n            inputs.extend(generate_inclusion_inputs::<Tree>(\n                &por_params,\n                challenge,\n                k,\n            )?);\n        }\n\n        Ok(inputs)\n    }\n\n    fn circuit<'b>(\n        public_inputs: &'b <StackedDrg<Tree, G> as ProofScheme>::PublicInputs,\n        _component_private_inputs: <StackedCircuit<'a, Tree, G> as CircuitComponent>::ComponentPrivateInputs,\n        vanilla_proof: &'b <StackedDrg<Tree, G> as ProofScheme>::Proof,\n        public_params: &'b <StackedDrg<Tree, G> as ProofScheme>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<StackedCircuit<'a, Tree, G>> {\n        ensure!(\n            !vanilla_proof.is_empty(),\n            \"Cannot create a circuit with no vanilla proofs\"\n        );\n\n        let comm_r_last = vanilla_proof[0].comm_r_last();\n        let comm_c = vanilla_proof[0].comm_c();\n\n        // ensure consistency\n        ensure!(\n            vanilla_proof.iter().all(|p| p.comm_r_last() == comm_r_last),\n            \"inconsistent comm_r_lasts\"\n        );\n        ensure!(\n            vanilla_proof.iter().all(|p| p.comm_c() == comm_c),\n            \"inconsistent comm_cs\"\n        );\n\n        Ok(StackedCircuit {\n            public_params: public_params.clone(),\n            replica_id: Some(public_inputs.replica_id),\n            comm_d: public_inputs.tau.as_ref().map(|t| t.comm_d),\n            comm_r: public_inputs.tau.as_ref().map(|t| t.comm_r),\n            comm_r_last: Some(comm_r_last),\n            comm_c: Some(comm_c),\n            proofs: vanilla_proof.iter().cloned().map(|p| p.into()).collect(),\n        })\n    }\n\n    fn blank_circuit(\n        public_params: &<StackedDrg<Tree, G> as ProofScheme>::PublicParams,\n    ) -> 
StackedCircuit<'a, Tree, G> {\n        StackedCircuit {\n            public_params: public_params.clone(),\n            replica_id: None,\n            comm_d: None,\n            comm_r: None,\n            comm_r_last: None,\n            comm_c: None,\n            proofs: (0..public_params.layer_challenges.challenges_count_all())\n                .map(|_challenge_index| Proof::empty(public_params))\n                .collect(),\n        }\n    }\n}\n\n/// Helper to generate public inputs for inclusion proofs.\nfn generate_inclusion_inputs<Tree: 'static + MerkleTreeTrait>(\n    por_params: &por::PublicParams,\n    challenge: usize,\n    k: Option<usize>,\n) -> Result<Vec<Fr>> {\n    let pub_inputs = por::PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n        challenge,\n        commitment: None,\n    };\n\n    PoRCompound::<Tree>::generate_public_inputs(&pub_inputs, por_params, k)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use ff::Field;\n    use generic_array::typenum::{U0, U2, U4, U8};\n    use merkletree::store::StoreConfig;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        cache_key::CacheKey,\n        compound_proof,\n        drgraph::BASE_DEGREE,\n        fr32::fr_into_bytes,\n        gadgets::{MetricCS, TestConstraintSystem},\n        hasher::{Hasher, PedersenHasher, PoseidonHasher, Sha256Hasher},\n        merkle::{get_base_tree_count, DiskTree, MerkleTreeTrait},\n        proof::ProofScheme,\n        test_helper::setup_replica,\n        util::default_rows_to_discard,\n    };\n\n    use crate::stacked::{\n        ChallengeRequirements, LayerChallenges, PrivateInputs, PublicInputs, SetupParams,\n        TemporaryAux, TemporaryAuxCache, BINARY_ARITY, EXP_DEGREE,\n    };\n    use crate::PoRep;\n\n    #[test]\n    fn stacked_input_circuit_pedersen_base_2() {\n        stacked_input_circuit::<DiskTree<PedersenHasher, U2, U0, U0>>(22, 1_258_152);\n    }\n\n    #[test]\n    fn stacked_input_circuit_poseidon_base_2() {\n        stacked_input_circuit::<DiskTree<PoseidonHasher, U2, U0, U0>>(22, 1_206_212);\n    }\n\n    #[test]\n    fn stacked_input_circuit_poseidon_base_8() {\n        stacked_input_circuit::<DiskTree<PoseidonHasher, U8, U0, U0>>(22, 1_199_620);\n    }\n\n    #[test]\n    fn stacked_input_circuit_poseidon_sub_8_4() {\n        stacked_input_circuit::<DiskTree<PoseidonHasher, U8, U4, U0>>(22, 1_296_576);\n    }\n\n    #[test]\n    fn stacked_input_circuit_poseidon_top_8_4_2() {\n        stacked_input_circuit::<DiskTree<PoseidonHasher, U8, U4, U2>>(22, 1_346_982);\n    }\n\n    fn stacked_input_circuit<Tree: MerkleTreeTrait + 'static>(\n        expected_inputs: usize,\n        expected_constraints: usize,\n    ) {\n        let nodes = 8 * get_base_tree_count::<Tree>();\n        let degree = BASE_DEGREE;\n        let expansion_degree = EXP_DEGREE;\n        let num_layers = 2;\n        let layer_challenges = LayerChallenges::new(num_layers, 1);\n\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let replica_id: Fr = Fr::random(rng);\n        let data: Vec<u8> = (0..nodes)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempfile::tempdir().unwrap();\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            
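// rows of tree-d that may be discarded from the on-disk cache\n            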
default_rows_to_discard(nodes, BINARY_ARITY),\n        );\n\n        // Generate a replica path.\n        let replica_path = cache_dir.path().join(\"replica-path\");\n        let mut mmapped_data = setup_replica(&data, &replica_path);\n\n        let arbitrary_porep_id = [44; 32];\n        let sp = SetupParams {\n            nodes,\n            degree,\n            expansion_degree,\n            porep_id: arbitrary_porep_id,\n            layer_challenges: layer_challenges.clone(),\n        };\n\n        let pp = StackedDrg::<Tree, Sha256Hasher>::setup(&sp).expect(\"setup failed\");\n        let (tau, (p_aux, t_aux)) = StackedDrg::<Tree, Sha256Hasher>::replicate(\n            &pp,\n            &replica_id.into(),\n            (mmapped_data.as_mut()).into(),\n            None,\n            config,\n            replica_path.clone(),\n        )\n        .expect(\"replication failed\");\n\n        let mut copied = vec![0; data.len()];\n        copied.copy_from_slice(&mmapped_data);\n        assert_ne!(data, copied, \"replication did not change data\");\n\n        let seed = rng.gen();\n        let pub_inputs =\n            PublicInputs::<<Tree::Hasher as Hasher>::Domain, <Sha256Hasher as Hasher>::Domain> {\n                replica_id: replica_id.into(),\n                seed,\n                tau: Some(tau.into()),\n                k: None,\n            };\n\n        // Store copy of original t_aux for later resource deletion.\n        let t_aux_orig = t_aux.clone();\n\n        // Convert TemporaryAux to TemporaryAuxCache, which instantiates all\n        // elements based on the configs stored in TemporaryAux.\n        let t_aux = TemporaryAuxCache::<Tree, Sha256Hasher>::new(&t_aux, replica_path.clone())\n            .expect(\"failed to restore contents of t_aux\");\n\n        let priv_inputs = PrivateInputs::<Tree, Sha256Hasher> {\n            p_aux: p_aux.into(),\n            t_aux: t_aux.into(),\n        };\n\n        let proofs = StackedDrg::<Tree, Sha256Hasher>::prove_all_partitions(\n            &pp,\n            &pub_inputs,\n            &priv_inputs,\n            1,\n        )\n        .expect(\"failed to generate partition proofs\");\n\n        let proofs_are_valid =\n            StackedDrg::<Tree, Sha256Hasher>::verify_all_partitions(&pp, &pub_inputs, &proofs)\n                .expect(\"failed while trying to verify partition proofs\");\n\n        assert!(proofs_are_valid);\n\n        // Discard cached MTs that are no longer needed.\n        TemporaryAux::<Tree, Sha256Hasher>::clear_temp(t_aux_orig).expect(\"t_aux delete failed\");\n\n        {\n            // Verify that MetricCS returns the same metrics as TestConstraintSystem.\n            let mut cs = MetricCS::<Bls12>::new();\n\n            StackedCompound::<Tree, Sha256Hasher>::circuit(\n                &pub_inputs,\n                <StackedCircuit<Tree, Sha256Hasher> as CircuitComponent>::ComponentPrivateInputs::default(),\n                &proofs[0],\n                &pp,\n                None,\n            )\n            .expect(\"circuit failed\")\n            .synthesize(&mut cs.namespace(|| \"stacked drgporep\"))\n            .expect(\"failed to synthesize circuit\");\n\n            assert_eq!(cs.num_inputs(), expected_inputs, \"wrong number of inputs\");\n            assert_eq!(\n                cs.num_constraints(),\n                expected_constraints,\n                \"wrong number of constraints\"\n            );\n        }\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        StackedCompound::<Tree, 
Sha256Hasher>::circuit(\n            &pub_inputs,\n            <StackedCircuit<Tree, Sha256Hasher> as CircuitComponent>::ComponentPrivateInputs::default(),\n            &proofs[0],\n            &pp,\n            None,\n        )\n        .expect(\"circuit failed\")\n        .synthesize(&mut cs.namespace(|| \"stacked drgporep\"))\n        .expect(\"failed to synthesize circuit\");\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n        assert_eq!(cs.num_inputs(), expected_inputs, \"wrong number of inputs\");\n        assert_eq!(\n            cs.num_constraints(),\n            expected_constraints,\n            \"wrong number of constraints\"\n        );\n\n        assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n        let generated_inputs = <StackedCompound<Tree, Sha256Hasher> as CompoundProof<\n            StackedDrg<Tree, Sha256Hasher>,\n            _,\n        >>::generate_public_inputs(&pub_inputs, &pp, None)\n        .expect(\"failed to generate public inputs\");\n        let expected_inputs = cs.get_inputs();\n\n        for ((input, label), generated_input) in\n            expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n        {\n            assert_eq!(input, generated_input, \"{}\", label);\n        }\n\n        assert_eq!(\n            generated_inputs.len(),\n            expected_inputs.len() - 1,\n            \"inputs are not the same length\"\n        );\n\n        cache_dir.close().expect(\"Failed to remove cache dir\");\n    }\n\n    #[test]\n    #[ignore]\n    fn test_stacked_compound_pedersen() {\n        stacked_test_compound::<DiskTree<PedersenHasher, U2, U0, U0>>();\n    }\n\n    #[test]\n    #[ignore]\n    fn test_stacked_compound_poseidon_base_8() {\n        stacked_test_compound::<DiskTree<PoseidonHasher, U8, U0, U0>>();\n    }\n\n    #[test]\n    #[ignore]\n    fn test_stacked_compound_poseidon_sub_8_4() {\n        stacked_test_compound::<DiskTree<PoseidonHasher, U8, U4, U0>>();\n    }\n\n    #[test]\n    #[ignore]\n    fn test_stacked_compound_poseidon_top_8_4_2() {\n        stacked_test_compound::<DiskTree<PoseidonHasher, U8, U4, U2>>();\n    }\n\n    fn stacked_test_compound<Tree: 'static + MerkleTreeTrait>() {\n        let nodes = 8 * get_base_tree_count::<Tree>();\n\n        let degree = BASE_DEGREE;\n        let expansion_degree = EXP_DEGREE;\n        let num_layers = 2;\n        let layer_challenges = LayerChallenges::new(num_layers, 1);\n        let partition_count = 1;\n\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let replica_id: Fr = Fr::random(rng);\n        let data: Vec<u8> = (0..nodes)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        let arbitrary_porep_id = [55; 32];\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: SetupParams {\n                nodes,\n                degree,\n                expansion_degree,\n                porep_id: arbitrary_porep_id,\n                layer_challenges: layer_challenges.clone(),\n            },\n            partitions: Some(partition_count),\n            priority: false,\n        };\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempfile::tempdir().unwrap();\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            default_rows_to_discard(nodes, BINARY_ARITY),\n        );\n\n        // 
Generate a replica path.\n        let replica_path = cache_dir.path().join(\"replica-path\");\n        let mut mmapped_data = setup_replica(&data, &replica_path);\n\n        let public_params = StackedCompound::setup(&setup_params).expect(\"setup failed\");\n        let (tau, (p_aux, t_aux)) = StackedDrg::<Tree, _>::replicate(\n            &public_params.vanilla_params,\n            &replica_id.into(),\n            (mmapped_data.as_mut()).into(),\n            None,\n            config,\n            replica_path.clone(),\n        )\n        .expect(\"replication failed\");\n\n        let mut copied = vec![0; data.len()];\n        copied.copy_from_slice(&mmapped_data);\n        assert_ne!(data, copied, \"replication did not change data\");\n\n        let seed = rng.gen();\n        let public_inputs =\n            PublicInputs::<<Tree::Hasher as Hasher>::Domain, <Sha256Hasher as Hasher>::Domain> {\n                replica_id: replica_id.into(),\n                seed,\n                tau: Some(tau),\n                k: None,\n            };\n\n        // Store a copy of the t_aux for later resource deletion.\n        let t_aux_orig = t_aux.clone();\n\n        // Convert TemporaryAux to TemporaryAuxCache, which instantiates all\n        // elements based on the configs stored in TemporaryAux.\n        let t_aux = TemporaryAuxCache::<Tree, _>::new(&t_aux, replica_path.clone())\n            .expect(\"failed to restore contents of t_aux\");\n\n        let private_inputs = PrivateInputs::<Tree, Sha256Hasher> { p_aux, t_aux };\n\n        {\n            let (circuit, inputs) =\n                StackedCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                    .unwrap();\n\n            let mut cs = TestConstraintSystem::new();\n\n            circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n\n            if !cs.is_satisfied() {\n                panic!(\n                    \"failed to satisfy: {:?}\",\n                    cs.which_is_unsatisfied().unwrap()\n                );\n            }\n            assert!(\n                cs.verify(&inputs),\n                \"verification failed with TestConstraintSystem and generated inputs\"\n            );\n        }\n\n        // Use this to debug differences between blank and regular circuit generation.\n        {\n            let (circuit1, _inputs) =\n                StackedCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                    .unwrap();\n            let blank_circuit = <StackedCompound<Tree, Sha256Hasher> as CompoundProof<\n                StackedDrg<Tree, Sha256Hasher>,\n                _,\n            >>::blank_circuit(&public_params.vanilla_params);\n\n            let mut cs_blank = MetricCS::new();\n            blank_circuit\n                .synthesize(&mut cs_blank)\n                .expect(\"failed to synthesize\");\n\n            let a = cs_blank.pretty_print_list();\n\n            let mut cs1 = TestConstraintSystem::new();\n            circuit1.synthesize(&mut cs1).expect(\"failed to synthesize\");\n            let b = cs1.pretty_print_list();\n\n            for (i, (a, b)) in a.chunks(100).zip(b.chunks(100)).enumerate() {\n                assert_eq!(a, b, \"failed at chunk {}\", i);\n            }\n        }\n\n        let blank_groth_params = <StackedCompound<Tree, Sha256Hasher> as CompoundProof<\n            StackedDrg<Tree, Sha256Hasher>,\n            _,\n        >>::groth_params(Some(rng), &public_params.vanilla_params)\n        .expect(\"failed 
to generate groth params\");\n\n        // Discard cached MTs that are no longer needed.\n        TemporaryAux::<Tree, Sha256Hasher>::clear_temp(t_aux_orig).expect(\"t_aux delete failed\");\n\n        let proof = StackedCompound::prove(\n            &public_params,\n            &public_inputs,\n            &private_inputs,\n            &blank_groth_params,\n        )\n        .expect(\"failed while proving\");\n\n        let verified = StackedCompound::verify(\n            &public_params,\n            &public_inputs,\n            &proof,\n            &ChallengeRequirements {\n                minimum_challenges: 1,\n            },\n        )\n        .expect(\"failed while verifying\");\n\n        assert!(verified);\n\n        cache_dir.close().expect(\"Failed to remove cache dir\");\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/mod.rs",
    "content": "mod circuit;\nmod vanilla;\n\npub use self::circuit::*;\npub use self::vanilla::*;\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/cache.rs",
    "content": "use std::path::PathBuf;\n\nuse anyhow::{bail, ensure, Context};\nuse byteorder::{ByteOrder, LittleEndian};\nuse log::info;\nuse rayon::prelude::*;\nuse sha2::{Digest, Sha256};\n\nuse storage_proofs_core::{\n    drgraph::Graph,\n    drgraph::BASE_DEGREE,\n    error::Result,\n    hasher::Hasher,\n    parameter_cache::{ParameterSetMetadata, VERSION},\n};\n\nuse super::graph::{StackedGraph, DEGREE};\n\n/// Path in which to store the parents caches.\nconst PARENT_CACHE_DIR: &str = \"/var/tmp/filecoin-parents\";\n\n/// u32 = 4 bytes\nconst NODE_BYTES: usize = 4;\n\n// StackedGraph will hold two different (but related) `ParentCache`,\n#[derive(Debug)]\npub struct ParentCache {\n    /// Disk path for the cache.\n    path: PathBuf,\n    /// The total number of cache entries.\n    num_cache_entries: u32,\n    cache: CacheData,\n}\n\n#[derive(Debug)]\nstruct CacheData {\n    /// This is a large list of fixed (parent) sized arrays.\n    data: memmap::Mmap,\n    /// Offset in nodes.\n    offset: u32,\n    /// Len in nodes.\n    len: u32,\n    /// The underlyling file.\n    file: std::fs::File,\n}\n\nimpl CacheData {\n    /// Change the cache to point to the newly passed in offset.\n    ///\n    /// The `new_offset` must be set, such that `new_offset + len` does not\n    /// overflow the underlying data.\n    fn shift(&mut self, new_offset: u32) -> Result<()> {\n        if self.offset == new_offset {\n            return Ok(());\n        }\n\n        let offset = new_offset as usize * DEGREE * NODE_BYTES;\n        let len = self.len as usize * DEGREE * NODE_BYTES;\n\n        self.data = unsafe {\n            memmap::MmapOptions::new()\n                .offset(offset as u64)\n                .len(len)\n                .map(&self.file)\n                .context(\"could not shift mmap}\")?\n        };\n        self.offset = new_offset;\n\n        Ok(())\n    }\n\n    /// Returns true if this node is in the cached range.\n    fn contains(&self, node: u32) -> bool {\n        node >= self.offset && node < self.offset + self.len\n    }\n\n    /// Read the parents for the given node from cache.\n    ///\n    /// Panics if the `node` is not in the cache.\n    fn read(&self, node: u32) -> [u32; DEGREE] {\n        assert!(node >= self.offset, \"node not in cache\");\n        let start = (node - self.offset) as usize * DEGREE * NODE_BYTES;\n        let end = start + DEGREE * NODE_BYTES;\n\n        let mut res = [0u32; DEGREE];\n        LittleEndian::read_u32_into(&self.data[start..end], &mut res);\n        res\n    }\n\n    fn reset(&mut self) -> Result<()> {\n        if self.offset == 0 {\n            return Ok(());\n        }\n\n        self.shift(0)\n    }\n\n    fn open(offset: u32, len: u32, path: &PathBuf) -> Result<Self> {\n        let min_cache_size = (offset + len) as usize * DEGREE * NODE_BYTES;\n\n        let file = std::fs::OpenOptions::new()\n            .read(true)\n            .open(&path)\n            .with_context(|| format!(\"could not open path={}\", path.display()))?;\n\n        let actual_len = file.metadata()?.len();\n        if actual_len < min_cache_size as u64 {\n            bail!(\n                \"corrupted cache: {}, expected at least {}, got {} bytes\",\n                path.display(),\n                min_cache_size,\n                actual_len\n            );\n        }\n\n        let data = unsafe {\n            memmap::MmapOptions::new()\n                .offset((offset as usize * DEGREE * NODE_BYTES) as u64)\n                .len(len as usize * DEGREE * 
NODE_BYTES)\n                .map(&file)\n                .with_context(|| format!(\"could not mmap path={}\", path.display()))?\n        };\n\n        Ok(Self {\n            data,\n            file,\n            len,\n            offset,\n        })\n    }\n}\n\nimpl ParentCache {\n    pub fn new<H, G>(len: u32, cache_entries: u32, graph: &StackedGraph<H, G>) -> Result<Self>\n    where\n        H: Hasher,\n        G: Graph<H> + ParameterSetMetadata + Send + Sync,\n    {\n        let path = cache_path(cache_entries, graph);\n        if path.exists() {\n            Self::open(len, cache_entries, path)\n        } else {\n            Self::generate(len, cache_entries, graph, path)\n        }\n    }\n\n    /// Opens an existing cache from disk.\n    pub fn open(len: u32, cache_entries: u32, path: PathBuf) -> Result<Self> {\n        info!(\"parent cache: opening {}\", path.display());\n\n        let cache = CacheData::open(0, len, &path)?;\n        info!(\"parent cache: opened\");\n\n        Ok(ParentCache {\n            cache,\n            path,\n            num_cache_entries: cache_entries,\n        })\n    }\n\n    /// Generates a new cache and stores it on disk.\n    pub fn generate<H, G>(\n        len: u32,\n        cache_entries: u32,\n        graph: &StackedGraph<H, G>,\n        path: PathBuf,\n    ) -> Result<Self>\n    where\n        H: Hasher,\n        G: Graph<H> + ParameterSetMetadata + Send + Sync,\n    {\n        info!(\"parent cache: generating {}\", path.display());\n\n        std::fs::create_dir_all(PARENT_CACHE_DIR).context(\"unable to create parent cache dir\")?;\n\n        let file = std::fs::OpenOptions::new()\n            .read(true)\n            .write(true)\n            .create(true)\n            .open(&path)\n            .with_context(|| format!(\"could not open path={}\", path.display()))?;\n\n        let cache_size = cache_entries as usize * NODE_BYTES * DEGREE;\n        file.set_len(cache_size as u64)\n            .with_context(|| format!(\"failed to set length: {}\", cache_size))?;\n\n        let mut data = unsafe {\n            memmap::MmapOptions::new()\n                .map_mut(&file)\n                .with_context(|| format!(\"could not mmap path={}\", path.display()))?\n        };\n\n        data.par_chunks_mut(DEGREE * NODE_BYTES)\n            .enumerate()\n            .try_for_each(|(node, entry)| -> Result<()> {\n                let mut parents = [0u32; DEGREE];\n                graph\n                    .base_graph()\n                    .parents(node, &mut parents[..BASE_DEGREE])?;\n                graph.generate_expanded_parents(node, &mut parents[BASE_DEGREE..]);\n\n                LittleEndian::write_u32_into(&parents, entry);\n                Ok(())\n            })?;\n\n        info!(\"parent cache: generated\");\n        data.flush().context(\"failed to flush parent cache\")?;\n        drop(data);\n\n        info!(\"parent cache: written to disk\");\n\n        Ok(ParentCache {\n            cache: CacheData::open(0, len, &path)?,\n            path,\n            num_cache_entries: cache_entries,\n        })\n    }\n\n    /// Read a single cache element at position `node`.\n    pub fn read(&mut self, node: u32) -> Result<[u32; DEGREE]> {\n        if self.cache.contains(node) {\n            return Ok(self.cache.read(node));\n        }\n\n        // not in memory, shift cache\n        ensure!(\n            node >= self.cache.offset + self.cache.len,\n            \"cache must be read in ascending order {} < {} + {}\",\n            node,\n            
self.cache.offset,\n            self.cache.len,\n        );\n\n        // Shift cache by its current size.\n        let new_offset =\n            (self.num_cache_entries - self.cache.len).min(self.cache.offset + self.cache.len);\n        self.cache.shift(new_offset)?;\n\n        Ok(self.cache.read(node))\n    }\n\n    /// Resets the partial cache to the beginning.\n    pub fn reset(&mut self) -> Result<()> {\n        self.cache.reset()\n    }\n}\n\nfn cache_path<H, G>(cache_entries: u32, graph: &StackedGraph<H, G>) -> PathBuf\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata + Send + Sync,\n{\n    let mut hasher = Sha256::default();\n\n    hasher.input(H::name());\n    hasher.input(graph.identifier());\n    for key in &graph.feistel_keys {\n        hasher.input(key.to_le_bytes());\n    }\n    hasher.input(cache_entries.to_le_bytes());\n    let h = hasher.result();\n    PathBuf::from(PARENT_CACHE_DIR).join(format!(\n        \"v{}-sdr-parent-{}.cache\",\n        VERSION,\n        hex::encode(h),\n    ))\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use crate::stacked::vanilla::graph::{StackedBucketGraph, EXP_DEGREE};\n    use storage_proofs_core::hasher::PoseidonHasher;\n\n    #[test]\n    fn test_read_full_range() {\n        let nodes = 24u32;\n        let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(\n            nodes as usize,\n            BASE_DEGREE,\n            EXP_DEGREE,\n            [0u8; 32],\n        )\n        .unwrap();\n\n        let mut cache = ParentCache::new(nodes, nodes, &graph).unwrap();\n\n        for node in 0..nodes {\n            let mut expected_parents = [0; DEGREE];\n            graph.parents(node as usize, &mut expected_parents).unwrap();\n            let parents = cache.read(node).unwrap();\n\n            assert_eq!(expected_parents, parents);\n        }\n    }\n\n    #[test]\n    fn test_read_partial_range() {\n        let nodes = 48u32;\n        let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(\n            nodes as usize,\n            BASE_DEGREE,\n            EXP_DEGREE,\n            [0u8; 32],\n        )\n        .unwrap();\n\n        let mut half_cache = ParentCache::new(nodes / 2, nodes, &graph).unwrap();\n        let mut quarter_cache = ParentCache::new(nodes / 4, nodes, &graph).unwrap();\n\n        for node in 0..nodes {\n            let mut expected_parents = [0; DEGREE];\n            graph.parents(node as usize, &mut expected_parents).unwrap();\n\n            let parents = half_cache.read(node).unwrap();\n            assert_eq!(expected_parents, parents);\n\n            let parents = quarter_cache.read(node).unwrap();\n            assert_eq!(expected_parents, parents);\n\n            // some internal checks to make sure the cache works as expected\n            assert_eq!(\n                half_cache.cache.data.len() / DEGREE / NODE_BYTES,\n                nodes as usize / 2\n            );\n            assert_eq!(\n                quarter_cache.cache.data.len() / DEGREE / NODE_BYTES,\n                nodes as usize / 4\n            );\n        }\n\n        half_cache.reset().unwrap();\n        quarter_cache.reset().unwrap();\n\n        for node in 0..nodes {\n            let mut expected_parents = [0; DEGREE];\n            graph.parents(node as usize, &mut expected_parents).unwrap();\n\n            let parents = half_cache.read(node).unwrap();\n            assert_eq!(expected_parents, parents);\n\n            let parents = quarter_cache.read(node).unwrap();\n            assert_eq!(expected_parents, 
parents);\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/challenges.rs",
    "content": "use num_bigint::BigUint;\nuse num_traits::cast::ToPrimitive;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\n\nuse storage_proofs_core::hasher::Domain;\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct LayerChallenges {\n    /// How many layers we are generating challenges for.\n    layers: usize,\n    /// The maximum count of challenges\n    max_count: usize,\n}\n\nimpl LayerChallenges {\n    pub const fn new(layers: usize, max_count: usize) -> Self {\n        LayerChallenges { layers, max_count }\n    }\n\n    pub fn layers(&self) -> usize {\n        self.layers\n    }\n\n    pub fn challenges_count_all(&self) -> usize {\n        self.max_count\n    }\n\n    /// Derive all challenges.\n    pub fn derive<D: Domain>(\n        &self,\n        leaves: usize,\n        replica_id: &D,\n        seed: &[u8; 32],\n        k: u8,\n    ) -> Vec<usize> {\n        self.derive_internal(self.challenges_count_all(), leaves, replica_id, seed, k)\n    }\n\n    pub fn derive_internal<D: Domain>(\n        &self,\n        challenges_count: usize,\n        leaves: usize,\n        replica_id: &D,\n        seed: &[u8; 32],\n        k: u8,\n    ) -> Vec<usize> {\n        assert!(leaves > 2, \"Too few leaves: {}\", leaves);\n\n        (0..challenges_count)\n            .map(|i| {\n                let j: u32 = ((challenges_count * k as usize) + i) as u32;\n\n                let hash = Sha256::new()\n                    .chain(replica_id.into_bytes())\n                    .chain(seed)\n                    .chain(&j.to_le_bytes())\n                    .result();\n\n                let big_challenge = BigUint::from_bytes_le(hash.as_ref());\n\n                // For now, we cannot try to prove the first or last node, so make sure the challenge\n                // can never be 0.\n                let big_mod_challenge = big_challenge % (leaves - 1);\n                let big_mod_challenge = big_mod_challenge\n                    .to_usize()\n                    .expect(\"`big_mod_challenge` exceeds size of `usize`\");\n                big_mod_challenge + 1\n            })\n            .collect()\n    }\n}\n\n#[derive(Debug, Default)]\npub struct ChallengeRequirements {\n    pub minimum_challenges: usize,\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n\n    use rand::{thread_rng, Rng};\n    use std::collections::HashMap;\n    use storage_proofs_core::hasher::pedersen::PedersenDomain;\n\n    #[test]\n    fn challenge_derivation() {\n        let n = 200;\n        let layers = 100;\n\n        let challenges = LayerChallenges::new(layers, n);\n        let leaves = 1 << 30;\n        let rng = &mut thread_rng();\n        let replica_id: PedersenDomain = PedersenDomain::random(rng);\n        let seed: [u8; 32] = rng.gen();\n        let partitions = 5;\n        let total_challenges = partitions * n;\n\n        let mut layers_with_duplicates = 0;\n\n        for _layer in 1..=layers {\n            let mut histogram = HashMap::new();\n            for k in 0..partitions {\n                let challenges = challenges.derive(leaves, &replica_id, &seed, k as u8);\n\n                for challenge in challenges {\n                    let counter = histogram.entry(challenge).or_insert(0);\n                    *counter += 1;\n                }\n            }\n            let unique_challenges = histogram.len();\n            if unique_challenges < total_challenges {\n                layers_with_duplicates += 1;\n            }\n        }\n\n        // If we generate 100 layers with 1,000 
challenges in each, at most two layers can contain\n        // any duplicates for this assertion to succeed.\n        // assert!(layers_with_duplicates < 3);\n        // TODO: verify why this now fails\n        println!(\"duplicates: {}\", layers_with_duplicates);\n    }\n\n    #[test]\n    // This test shows that partitioning (k = 0..partitions) generates the same challenges as\n    // generating the same number of challenges with only one partition (k = 0).\n    fn challenge_partition_equivalence() {\n        let n = 40;\n        let leaves = 1 << 30;\n        let rng = &mut thread_rng();\n        let replica_id: PedersenDomain = PedersenDomain::random(rng);\n        let seed: [u8; 32] = rng.gen();\n        let partitions = 5;\n        let layers = 100;\n        let total_challenges = n * partitions;\n\n        for _layer in 1..=layers {\n            let one_partition_challenges = LayerChallenges::new(layers, total_challenges).derive(\n                leaves,\n                &replica_id,\n                &seed,\n                0,\n            );\n            let many_partition_challenges = (0..partitions)\n                .flat_map(|k| {\n                    LayerChallenges::new(layers, n).derive(leaves, &replica_id, &seed, k as u8)\n                })\n                .collect::<Vec<_>>();\n\n            assert_eq!(one_partition_challenges, many_partition_challenges);\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/column.rs",
    "content": "use std::marker::PhantomData;\n\nuse paired::bls12_381::Fr;\nuse serde::{Deserialize, Serialize};\nuse storage_proofs_core::{\n    error::Result,\n    hasher::Hasher,\n    merkle::{MerkleTreeTrait, Store},\n};\n\nuse super::{column_proof::ColumnProof, hash::hash_single_column};\n\n#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]\npub struct Column<H: Hasher> {\n    pub(crate) index: u32,\n    pub(crate) rows: Vec<H::Domain>,\n    _h: PhantomData<H>,\n}\n\nimpl<H: Hasher> Column<H> {\n    pub fn new(index: u32, rows: Vec<H::Domain>) -> Result<Self> {\n        Ok(Column {\n            index,\n            rows,\n            _h: PhantomData,\n        })\n    }\n\n    pub fn with_capacity(index: u32, capacity: usize) -> Result<Self> {\n        Column::new(index, Vec::with_capacity(capacity))\n    }\n\n    pub fn rows(&self) -> &[H::Domain] {\n        &self.rows\n    }\n\n    pub fn index(&self) -> u32 {\n        self.index\n    }\n\n    /// Calculate the column hashes `C_i = H(E_i, O_i)` for the passed in column.\n    pub fn hash(&self) -> Fr {\n        hash_single_column(\n            &self\n                .rows\n                .iter()\n                .copied()\n                .map(Into::into)\n                .collect::<Vec<_>>(),\n        )\n    }\n\n    pub fn get_node_at_layer(&self, layer: usize) -> Result<&H::Domain> {\n        assert!(layer > 0, \"layer must be greater than 0\");\n        let row_index = layer - 1;\n\n        Ok(&self.rows[row_index])\n    }\n\n    /// Create a column proof for this column.\n    pub fn into_proof<S: Store<H::Domain>, Tree: MerkleTreeTrait<Hasher = H, Store = S>>(\n        self,\n        tree_c: &Tree,\n    ) -> Result<ColumnProof<Tree::Proof>> {\n        let inclusion_proof = tree_c.gen_proof(self.index() as usize)?;\n        ColumnProof::<Tree::Proof>::from_column(self, inclusion_proof)\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/column_proof.rs",
    "content": "use log::trace;\nuse paired::bls12_381::Fr;\nuse serde::{Deserialize, Serialize};\nuse storage_proofs_core::{error::Result, hasher::Hasher, merkle::MerkleProofTrait};\n\nuse super::column::Column;\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ColumnProof<Proof: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"Column<Proof::Hasher>: Serialize\",\n        deserialize = \"Column<Proof::Hasher>: Deserialize<'de>\"\n    ))]\n    pub(crate) column: Column<Proof::Hasher>,\n    #[serde(bound(\n        serialize = \"Proof: Serialize\",\n        deserialize = \"Proof: serde::de::DeserializeOwned\"\n    ))]\n    pub(crate) inclusion_proof: Proof,\n}\n\nimpl<Proof: MerkleProofTrait> ColumnProof<Proof> {\n    pub fn from_column(column: Column<Proof::Hasher>, inclusion_proof: Proof) -> Result<Self> {\n        Ok(ColumnProof {\n            column,\n            inclusion_proof,\n        })\n    }\n\n    pub fn root(&self) -> <Proof::Hasher as Hasher>::Domain {\n        self.inclusion_proof.root()\n    }\n\n    fn column(&self) -> &Column<Proof::Hasher> {\n        &self.column\n    }\n\n    pub fn get_node_at_layer(&self, layer: usize) -> Result<&<Proof::Hasher as Hasher>::Domain> {\n        self.column().get_node_at_layer(layer)\n    }\n\n    pub fn column_hash(&self) -> Fr {\n        self.column.hash()\n    }\n\n    pub fn verify(\n        &self,\n        challenge: u32,\n        expected_root: &<Proof::Hasher as Hasher>::Domain,\n    ) -> bool {\n        let c_i = self.column_hash();\n\n        check_eq!(&self.inclusion_proof.root(), expected_root);\n        check!(self.inclusion_proof.validate_data(c_i.into()));\n        check!(self.inclusion_proof.validate(challenge as usize));\n\n        true\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/create_label/mod.rs",
    "content": "use anyhow::Context;\nuse log::*;\nuse merkletree::merkle::Element;\nuse merkletree::store::StoreConfig;\nuse storage_proofs_core::{\n    cache_key::CacheKey, drgraph::Graph, error::Result, hasher::Hasher, merkle::MerkleTreeTrait,\n};\n\nuse crate::stacked::vanilla::{proof::LayerState, StackedBucketGraph};\n\npub mod multi;\npub mod single;\n\n/// Prepares the necessary `StoreConfig`s with which the layers are stored.\n/// Also checks for already existing layers and marks them as such.\npub fn prepare_layers<Tree: 'static + MerkleTreeTrait>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    config: &StoreConfig,\n    layers: usize,\n) -> Vec<LayerState> {\n    let label_configs = (1..=layers).map(|layer| {\n        StoreConfig::from_config(&config, CacheKey::label_layer(layer), Some(graph.size()))\n    });\n\n    let mut states = Vec::with_capacity(layers);\n    for (layer, label_config) in (1..=layers).zip(label_configs) {\n        // Clear possible left over tmp files\n        remove_tmp_layer(&label_config);\n\n        // Check if this layer is already on disk\n        let generated = is_layer_written::<Tree>(graph, &label_config).unwrap_or_default();\n        if generated {\n            // succesful load\n            info!(\"found valid labels for layer {}\", layer);\n        }\n\n        states.push(LayerState {\n            config: label_config,\n            generated,\n        });\n    }\n\n    states\n}\n\n/// Stores a layer atomically on disk, by writing first to `.tmp` and then renaming.\npub fn write_layer(data: &[u8], config: &StoreConfig) -> Result<()> {\n    let data_path = StoreConfig::data_path(&config.path, &config.id);\n    let tmp_data_path = data_path.with_extension(\".tmp\");\n\n    if let Some(parent) = data_path.parent() {\n        std::fs::create_dir_all(parent).context(\"failed to create parent directories\")?;\n    }\n    std::fs::write(&tmp_data_path, data).context(\"failed to write layer data\")?;\n    std::fs::rename(tmp_data_path, data_path).context(\"failed to rename tmp data\")?;\n\n    Ok(())\n}\n\n/// Reads a layer from disk, into the provided slice.\npub fn read_layer(config: &StoreConfig, mut data: &mut [u8]) -> Result<()> {\n    let data_path = StoreConfig::data_path(&config.path, &config.id);\n    let file = std::fs::File::open(data_path).context(\"failed to open layer\")?;\n    let mut buffered = std::io::BufReader::new(file);\n    std::io::copy(&mut buffered, &mut data).context(\"failed to read layer\")?;\n\n    Ok(())\n}\n\npub fn remove_tmp_layer(config: &StoreConfig) {\n    let data_path = StoreConfig::data_path(&config.path, &config.id);\n    let tmp_data_path = data_path.with_extension(\".tmp\");\n    if tmp_data_path.exists() {\n        if let Err(err) = std::fs::remove_file(tmp_data_path) {\n            warn!(\"failed to delete tmp file: {}\", err);\n        }\n    }\n}\n\n/// Checks if the given layer is already written and of the right size.\npub fn is_layer_written<Tree: 'static + MerkleTreeTrait>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    config: &StoreConfig,\n) -> Result<bool> {\n    let data_path = StoreConfig::data_path(&config.path, &config.id);\n    if !data_path.exists() {\n        return Ok(false);\n    }\n\n    let file = std::fs::File::open(&data_path)?;\n    let metadata = file.metadata()?;\n    let file_size = metadata.len() as usize;\n\n    if file_size != graph.size() * <Tree::Hasher as Hasher>::Domain::byte_len() {\n        return Ok(false);\n    }\n\n    Ok(true)\n}\n"
  },
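  {
    "path": "storage-proofs/porep/src/stacked/vanilla/create_label/atomic_write.sketch.rs",
    "content": "//! Illustrative sketch (hypothetical file, not part of the original sources):\n//! the write-to-tmp-then-rename pattern that `write_layer` in `mod.rs` relies on.\n//! The rename is what makes a finished layer appear on disk atomically: a crash\n//! mid-write leaves only a tmp file behind, which `remove_tmp_layer`-style cleanup\n//! deletes on the next run. Uses only `std`.\nuse std::fs;\nuse std::io::Result;\nuse std::path::Path;\n\nfn write_atomically(path: &Path, data: &[u8]) -> Result<()> {\n    let tmp = path.with_extension(\"tmp\");\n    // A crash here can leave `*.tmp` behind, but never a truncated `path`.\n    fs::write(&tmp, data)?;\n    // On POSIX filesystems, a rename within one filesystem is atomic.\n    fs::rename(&tmp, path)?;\n    Ok(())\n}\n\nfn main() -> Result<()> {\n    let path = std::env::temp_dir().join(\"layer-1.dat\");\n    write_atomically(&path, b\"label data\")?;\n    assert_eq!(fs::read(&path)?, b\"label data\");\n    fs::remove_file(&path)\n}\n"
  },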
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/create_label/multi.rs",
    "content": "use std::convert::TryInto;\nuse std::marker::PhantomData;\nuse std::mem::size_of;\nuse std::sync::atomic::{AtomicU64, Ordering::SeqCst};\n\nuse anyhow::{Context, Result};\nuse byte_slice_cast::*;\nuse crossbeam::thread;\nuse digest::generic_array::{\n    typenum::{Unsigned, U64},\n    GenericArray,\n};\nuse log::*;\nuse mapr::MmapMut;\nuse merkletree::store::{DiskStore, StoreConfig};\nuse storage_proofs_core::{\n    cache_key::CacheKey,\n    drgraph::{Graph, BASE_DEGREE},\n    hasher::Hasher,\n    merkle::*,\n    settings,\n    util::NODE_SIZE,\n};\n\nuse super::super::{\n    cache::ParentCache,\n    graph::{StackedBucketGraph, DEGREE, EXP_DEGREE},\n    memory_handling::{setup_create_label_memory, CacheReader},\n    params::{Labels, LabelsCache},\n    proof::LayerState,\n    utils::*,\n};\n\nconst NODE_WORDS: usize = NODE_SIZE / size_of::<u32>();\n\nconst SHA256_INITIAL_DIGEST: [u32; 8] = [\n    0x6a09_e667,\n    0xbb67_ae85,\n    0x3c6_ef372,\n    0xa54f_f53a,\n    0x510e_527f,\n    0x9b05_688c,\n    0x1f83_d9ab,\n    0x5be0_cd19,\n];\n\n#[inline]\nfn fill_buffer(\n    cur_node: u64,\n    cur_consumer: &AtomicU64,\n    mut cur_parent: &[u32], // parents for this node\n    layer_labels: &UnsafeSlice<u32>,\n    exp_labels: Option<&UnsafeSlice<u32>>, // None for layer0\n    buf: &mut [u8],\n    base_parent_missing: &mut BitMask,\n) {\n    const MIN_BASE_PARENT_NODE: u64 = 2000;\n\n    let cur_node_swap = cur_node.to_be_bytes(); // Note switch to big endian\n    buf[36..44].copy_from_slice(&cur_node_swap); // update buf with current node\n\n    // Perform the first hash\n    let cur_node_ptr =\n        unsafe { &mut layer_labels.as_mut_slice()[cur_node as usize * NODE_WORDS as usize..] };\n\n    cur_node_ptr[..8].copy_from_slice(&SHA256_INITIAL_DIGEST);\n    compress256!(cur_node_ptr, buf, 1);\n\n    // Fill in the base parents\n    // Node 5 (prev node) will always be missing, and there tend to be\n    // frequent close references.\n    if cur_node > MIN_BASE_PARENT_NODE {\n        // Mark base parent 5 as missing\n        // base_parent_missing.set_all(0x20);\n        base_parent_missing.set(5);\n\n        // Skip the last base parent - it always points to the preceding node,\n        // which we know is not ready and will be filled in the main loop\n        for k in 0..BASE_DEGREE - 1 {\n            if cur_parent[0] as u64 >= cur_consumer.load(SeqCst) {\n                // Node is not ready\n                base_parent_missing.set(k);\n            } else {\n                let parent_data = unsafe {\n                    let offset = cur_parent[0] as usize * NODE_WORDS;\n                    &layer_labels.as_slice()[offset..offset + NODE_WORDS]\n                };\n                let a = 64 + (NODE_SIZE * k);\n                buf[a..a + NODE_SIZE].copy_from_slice(parent_data.as_byte_slice());\n            }\n            cur_parent = &cur_parent[1..];\n        }\n        // Advance pointer for the last base parent\n        cur_parent = &cur_parent[1..];\n    } else {\n        base_parent_missing.set_upto(BASE_DEGREE as u8); // (1 << BASE_DEGREE) - 1);\n        cur_parent = &cur_parent[BASE_DEGREE..];\n    }\n\n    if let Some(exp_labels) = exp_labels {\n        // Read from each of the expander parent nodes\n        for k in BASE_DEGREE..DEGREE {\n            let parent_data = unsafe {\n                let offset = cur_parent[0] as usize * NODE_WORDS;\n                &exp_labels.as_slice()[offset..offset + NODE_WORDS]\n            };\n            let a = 64 + (NODE_SIZE * 
k);\n            buf[a..a + NODE_SIZE].copy_from_slice(parent_data.as_byte_slice());\n            cur_parent = &cur_parent[1..];\n        }\n    }\n}\n\n// This implements a producer, i.e. a thread that pre-fills the buffer\n// with parent node data.\n// - cur_consumer - The node currently being processed (consumed) by the\n//                  hashing thread\n// - cur_producer - The next node to be filled in by producer threads. The\n//                  hashing thread cannot yet work on this node.\n// - cur_awaiting - The first node not currently being filled by any producer\n//                  thread.\n// - stride       - Each producer fills in this many nodes at a time. Setting\n//                  this too small will cause a lot of time to be spent in\n//                  thread synchronization\n// - lookahead    - ring_buf size, in nodes\n// - base_parent_missing - Bit mask of any base parent nodes that could not\n//                         be filled in. This is an array of size lookahead.\n// - is_layer0    - Indicates first (no expander parents) or subsequent layer\n#[allow(clippy::too_many_arguments)]\nfn create_label_runner(\n    parents_cache: &CacheReader<u32>,\n    layer_labels: &UnsafeSlice<u32>,\n    exp_labels: Option<&UnsafeSlice<u32>>, // None for layer 0\n    num_nodes: u64,\n    cur_consumer: &AtomicU64,\n    cur_producer: &AtomicU64,\n    cur_awaiting: &AtomicU64,\n    stride: u64,\n    lookahead: u64,\n    ring_buf: &RingBuf,\n    base_parent_missing: &UnsafeSlice<BitMask>,\n) -> Result<()> {\n    info!(\"created label runner\");\n    // Label data bytes per node\n    loop {\n        // Get next work items\n        let work = cur_awaiting.fetch_add(stride, SeqCst);\n        if work >= num_nodes {\n            break;\n        }\n        let count = if work + stride > num_nodes {\n            num_nodes - work\n        } else {\n            stride\n        };\n        // info!(\n        //     \"starting work on count items: {}, starting from {}\",\n        //     count, work\n        // );\n\n        // Do the work of filling the buffers\n        for cur_node in work..work + count {\n            // Determine which node slot in the ring_buffer to use\n            // Note that node 0 does not use a buffer slot\n            let cur_slot = (cur_node - 1) % lookahead;\n\n            // Don't overrun the buffer\n            while cur_node > (cur_consumer.load(SeqCst) + lookahead - 1) {\n                std::thread::sleep(std::time::Duration::from_micros(10));\n            }\n\n            let buf = unsafe { ring_buf.slot_mut(cur_slot as usize) };\n            let bpm = unsafe { base_parent_missing.get_mut(cur_slot as usize) };\n\n            let pc = parents_cache.slice_at(cur_node as usize * DEGREE as usize, cur_consumer);\n            // info!(\"filling\");\n            fill_buffer(\n                cur_node,\n                cur_consumer,\n                pc,\n                &layer_labels,\n                exp_labels,\n                buf,\n                bpm,\n            );\n            // info!(\"filled\");\n        }\n\n        // Wait for the previous node to finish\n        while work > (cur_producer.load(SeqCst) + 1) {\n            std::thread::sleep(std::time::Duration::from_micros(10));\n        }\n\n        // Mark our work as done\n        cur_producer.fetch_add(count, SeqCst);\n    }\n\n    Ok(())\n}\n\nfn create_layer_labels(\n    parents_cache: &CacheReader<u32>,\n    replica_id: &[u8],\n    layer_labels: &mut MmapMut,\n    exp_labels: Option<&mut 
MmapMut>,\n    num_nodes: u64,\n    cur_layer: u32,\n) -> Result<()> {\n    info!(\"Creating labels for layer {}\", cur_layer);\n    // num_producers is the number of producer threads\n    let (lookahead, num_producers, producer_stride) = {\n        // NOTE: Stride must not exceed `sdr_parents_cache_window_nodes`.\n        // If it does, the process will deadlock with producers and consumers\n        // waiting for each other.\n        // TODO: Enforce this.\n        //(800, 1, 128)\n        (800, 2, 128)\n    };\n\n    const BYTES_PER_NODE: usize = (NODE_SIZE * DEGREE) + 64;\n\n    let mut ring_buf = RingBuf::new(BYTES_PER_NODE, lookahead);\n    let mut base_parent_missing = vec![BitMask::default(); lookahead];\n\n    // Fill in the fixed portion of all buffers\n    for buf in ring_buf.iter_slot_mut() {\n        prepare_block(replica_id, cur_layer, buf);\n    }\n\n    // Node the consumer is currently working on\n    let cur_consumer = AtomicU64::new(0);\n    // Highest node that is ready from the producer\n    let cur_producer = AtomicU64::new(0);\n    // Next node to be filled\n    let cur_awaiting = AtomicU64::new(1);\n\n    // These UnsafeSlices are managed through the 3 Atomics above, to minimize any locking overhead.\n    let layer_labels = UnsafeSlice::from_slice(layer_labels.as_mut_slice_of::<u32>().unwrap());\n    let exp_labels =\n        exp_labels.map(|m| UnsafeSlice::from_slice(m.as_mut_slice_of::<u32>().unwrap()));\n    let base_parent_missing = UnsafeSlice::from_slice(&mut base_parent_missing);\n\n    thread::scope(|s| {\n        let mut runners = Vec::with_capacity(num_producers);\n\n        for _i in 0..num_producers {\n            let layer_labels = &layer_labels;\n            let exp_labels = exp_labels.as_ref();\n            let cur_consumer = &cur_consumer;\n            let cur_producer = &cur_producer;\n            let cur_awaiting = &cur_awaiting;\n            let ring_buf = &ring_buf;\n            let base_parent_missing = &base_parent_missing;\n\n            runners.push(s.spawn(move |_| {\n                create_label_runner(\n                    parents_cache,\n                    layer_labels,\n                    exp_labels,\n                    num_nodes,\n                    cur_consumer,\n                    cur_producer,\n                    cur_awaiting,\n                    producer_stride,\n                    lookahead as u64,\n                    ring_buf,\n                    base_parent_missing,\n                )\n            }));\n        }\n\n        let mut cur_node_ptr = unsafe { layer_labels.as_mut_slice() };\n        let mut cur_parent_ptr = parents_cache.consumer_slice_at(DEGREE);\n        let mut cur_parent_ptr_offset = DEGREE;\n\n        // Calculate node 0 (special case with no parents)\n        // Which is replica_id || cur_layer || 0\n        // TODO - Hash and save intermediate result: replica_id || cur_layer\n        let mut buf = [0u8; (NODE_SIZE * DEGREE) + 64];\n        prepare_block(replica_id, cur_layer, &mut buf);\n\n        cur_node_ptr[..8].copy_from_slice(&SHA256_INITIAL_DIGEST);\n        compress256!(cur_node_ptr, buf, 2);\n\n        // Fix endianness\n        cur_node_ptr[..8].iter_mut().for_each(|x| *x = x.to_be());\n\n        cur_node_ptr[7] &= 0x3FFF_FFFF; // Strip last two bits to ensure in Fr\n\n        // Keep track of which node slot in the ring_buffer to use\n        let mut cur_slot = 0;\n        let mut _count_not_ready = 0;\n\n        // Calculate nodes 1 to n\n        cur_consumer.store(1, SeqCst);\n        let 
mut i = 1;\n        while i < num_nodes {\n            // Ensure next buffer is ready\n            let mut printed = false;\n            let mut producer_val = cur_producer.load(SeqCst);\n\n            while producer_val < i {\n                if !printed {\n                    debug!(\"PRODUCER NOT READY! {}\", i);\n                    printed = true;\n                    _count_not_ready += 1;\n                }\n                std::thread::sleep(std::time::Duration::from_micros(10));\n                producer_val = cur_producer.load(SeqCst);\n            }\n\n            // Process as many nodes as are ready\n            let ready_count = producer_val - i + 1;\n            for _count in 0..ready_count {\n                cur_node_ptr = &mut cur_node_ptr[8..];\n                // Grab the current slot of the ring_buf\n                let buf = unsafe { ring_buf.slot_mut(cur_slot) };\n                // Fill in the base parents\n                for k in 0..BASE_DEGREE {\n                    let bpm = unsafe { base_parent_missing.get(cur_slot) };\n                    if bpm.get(k) {\n                        // info!(\"getting missing parent, k={}\", k);\n                        let source = unsafe {\n                            if cur_parent_ptr.is_empty() {\n                                cur_parent_ptr =\n                                    parents_cache.consumer_slice_at(cur_parent_ptr_offset);\n                            }\n                            // info!(\"after unsafe, when getting miss parent\");\n                            let start = cur_parent_ptr[0] as usize * NODE_WORDS;\n                            let end = start + NODE_WORDS;\n\n                            // info!(\"before as_slice(), when getting miss parent\");\n                            &layer_labels.as_slice()[start..end]\n                        };\n\n                        buf[64 + (NODE_SIZE * k)..64 + (NODE_SIZE * (k + 1))]\n                            .copy_from_slice(source.as_byte_slice());\n                        // info!(\"got missing parent, k={}\", k);\n                    }\n                    cur_parent_ptr = &cur_parent_ptr[1..];\n                    cur_parent_ptr_offset += 1;\n                }\n\n                // Expanders are already all filled in (layer 1 doesn't use expanders)\n                cur_parent_ptr = &cur_parent_ptr[EXP_DEGREE..];\n                cur_parent_ptr_offset += EXP_DEGREE;\n\n                if cur_layer == 1 {\n                    // Six rounds of all base parents\n                    for _j in 0..6 {\n                        compress256!(cur_node_ptr, &buf[64..], 3);\n                    }\n\n                    // round 7 is only first parent\n                    memset(&mut buf[96..128], 0); // Zero out upper half of last block\n                    buf[96] = 0x80; // Padding\n                    buf[126] = 0x27; // Length (0x2700 = 9984 bits -> 1248 bytes)\n                    compress256!(cur_node_ptr, &buf[64..], 1);\n                } else {\n                    // Two rounds of all parents\n                    let blocks = [\n                        *GenericArray::<u8, U64>::from_slice(&buf[64..128]),\n                        *GenericArray::<u8, U64>::from_slice(&buf[128..192]),\n                        *GenericArray::<u8, U64>::from_slice(&buf[192..256]),\n                        *GenericArray::<u8, U64>::from_slice(&buf[256..320]),\n                        *GenericArray::<u8, U64>::from_slice(&buf[320..384]),\n                        *GenericArray::<u8, 
U64>::from_slice(&buf[384..448]),\n                        *GenericArray::<u8, U64>::from_slice(&buf[448..512]),\n                    ];\n                    sha2::compress256((&mut cur_node_ptr[..8]).try_into().unwrap(), &blocks);\n                    sha2::compress256((&mut cur_node_ptr[..8]).try_into().unwrap(), &blocks);\n\n                    // Final round is only nine parents\n                    memset(&mut buf[352..384], 0); // Zero out upper half of last block\n                    buf[352] = 0x80; // Padding\n                    buf[382] = 0x27; // Length (0x2700 = 9984 bits -> 1248 bytes)\n                    compress256!(cur_node_ptr, &buf[64..], 5);\n                }\n\n                // Fix endianness\n                cur_node_ptr[..8].iter_mut().for_each(|x| *x = x.to_be());\n\n                cur_node_ptr[7] &= 0x3FFF_FFFF; // Strip last two bits to fit in Fr\n\n                cur_consumer.fetch_add(1, SeqCst);\n                i += 1;\n                cur_slot = (cur_slot + 1) % lookahead;\n            }\n        }\n\n        for runner in runners {\n            runner.join().unwrap().unwrap();\n        }\n    })\n    .unwrap();\n\n    Ok(())\n}\n\n#[allow(clippy::type_complexity)]\npub fn create_labels_for_encoding<Tree: 'static + MerkleTreeTrait, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    parents_cache: &ParentCache,\n    layers: usize,\n    replica_id: T,\n    config: StoreConfig,\n) -> Result<(Labels<Tree>, Vec<LayerState>)> {\n    info!(\"create labels\");\n\n    let layer_states = super::prepare_layers::<Tree>(graph, &config, layers);\n\n    let sector_size = graph.size() * NODE_SIZE;\n    let node_count = graph.size() as u64;\n    let cache_window_nodes = settings::SETTINGS\n        .lock()\n        .expect(\"sdr_parents_cache_size settings lock failure\")\n        .sdr_parents_cache_size as usize;\n\n    let default_cache_size = DEGREE * 4 * cache_window_nodes;\n\n    // NOTE: this means we currently keep 2x sector size around, to improve speed\n    let (parents_cache, mut layer_labels, mut exp_labels) = setup_create_label_memory(\n        sector_size,\n        DEGREE,\n        Some(default_cache_size as usize),\n        &parents_cache.path,\n    )?;\n\n    for (layer, layer_state) in (1..=layers).zip(layer_states.iter()) {\n        info!(\"Layer {}\", layer);\n\n        if layer_state.generated {\n            info!(\"skipping layer {}, already generated\", layer);\n\n            // load the already generated layer into exp_labels\n            super::read_layer(&layer_state.config, &mut exp_labels)?;\n            continue;\n        }\n\n        // Cache reset happens in two parts.\n        // The second part (the finish) happens before each layer but the first.\n        if layers != 1 {\n            parents_cache.finish_reset()?;\n        }\n        create_layer_labels(\n            &parents_cache,\n            &replica_id.as_ref(),\n            &mut layer_labels,\n            if layer == 1 {\n                None\n            } else {\n                Some(&mut exp_labels)\n            },\n            node_count,\n            layer as u32,\n        )?;\n\n        // Cache reset happens in two parts.\n        // The first part (the start) happens after each layer but the last.\n        if layer != layers {\n            parents_cache.start_reset()?;\n        }\n\n        {\n            let layer_config = &layer_state.config;\n\n            info!(\"  storing labels on disk\");\n            super::write_layer(&layer_labels, 
layer_config).context(\"failed to store labels\")?;\n\n            info!(\n                \"  generated layer {} store with id {}\",\n                layer, layer_config.id\n            );\n\n            std::mem::swap(&mut layer_labels, &mut exp_labels);\n        }\n    }\n\n    Ok((\n        Labels::<Tree> {\n            labels: layer_states.iter().map(|s| s.config.clone()).collect(),\n            _h: PhantomData,\n        },\n        layer_states,\n    ))\n}\n\n#[allow(clippy::type_complexity)]\npub fn create_labels_for_decoding<Tree: 'static + MerkleTreeTrait, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    parents_cache: &ParentCache,\n    layers: usize,\n    replica_id: T,\n    config: StoreConfig,\n) -> Result<LabelsCache<Tree>> {\n    info!(\"create labels\");\n\n    // For now, we require it due to changes in encodings structure.\n    let mut labels: Vec<DiskStore<<Tree::Hasher as Hasher>::Domain>> = Vec::with_capacity(layers);\n    let mut label_configs: Vec<StoreConfig> = Vec::with_capacity(layers);\n\n    let sector_size = graph.size() * NODE_SIZE;\n    let node_count = graph.size() as u64;\n    let cache_window_nodes = (settings::SETTINGS\n        .lock()\n        .expect(\"sdr_parents_cache_window_nodes settings lock failure\")\n        .sdr_parents_cache_size\n        / 2) as usize;\n\n    let default_cache_size = DEGREE * 4 * cache_window_nodes;\n\n    // NOTE: this means we currently keep 2x sector size around, to improve speed\n    let (parents_cache, mut layer_labels, mut exp_labels) = setup_create_label_memory(\n        sector_size,\n        DEGREE,\n        Some(default_cache_size as usize),\n        &parents_cache.path,\n    )?;\n\n    for layer in 1..=layers {\n        info!(\"Layer {}\", layer);\n\n        // Cache reset happens in two parts.\n        // The second part (the finish) happens before each layer but the first.\n        if layers != 1 {\n            parents_cache.finish_reset()?;\n        }\n        create_layer_labels(\n            &parents_cache,\n            &replica_id.as_ref(),\n            &mut layer_labels,\n            if layer == 1 {\n                None\n            } else {\n                Some(&mut exp_labels)\n            },\n            node_count,\n            layer as u32,\n        )?;\n\n        // Cache reset happens in two parts.\n        // The first part (the start) happens after each layer but the last.\n        if layer != layers {\n            parents_cache.start_reset()?;\n        }\n\n        {\n            let layer_config =\n                StoreConfig::from_config(&config, CacheKey::label_layer(layer), Some(graph.size()));\n\n            info!(\"  storing labels on disk\");\n            // Construct and persist the layer data.\n            let layer_store: DiskStore<<Tree::Hasher as Hasher>::Domain> =\n                DiskStore::new_from_slice_with_config(\n                    graph.size(),\n                    Tree::Arity::to_usize(),\n                    &layer_labels,\n                    layer_config.clone(),\n                )?;\n            info!(\n                \"  generated layer {} store with id {}\",\n                layer, layer_config.id\n            );\n\n            std::mem::swap(&mut layer_labels, &mut exp_labels);\n\n            // Track the layer specific store and StoreConfig for later retrieval.\n            labels.push(layer_store);\n            label_configs.push(layer_config);\n        }\n    }\n    assert_eq!(\n        labels.len(),\n        layers,\n        \"Invalid amount 
of layers encoded\"\n    );\n\n    Ok(LabelsCache::<Tree> { labels })\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use ff::PrimeField;\n    use generic_array::typenum::{U0, U2, U8};\n    use paired::bls12_381::{Fr, FrRepr};\n    use storage_proofs_core::hasher::poseidon::PoseidonHasher;\n\n    #[test]\n    fn test_create_labels() {\n        let layers = 11;\n        let nodes_2k = 1 << 11;\n        let nodes_4k = 1 << 12;\n        let replica_id = [9u8; 32];\n        let porep_id = [123; 32];\n        test_create_labels_aux(\n            nodes_2k,\n            layers,\n            replica_id,\n            porep_id,\n            Fr::from_repr(FrRepr([\n                0x1a4017052cbe1c4a,\n                0x446354db91e96d8e,\n                0xbc864a95454eba0c,\n                0x094cf219d72cad06,\n            ]))\n            .unwrap(),\n        );\n\n        test_create_labels_aux(\n            nodes_4k,\n            layers,\n            replica_id,\n            porep_id,\n            Fr::from_repr(FrRepr([\n                0x0a6917a59c51198b,\n                0xd2edc96e3717044a,\n                0xf438a1131f907206,\n                0x084f42888ca2342c,\n            ]))\n            .unwrap(),\n        );\n    }\n\n    fn test_create_labels_aux(\n        sector_size: usize,\n        layers: usize,\n        replica_id: [u8; 32],\n        porep_id: [u8; 32],\n        expected_last_label: Fr,\n    ) {\n        let nodes = sector_size / NODE_SIZE;\n\n        let cache_dir = tempfile::tempdir().expect(\"tempdir failure\");\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            nodes.trailing_zeros() as usize,\n        );\n\n        let graph = StackedBucketGraph::<PoseidonHasher>::new(\n            None,\n            nodes,\n            BASE_DEGREE,\n            EXP_DEGREE,\n            porep_id,\n        )\n        .unwrap();\n        let cache = graph.parent_cache().unwrap();\n\n        let labels = create_labels_for_decoding::<LCTree<PoseidonHasher, U8, U0, U2>, _>(\n            &graph, &cache, layers, replica_id, config,\n        )\n        .unwrap();\n\n        let final_labels = labels.labels_for_last_layer().unwrap();\n        let last_label = final_labels.read_at(final_labels.len() - 1).unwrap();\n        dbg!(&last_label);\n        assert_eq!(expected_last_label.into_repr(), last_label.0);\n    }\n}\n"
  },
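  {
    "path": "storage-proofs/porep/src/stacked/vanilla/create_label/producer_consumer.sketch.rs",
    "content": "//! Illustrative sketch (hypothetical file, not part of the original sources):\n//! the producer/consumer scheme documented above `create_label_runner` in\n//! `multi.rs`, reduced to plain `std` atomics. Producers claim strides of nodes\n//! via `cur_awaiting`, fill ring-buffer slots, and publish in order through\n//! `cur_producer`; the consumer drains slots sequentially and advances\n//! `cur_consumer`, which bounds how far producers may run ahead (`lookahead`).\n//! All sizes are toy placeholders, not tuned values.\nuse std::sync::atomic::{AtomicU64, Ordering::SeqCst};\nuse std::thread;\n\nfn main() {\n    const NUM_NODES: u64 = 10_000;\n    const LOOKAHEAD: u64 = 64; // ring size, in slots\n    const STRIDE: u64 = 16; // nodes claimed per producer grab\n\n    let ring: Vec<AtomicU64> = (0..LOOKAHEAD).map(|_| AtomicU64::new(0)).collect();\n    let cur_consumer = AtomicU64::new(0); // next node the consumer will read\n    let cur_producer = AtomicU64::new(0); // nodes published so far, in order\n    let cur_awaiting = AtomicU64::new(0); // next node not yet claimed\n\n    thread::scope(|s| {\n        for _ in 0..2 {\n            s.spawn(|| loop {\n                let start = cur_awaiting.fetch_add(STRIDE, SeqCst);\n                if start >= NUM_NODES {\n                    break;\n                }\n                let end = (start + STRIDE).min(NUM_NODES);\n                for node in start..end {\n                    // Don't overwrite a slot the consumer has not drained yet.\n                    while node >= cur_consumer.load(SeqCst) + LOOKAHEAD {\n                        thread::yield_now();\n                    }\n                    // Stand-in for fill_buffer(): produce this node's data.\n                    ring[(node % LOOKAHEAD) as usize].store(node * 2, SeqCst);\n                }\n                // Publish in order: wait until all earlier strides are visible.\n                while cur_producer.load(SeqCst) < start {\n                    thread::yield_now();\n                }\n                cur_producer.fetch_add(end - start, SeqCst);\n            });\n        }\n\n        // Consumer: node i may be read once producers have published past it.\n        let mut sum = 0u64;\n        for node in 0..NUM_NODES {\n            while cur_producer.load(SeqCst) <= node {\n                thread::yield_now();\n            }\n            sum += ring[(node % LOOKAHEAD) as usize].load(SeqCst);\n            cur_consumer.fetch_add(1, SeqCst);\n        }\n        assert_eq!(sum, NUM_NODES * (NUM_NODES - 1)); // sum of 2*i for i < n\n    });\n}\n"
  },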
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/create_label/single.rs",
    "content": "use sha2raw::Sha256;\nuse std::marker::PhantomData;\n\nuse anyhow::{Context, Result};\nuse generic_array::typenum::Unsigned;\nuse log::*;\nuse merkletree::store::{DiskStore, StoreConfig};\nuse storage_proofs_core::{\n    drgraph::Graph,\n    hasher::Hasher,\n    merkle::*,\n    util::{data_at_node_offset, NODE_SIZE},\n};\n\nuse super::super::{\n    cache::ParentCache, proof::LayerState, Labels, LabelsCache, StackedBucketGraph,\n};\n\n#[allow(clippy::type_complexity)]\npub fn create_labels_for_encoding<Tree: 'static + MerkleTreeTrait, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    parents_cache: &mut ParentCache,\n    layers: usize,\n    replica_id: T,\n    config: StoreConfig,\n) -> Result<(Labels<Tree>, Vec<LayerState>)> {\n    info!(\"generate labels\");\n\n    let layer_states = super::prepare_layers::<Tree>(graph, &config, layers);\n\n    let layer_size = graph.size() * NODE_SIZE;\n    // NOTE: this means we currently keep 2x sector size around, to improve speed.\n    let mut layer_labels = vec![0u8; layer_size]; // Buffer for labels of the current layer\n    let mut exp_labels = vec![0u8; layer_size]; // Buffer for labels of the previous layer, needed for expander parents\n\n    for (layer, layer_state) in (1..=layers).zip(layer_states.iter()) {\n        info!(\"generating layer: {}\", layer);\n        if layer_state.generated {\n            info!(\"skipping layer {}, already generated\", layer);\n\n            // load the already generated layer into exp_labels\n            super::read_layer(&layer_state.config, &mut exp_labels)?;\n            continue;\n        }\n\n        parents_cache.reset()?;\n\n        if layer == 1 {\n            for node in 0..graph.size() {\n                create_label(\n                    graph,\n                    Some(parents_cache),\n                    &replica_id,\n                    &mut layer_labels,\n                    layer,\n                    node,\n                )?;\n            }\n        } else {\n            for node in 0..graph.size() {\n                create_label_exp(\n                    graph,\n                    Some(parents_cache),\n                    &replica_id,\n                    &exp_labels,\n                    &mut layer_labels,\n                    layer,\n                    node,\n                )?;\n            }\n        }\n\n        // Write the result to disk to avoid keeping it in memory all the time.\n        let layer_config = &layer_state.config;\n\n        info!(\"  storing labels on disk\");\n        super::write_layer(&layer_labels, layer_config).context(\"failed to store labels\")?;\n\n        info!(\n            \"  generated layer {} store with id {}\",\n            layer, layer_config.id\n        );\n\n        info!(\"  setting exp parents\");\n        std::mem::swap(&mut layer_labels, &mut exp_labels);\n    }\n\n    Ok((\n        Labels::<Tree> {\n            labels: layer_states.iter().map(|s| s.config.clone()).collect(),\n            _h: PhantomData,\n        },\n        layer_states,\n    ))\n}\n\n#[allow(clippy::type_complexity)]\npub fn create_labels_for_decoding<Tree: 'static + MerkleTreeTrait, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    parents_cache: &mut ParentCache,\n    layers: usize,\n    replica_id: T,\n    config: StoreConfig,\n) -> Result<LabelsCache<Tree>> {\n    info!(\"generate labels\");\n\n    // For now, we require it due to changes in encodings structure.\n    let mut labels: Vec<DiskStore<<Tree::Hasher as 
Hasher>::Domain>> = Vec::with_capacity(layers);\n\n    let layer_size = graph.size() * NODE_SIZE;\n    // NOTE: this means we currently keep 2x sector size around, to improve speed.\n    let mut layer_labels = vec![0u8; layer_size]; // Buffer for labels of the current layer\n    let mut exp_labels = vec![0u8; layer_size]; // Buffer for labels of the previous layer, needed for expander parents\n\n    for layer in 1..=layers {\n        info!(\"generating layer: {}\", layer);\n\n        parents_cache.reset()?;\n\n        if layer == 1 {\n            for node in 0..graph.size() {\n                create_label(\n                    graph,\n                    Some(parents_cache),\n                    &replica_id,\n                    &mut layer_labels,\n                    layer,\n                    node,\n                )?;\n            }\n        } else {\n            for node in 0..graph.size() {\n                create_label_exp(\n                    graph,\n                    Some(parents_cache),\n                    &replica_id,\n                    &exp_labels,\n                    &mut layer_labels,\n                    layer,\n                    node,\n                )?;\n            }\n        }\n\n        // Write the result to disk to avoid keeping it in memory all the time.\n        info!(\"  storing labels on disk\");\n        super::write_layer(&layer_labels, &config)?;\n\n        let layer_store: DiskStore<<Tree::Hasher as Hasher>::Domain> =\n            DiskStore::new_from_disk(graph.size(), Tree::Arity::to_usize(), &config)?;\n        info!(\"  generated layer {} store with id {}\", layer, config.id);\n\n        info!(\"  setting exp parents\");\n        std::mem::swap(&mut layer_labels, &mut exp_labels);\n\n        // Track the layer specific store and StoreConfig for later retrieval.\n        labels.push(layer_store);\n    }\n\n    assert_eq!(\n        labels.len(),\n        layers,\n        \"Invalid amount of layers encoded\"\n    );\n\n    Ok(LabelsCache::<Tree> { labels })\n}\n\npub fn create_label<H: Hasher, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<H>,\n    cache: Option<&mut ParentCache>,\n    replica_id: T,\n    layer_labels: &mut [u8],\n    layer_index: usize,\n    node: usize,\n) -> Result<()> {\n    let mut hasher = Sha256::new();\n    let mut buffer = [0u8; 32];\n\n    buffer[..4].copy_from_slice(&(layer_index as u32).to_be_bytes());\n    buffer[4..12].copy_from_slice(&(node as u64).to_be_bytes());\n    hasher.input(&[replica_id.as_ref(), &buffer[..]][..]);\n\n    // hash parents for all non 0 nodes\n    let hash = if node > 0 {\n        // prefetch previous node, which is always a parent\n        let prev = &layer_labels[(node - 1) * NODE_SIZE..node * NODE_SIZE];\n        prefetch!(prev.as_ptr() as *const i8);\n\n        graph.copy_parents_data(node as u32, &*layer_labels, hasher, cache)?\n    } else {\n        hasher.finish()\n    };\n\n    // store the newly generated key\n    let start = data_at_node_offset(node);\n    let end = start + NODE_SIZE;\n    layer_labels[start..end].copy_from_slice(&hash[..]);\n\n    // strip last two bits, to ensure result is in Fr.\n    layer_labels[end - 1] &= 0b0011_1111;\n\n    Ok(())\n}\n\npub fn create_label_exp<H: Hasher, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<H>,\n    cache: Option<&mut ParentCache>,\n    replica_id: T,\n    exp_parents_data: &[u8],\n    layer_labels: &mut [u8],\n    layer_index: usize,\n    node: usize,\n) -> Result<()> {\n    let mut hasher = Sha256::new();\n    let mut 
buffer = [0u8; 32];\n\n    buffer[0..4].copy_from_slice(&(layer_index as u32).to_be_bytes());\n    buffer[4..12].copy_from_slice(&(node as u64).to_be_bytes());\n    hasher.input(&[replica_id.as_ref(), &buffer[..]][..]);\n\n    // hash parents for all non 0 nodes\n    let hash = if node > 0 {\n        // prefetch previous node, which is always a parent\n        let prev = &layer_labels[(node - 1) * NODE_SIZE..node * NODE_SIZE];\n        prefetch!(prev.as_ptr() as *const i8);\n\n        graph.copy_parents_data_exp(node as u32, &*layer_labels, exp_parents_data, hasher, cache)?\n    } else {\n        hasher.finish()\n    };\n\n    // store the newly generated key\n    let start = data_at_node_offset(node);\n    let end = start + NODE_SIZE;\n    layer_labels[start..end].copy_from_slice(&hash[..]);\n\n    // strip last two bits, to ensure result is in Fr.\n    layer_labels[end - 1] &= 0b0011_1111;\n\n    Ok(())\n}\n"
  },
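  {
    "path": "storage-proofs/porep/src/stacked/vanilla/create_label/label_preimage.sketch.rs",
    "content": "//! Illustrative sketch (hypothetical file, not part of the original sources):\n//! the label preimage layout used by `create_label`/`create_label_exp` in\n//! `single.rs`, written against the `sha2` crate rather than `sha2raw`. A node's\n//! label is SHA-256(replica_id || layer || node || parent labels...), with the\n//! two high bits of the last byte cleared so the 32 bytes always fit in `Fr`.\nuse sha2::{Digest, Sha256};\n\nfn label(replica_id: &[u8; 32], layer: u32, node: u64, parent_labels: &[[u8; 32]]) -> [u8; 32] {\n    // 32-byte block: 4 bytes layer (BE) || 8 bytes node (BE) || 20 zero bytes.\n    let mut buffer = [0u8; 32];\n    buffer[..4].copy_from_slice(&layer.to_be_bytes());\n    buffer[4..12].copy_from_slice(&node.to_be_bytes());\n\n    let mut hasher = Sha256::new();\n    hasher.update(replica_id);\n    hasher.update(buffer);\n    for parent in parent_labels {\n        hasher.update(parent);\n    }\n\n    let mut out: [u8; 32] = hasher.finalize().into();\n    out[31] &= 0b0011_1111; // strip last two bits, to ensure the result is in Fr\n    out\n}\n\nfn main() {\n    let replica_id = [9u8; 32];\n    let parent = [1u8; 32];\n    let l = label(&replica_id, 2, 7, &[parent; 6]);\n    assert!(l[31] < 0b0100_0000);\n    println!(\"{:02x?}\", l);\n}\n"
  },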
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/create_label.rs",
    "content": "#[cfg(target_arch = \"x86\")]\nuse std::arch::x86::*;\n#[cfg(target_arch = \"x86_64\")]\nuse std::arch::x86_64::*;\n\nuse sha2raw::Sha256;\nuse storage_proofs_core::{\n    error::Result,\n    hasher::Hasher,\n    util::{data_at_node_offset, NODE_SIZE},\n};\n\nuse super::graph::StackedBucketGraph;\n\npub fn create_label<H: Hasher>(\n    graph: &StackedBucketGraph<H>,\n    replica_id: &H::Domain,\n    layer_labels: &mut [u8],\n    node: usize,\n) -> Result<()> {\n    let mut hasher = Sha256::new();\n    let mut buffer = [0u8; 32];\n\n    buffer[..8].copy_from_slice(&(node as u64).to_be_bytes());\n    hasher.input(&[AsRef::<[u8]>::as_ref(replica_id), &buffer[..]][..]);\n\n    // hash parents for all non 0 nodes\n    let hash = if node > 0 {\n        // prefetch previous node, which is always a parent\n        let prev = &layer_labels[(node - 1) * NODE_SIZE..node * NODE_SIZE];\n        unsafe {\n            _mm_prefetch(prev.as_ptr() as *const i8, _MM_HINT_T0);\n        }\n\n        graph.copy_parents_data(node as u32, &*layer_labels, hasher)\n    } else {\n        hasher.finish()\n    };\n\n    // store the newly generated key\n    let start = data_at_node_offset(node);\n    let end = start + NODE_SIZE;\n    layer_labels[start..end].copy_from_slice(&hash[..]);\n\n    // strip last two bits, to ensure result is in Fr.\n    layer_labels[end - 1] &= 0b0011_1111;\n\n    Ok(())\n}\n\npub fn create_label_exp<H: Hasher>(\n    graph: &StackedBucketGraph<H>,\n    replica_id: &H::Domain,\n    exp_parents_data: &[u8],\n    layer_labels: &mut [u8],\n    node: usize,\n) -> Result<()> {\n    let mut hasher = Sha256::new();\n    let mut buffer = [0u8; 32];\n\n    buffer[..8].copy_from_slice(&(node as u64).to_be_bytes());\n    hasher.input(&[AsRef::<[u8]>::as_ref(replica_id), &buffer[..]][..]);\n\n    // hash parents for all non 0 nodes\n    let hash = if node > 0 {\n        // prefetch previous node, which is always a parent\n        let prev = &layer_labels[(node - 1) * NODE_SIZE..node * NODE_SIZE];\n        unsafe {\n            _mm_prefetch(prev.as_ptr() as *const i8, _MM_HINT_T0);\n        }\n\n        graph.copy_parents_data_exp(node as u32, &*layer_labels, exp_parents_data, hasher)\n    } else {\n        hasher.finish()\n    };\n\n    // store the newly generated key\n    let start = data_at_node_offset(node);\n    let end = start + NODE_SIZE;\n    layer_labels[start..end].copy_from_slice(&hash[..]);\n\n    // strip last two bits, to ensure result is in Fr.\n    layer_labels[end - 1] &= 0b0011_1111;\n\n    Ok(())\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/encoding_proof.rs",
    "content": "use log::trace;\nuse std::marker::PhantomData;\n\nuse paired::bls12_381::Fr;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\nuse storage_proofs_core::{fr32::bytes_into_fr_repr_safe, hasher::Hasher};\n\nuse crate::encode::encode;\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct EncodingProof<H: Hasher> {\n    pub(crate) parents: Vec<H::Domain>,\n    pub(crate) layer_index: u32,\n    pub(crate) node: u64,\n    #[serde(skip)]\n    _h: PhantomData<H>,\n}\n\nimpl<H: Hasher> EncodingProof<H> {\n    pub fn new(layer_index: u32, node: u64, parents: Vec<H::Domain>) -> Self {\n        EncodingProof {\n            layer_index,\n            node,\n            parents,\n            _h: PhantomData,\n        }\n    }\n\n    fn create_key(&self, replica_id: &H::Domain) -> H::Domain {\n        let mut hasher = Sha256::new();\n        let mut buffer = [0u8; 64];\n\n        // replica_id\n        buffer[..32].copy_from_slice(AsRef::<[u8]>::as_ref(replica_id));\n\n        // layer index\n        buffer[32..36].copy_from_slice(&(self.layer_index as u32).to_be_bytes());\n        // node id\n        buffer[36..44].copy_from_slice(&(self.node as u64).to_be_bytes());\n\n        hasher.input(&buffer[..]);\n\n        // parents\n        for parent in &self.parents {\n            hasher.input(AsRef::<[u8]>::as_ref(parent));\n        }\n\n        bytes_into_fr_repr_safe(hasher.result().as_ref()).into()\n    }\n\n    pub fn verify<G: Hasher>(\n        &self,\n        replica_id: &H::Domain,\n        exp_encoded_node: &H::Domain,\n        decoded_node: &G::Domain,\n    ) -> bool {\n        let key = self.create_key(replica_id);\n\n        let fr: Fr = (*decoded_node).into();\n        let encoded_node = encode(key, fr.into());\n\n        check_eq!(exp_encoded_node, &encoded_node);\n\n        true\n    }\n}\n"
  },
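  {
    "path": "storage-proofs/porep/src/stacked/vanilla/encoding.sketch.rs",
    "content": "//! Illustrative sketch (hypothetical file, not part of the original sources):\n//! the shape of `EncodingProof::verify` in `encoding_proof.rs`. The vanilla\n//! encoding here is field addition of the label-derived key, so verification\n//! recomputes the key and checks `encode(key, decoded) == encoded`. A toy prime\n//! field stands in for the BLS12-381 scalar field `Fr`.\nconst P: u64 = 0xffff_fffb; // toy prime modulus, NOT the real `Fr` modulus\n\nfn encode(key: u64, value: u64) -> u64 {\n    (key + value) % P\n}\n\nfn verify(key: u64, encoded: u64, decoded: u64) -> bool {\n    encode(key, decoded) == encoded\n}\n\nfn main() {\n    let key = 0x1234_5678; // in the real code: H(replica_id || layer || node || parents)\n    let decoded = 0x9abc_def0; // the original data node\n    let encoded = encode(key, decoded);\n    assert!(verify(key, encoded, decoded));\n    // Decoding is the inverse: subtraction in the field.\n    assert_eq!((encoded + P - key) % P, decoded);\n}\n"
  },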
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/graph.rs",
    "content": "use std::marker::PhantomData;\n\n#[cfg(target_arch = \"x86\")]\nuse std::arch::x86::*;\n#[cfg(target_arch = \"x86_64\")]\nuse std::arch::x86_64::*;\n\nuse anyhow::ensure;\nuse log::info;\nuse once_cell::sync::OnceCell;\nuse rayon::prelude::*;\nuse sha2raw::Sha256;\nuse storage_proofs_core::{\n    crypto::feistel::{self, FeistelPrecomputed},\n    drgraph::BASE_DEGREE,\n    drgraph::{BucketGraph, Graph},\n    error::Result,\n    hasher::Hasher,\n    parameter_cache::ParameterSetMetadata,\n    settings,\n    util::NODE_SIZE,\n};\n\n/// The expansion degree used for Stacked Graphs.\npub const EXP_DEGREE: usize = 8;\nconst FEISTEL_KEYS: [feistel::Index; 4] = [1, 2, 3, 4];\n\nconst DEGREE: usize = BASE_DEGREE + EXP_DEGREE;\n\n/// Returns a reference to the parent cache, initializing it lazily the first time this is called.\nfn parent_cache<H, G>(\n    cache_entries: u32,\n    graph: &StackedGraph<H, G>,\n) -> Result<&'static ParentCache>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata + Send + Sync,\n{\n    static INSTANCE_32_GIB: OnceCell<ParentCache> = OnceCell::new();\n    static INSTANCE_64_GIB: OnceCell<ParentCache> = OnceCell::new();\n\n    const NODE_GIB: u32 = (1024 * 1024 * 1024) / NODE_SIZE as u32;\n    ensure!(\n        ((cache_entries == 32 * NODE_GIB) || (cache_entries == 64 * NODE_GIB)),\n        \"Cache is only available for 32GiB and 64GiB sectors\"\n    );\n    info!(\"using parent_cache[{}]\", cache_entries);\n    if cache_entries == 32 * NODE_GIB {\n        Ok(INSTANCE_32_GIB.get_or_init(|| {\n            ParentCache::new(cache_entries, graph).expect(\"failed to fill 32GiB cache\")\n        }))\n    } else {\n        Ok(INSTANCE_64_GIB.get_or_init(|| {\n            ParentCache::new(cache_entries, graph).expect(\"failed to fill 64GiB cache\")\n        }))\n    }\n}\n\n// StackedGraph will hold two different (but related) `ParentCache`,\n#[derive(Debug, Clone)]\nstruct ParentCache {\n    /// This is a large list of fixed (parent) sized arrays.\n    /// `Vec<Vec<u32>>` was showing quite a large memory overhead, so this is layed out as a fixed boxed slice of memory.\n    cache: Box<[u32]>,\n}\n\nimpl ParentCache {\n    pub fn new<H, G>(cache_entries: u32, graph: &StackedGraph<H, G>) -> Result<Self>\n    where\n        H: Hasher,\n        G: Graph<H> + ParameterSetMetadata + Send + Sync,\n    {\n        info!(\"filling parents cache\");\n        let mut cache = vec![0u32; DEGREE * cache_entries as usize];\n\n        let base_degree = BASE_DEGREE;\n        let exp_degree = EXP_DEGREE;\n\n        cache\n            .par_chunks_mut(DEGREE)\n            .enumerate()\n            .try_for_each(|(node, entry)| -> Result<()> {\n                graph\n                    .base_graph()\n                    .parents(node, &mut entry[..base_degree])?;\n                graph.generate_expanded_parents(\n                    node,\n                    &mut entry[base_degree..base_degree + exp_degree],\n                );\n                Ok(())\n            })?;\n\n        info!(\"cache filled\");\n\n        Ok(ParentCache {\n            cache: cache.into_boxed_slice(),\n        })\n    }\n\n    /// Read a single cache element at position `node`.\n    #[inline]\n    pub fn read(&self, node: u32) -> &[u32] {\n        let start = node as usize * DEGREE;\n        let end = start + DEGREE;\n        &self.cache[start..end]\n    }\n}\n\n#[derive(Clone)]\npub struct StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + 'static,\n{\n    expansion_degree: usize,\n    
base_graph: G,\n    feistel_precomputed: FeistelPrecomputed,\n    id: String,\n    cache: Option<&'static ParentCache>,\n    _h: PhantomData<H>,\n}\n\nimpl<H, G> std::fmt::Debug for StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + 'static,\n{\n    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n        f.debug_struct(\"StackedGraph\")\n            .field(\"expansion_degree\", &self.expansion_degree)\n            .field(\"base_graph\", &self.base_graph)\n            .field(\"feistel_precomputed\", &self.feistel_precomputed)\n            .field(\"id\", &self.id)\n            .field(\"cache\", &self.cache)\n            .finish()\n    }\n}\n\npub type StackedBucketGraph<H> = StackedGraph<H, BucketGraph<H>>;\n\n#[inline]\nfn prefetch(parents: &[u32], data: &[u8]) {\n    for parent in parents {\n        let start = *parent as usize * NODE_SIZE;\n        let end = start + NODE_SIZE;\n\n        unsafe {\n            _mm_prefetch(data[start..end].as_ptr() as *const i8, _MM_HINT_T0);\n        }\n    }\n}\n\n#[inline]\nfn read_node<'a>(i: usize, parents: &[u32], data: &'a [u8]) -> &'a [u8] {\n    let start = parents[i] as usize * NODE_SIZE;\n    let end = start + NODE_SIZE;\n    &data[start..end]\n}\n\nimpl<H, G> StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata + Sync + Send,\n{\n    pub fn new(\n        base_graph: Option<G>,\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        seed: [u8; 28],\n    ) -> Result<Self> {\n        assert_eq!(base_degree, BASE_DEGREE);\n        assert_eq!(expansion_degree, EXP_DEGREE);\n        ensure!(nodes <= std::u32::MAX as usize, \"too many nodes\");\n\n        let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching;\n\n        let base_graph = match base_graph {\n            Some(graph) => graph,\n            None => G::new(nodes, base_degree, 0, seed)?,\n        };\n        let bg_id = base_graph.identifier();\n\n        let mut res = StackedGraph {\n            base_graph,\n            id: format!(\n                \"stacked_graph::StackedGraph{{expansion_degree: {} base_graph: {} }}\",\n                expansion_degree, bg_id,\n            ),\n            expansion_degree,\n            cache: None,\n            feistel_precomputed: feistel::precompute((expansion_degree * nodes) as feistel::Index),\n            _h: PhantomData,\n        };\n\n        if use_cache {\n            info!(\"using parents cache of unlimited size\");\n\n            let cache = parent_cache(nodes as u32, &res)?;\n            res.cache = Some(cache);\n        }\n\n        Ok(res)\n    }\n\n    pub fn copy_parents_data_exp(\n        &self,\n        node: u32,\n        base_data: &[u8],\n        exp_data: &[u8],\n        hasher: Sha256,\n    ) -> [u8; 32] {\n        if let Some(cache) = self.cache {\n            let cache_parents = cache.read(node as u32);\n            self.copy_parents_data_inner_exp(&cache_parents, base_data, exp_data, hasher)\n        } else {\n            let mut cache_parents = [0u32; DEGREE];\n\n            self.parents(node as usize, &mut cache_parents[..]).unwrap();\n            self.copy_parents_data_inner_exp(&cache_parents, base_data, exp_data, hasher)\n        }\n    }\n\n    pub fn copy_parents_data(&self, node: u32, base_data: &[u8], hasher: Sha256) -> [u8; 32] {\n        if let Some(cache) = self.cache {\n            let cache_parents = cache.read(node as u32);\n            self.copy_parents_data_inner(&cache_parents, base_data, hasher)\n   
     } else {\n            let mut cache_parents = [0u32; DEGREE];\n\n            self.parents(node as usize, &mut cache_parents[..]).unwrap();\n            self.copy_parents_data_inner(&cache_parents, base_data, hasher)\n        }\n    }\n\n    fn copy_parents_data_inner_exp(\n        &self,\n        cache_parents: &[u32],\n        base_data: &[u8],\n        exp_data: &[u8],\n        mut hasher: Sha256,\n    ) -> [u8; 32] {\n        prefetch(&cache_parents[..BASE_DEGREE], base_data);\n        prefetch(&cache_parents[BASE_DEGREE..], exp_data);\n\n        // fill buffer\n        let parents = [\n            read_node(0, cache_parents, base_data),\n            read_node(1, cache_parents, base_data),\n            read_node(2, cache_parents, base_data),\n            read_node(3, cache_parents, base_data),\n            read_node(4, cache_parents, base_data),\n            read_node(5, cache_parents, base_data),\n            read_node(6, cache_parents, exp_data),\n            read_node(7, cache_parents, exp_data),\n            read_node(8, cache_parents, exp_data),\n            read_node(9, cache_parents, exp_data),\n            read_node(10, cache_parents, exp_data),\n            read_node(11, cache_parents, exp_data),\n            read_node(12, cache_parents, exp_data),\n            read_node(13, cache_parents, exp_data),\n        ];\n\n        // round 1 (14)\n        hasher.input(&parents);\n\n        // round 2 (14)\n        hasher.input(&parents);\n\n        // round 3 (9)\n        hasher.input(&parents[..8]);\n        hasher.finish_with(&parents[8])\n    }\n\n    fn copy_parents_data_inner(\n        &self,\n        cache_parents: &[u32],\n        base_data: &[u8],\n        mut hasher: Sha256,\n    ) -> [u8; 32] {\n        prefetch(&cache_parents[..BASE_DEGREE], base_data);\n\n        // fill buffer\n        let parents = [\n            read_node(0, cache_parents, base_data),\n            read_node(1, cache_parents, base_data),\n            read_node(2, cache_parents, base_data),\n            read_node(3, cache_parents, base_data),\n            read_node(4, cache_parents, base_data),\n            read_node(5, cache_parents, base_data),\n        ];\n\n        // round 1 (0..6)\n        hasher.input(&parents);\n\n        // round 2 (6..12)\n        hasher.input(&parents);\n\n        // round 3 (12..18)\n        hasher.input(&parents);\n\n        // round 4 (18..24)\n        hasher.input(&parents);\n\n        // round 5 (24..30)\n        hasher.input(&parents);\n\n        // round 6 (30..36)\n        hasher.input(&parents);\n\n        // round 7 (37)\n        hasher.finish_with(parents[0])\n    }\n}\n\nimpl<H, G> ParameterSetMetadata for StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata,\n{\n    fn identifier(&self) -> String {\n        self.id.clone()\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.base_graph.sector_size()\n    }\n}\n\nimpl<H, G> Graph<H> for StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata + Sync + Send,\n{\n    type Key = Vec<u8>;\n\n    fn size(&self) -> usize {\n        self.base_graph().size()\n    }\n\n    fn degree(&self) -> usize {\n        self.base_graph.degree() + self.expansion_degree\n    }\n\n    #[inline]\n    fn parents(&self, node: usize, parents: &mut [u32]) -> Result<()> {\n        if let Some(cache) = self.cache {\n            // Read from the cache\n            let cache_parents = cache.read(node as u32);\n            parents.copy_from_slice(cache_parents);\n        } else {\n     
self.base_parents(node, &mut parents[..self.base_graph().degree()])?;\n\n            // expanded_parents takes raw_node\n            self.expanded_parents(\n                node,\n                &mut parents[self.base_graph().degree()\n                    ..self.base_graph().degree() + self.expansion_degree()],\n            );\n        }\n        Ok(())\n    }\n\n    fn seed(&self) -> [u8; 28] {\n        self.base_graph().seed()\n    }\n\n    fn new(\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        seed: [u8; 28],\n    ) -> Result<Self> {\n        Self::new_stacked(nodes, base_degree, expansion_degree, seed)\n    }\n\n    fn create_key(\n        &self,\n        _id: &H::Domain,\n        _node: usize,\n        _parents: &[u32],\n        _base_parents_data: &[u8],\n        _exp_parents_data: Option<&[u8]>,\n    ) -> Result<Self::Key> {\n        unimplemented!(\"not used\");\n    }\n}\n\nimpl<'a, H, G> StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata + Sync + Send,\n{\n    /// Assign one parent to `node` using Chung's construction with a reversible\n    /// permutation function from a Feistel cipher (controlled by `invert_permutation`).\n    fn correspondent(&self, node: usize, i: usize) -> u32 {\n        // We can't just generate random values between `[0, size())`, we need to\n        // expand the search space (domain) to accommodate every unique parent assignment\n        // generated here. This can be visualized more clearly as a matrix where each\n        // new parent of each new node is assigned a unique `index`:\n        //\n        //\n        //          | Parent 1 | Parent 2 | Parent 3 |\n        //\n        // | Node 1 |     0    |     1    |     2    |\n        //\n        // | Node 2 |     3    |     4    |     5    |\n        //\n        // | Node 3 |     6    |     7    |     8    |\n        //\n        // | Node 4 |     9    |     A    |     B    |\n        //\n        // This starting `index` will be shuffled to another position to generate a\n        // parent-child relationship, e.g., if generating the parents for the second node,\n        // `permute` would be called with values `[3; 4; 5]` that would be mapped to other\n        // indexes in the search space of `[0, B]`, say, values `[A; 0; 4]`, that would\n        // correspond to nodes numbered `[4; 1; 2]` which will become the parents of the\n        // second node. 
In a later pass, invalid parents like 2, self-referencing, and parents\n        // with indexes bigger than 2 (if in the `forward` direction, smaller than 2 if the\n        // inverse), will be removed.\n        let a = (node * self.expansion_degree) as feistel::Index + i as feistel::Index;\n\n        let transformed = feistel::permute(\n            self.size() as feistel::Index * self.expansion_degree as feistel::Index,\n            a,\n            &FEISTEL_KEYS,\n            self.feistel_precomputed,\n        );\n        transformed as u32 / self.expansion_degree as u32\n        // Collapse the output in the matrix search space to the row of the corresponding\n        // node (losing the column information, that will be regenerated later when calling\n        // back this function in the `reversed` direction).\n    }\n\n    fn generate_expanded_parents(&self, node: usize, expanded_parents: &mut [u32]) {\n        debug_assert_eq!(expanded_parents.len(), self.expansion_degree);\n        for (i, el) in expanded_parents.iter_mut().enumerate() {\n            *el = self.correspondent(node, i);\n        }\n    }\n\n    pub fn new_stacked(\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        seed: [u8; 28],\n    ) -> Result<Self> {\n        Self::new(None, nodes, base_degree, expansion_degree, seed)\n    }\n\n    pub fn base_graph(&self) -> &G {\n        &self.base_graph\n    }\n\n    pub fn expansion_degree(&self) -> usize {\n        self.expansion_degree\n    }\n\n    pub fn base_parents(&self, node: usize, parents: &mut [u32]) -> Result<()> {\n        if let Some(cache) = self.cache {\n            // Read from the cache\n            let cache_parents = cache.read(node as u32);\n            parents.copy_from_slice(&cache_parents[..self.base_graph().degree()]);\n            Ok(())\n        } else {\n            // No cache usage, generate on demand.\n            self.base_graph().parents(node, parents)\n        }\n    }\n\n    /// Assign `self.expansion_degree` parents to `node` using an invertible permutation\n    /// that is applied one way for the forward layers and the other way for the reversed\n    /// ones.\n    #[inline]\n    pub fn expanded_parents(&self, node: usize, parents: &mut [u32]) {\n        if let Some(cache) = self.cache {\n            // Read from the cache\n            let cache_parents = cache.read(node as u32);\n            parents.copy_from_slice(&cache_parents[self.base_graph().degree()..]);\n        } else {\n            // No cache usage, generate on demand.\n            self.generate_expanded_parents(node, parents);\n        }\n    }\n}\n\nimpl<H, G> PartialEq for StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H>,\n{\n    fn eq(&self, other: &StackedGraph<H, G>) -> bool {\n        self.base_graph == other.base_graph && self.expansion_degree == other.expansion_degree\n    }\n}\n\nimpl<H, G> Eq for StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H>,\n{\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::collections::HashSet;\n\n    // Test that 3 (or more) rounds of the Feistel cipher can be used\n    // as a pseudorandom permutation, that is, each input will be mapped\n    // to a unique output (and though not tested here, since the cipher\n    // is symmetric, the decryption rounds also work as the inverse\n    // permutation), for more details see:\n    // https://en.wikipedia.org/wiki/Feistel_cipher#Theoretical_work.\n    #[test]\n    fn test_shuffle() {\n        let n = 2_u64.pow(10);\n        let d 
= EXP_DEGREE as u64;\n        // Use a relatively small value of `n` as Feistel is expensive (but big\n        // enough that `n >> d`).\n\n        let mut shuffled: HashSet<u64> = HashSet::with_capacity((n * d) as usize);\n\n        let feistel_keys = &[1, 2, 3, 4];\n        let feistel_precomputed = feistel::precompute((n * d) as feistel::Index);\n\n        for i in 0..n {\n            for k in 0..d {\n                let permuted =\n                    feistel::permute(n * d, i * d + k, feistel_keys, feistel_precomputed);\n\n                // Since the permutation implies a one-to-one correspondence,\n                // traversing the entire input space should generate the entire\n                // output space (in `shuffled`) without repetitions (since a duplicate\n                // output would imply there is another output that wasn't generated\n                // and the permutation would be incomplete).\n                assert!(shuffled.insert(permuted));\n            }\n        }\n\n        // Actually implied by the previous `assert!`, this is left in place as an\n        // extra safety check that indeed the permutation preserved all the output\n        // space (of `n * d` nodes) without repetitions (which the `HashSet` would\n        // have skipped as duplicates).\n        assert_eq!(shuffled.len(), (n * d) as usize);\n    }\n}\n"
  },
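  {
    "path": "storage-proofs/porep/src/stacked/vanilla/feistel_parents.sketch.rs",
    "content": "//! Illustrative sketch (hypothetical file, not part of the original sources):\n//! the index arithmetic behind `correspondent` in `graph.rs`. Every\n//! (node, parent-slot) pair gets a unique cell index in an `n x d` matrix, the\n//! index is pushed through a keyed permutation of `[0, n * d)`, and the result\n//! is collapsed back to a node by dropping the column. A toy 4-round Feistel\n//! with cycle-walking stands in for `feistel::permute`.\n\nfn mix(right: u32, key: u32) -> u32 {\n    // Arbitrary deterministic mixing; any function of (right, key) keeps the\n    // Feistel network a permutation.\n    (right ^ key).wrapping_mul(0x9e37_79b9).rotate_left(13)\n}\n\n/// Bijectively permute `index` within `[0, num_elems)` via a cycle-walking Feistel.\nfn permute(num_elems: u64, index: u64, keys: &[u32; 4]) -> u64 {\n    // Size the Feistel to the smallest even bit-width covering the domain.\n    let mut bits = 64 - (num_elems - 1).leading_zeros() as u64;\n    if bits % 2 == 1 {\n        bits += 1;\n    }\n    let half_bits = bits / 2;\n    let mask = (1u64 << half_bits) - 1;\n\n    let mut idx = index;\n    loop {\n        let (mut left, mut right) = (idx >> half_bits, idx & mask);\n        for &key in keys {\n            let new_right = left ^ (u64::from(mix(right as u32, key)) & mask);\n            left = right;\n            right = new_right;\n        }\n        idx = (left << half_bits) | right;\n        // Out-of-range outputs are re-permuted until they land in the domain.\n        if idx < num_elems {\n            return idx;\n        }\n    }\n}\n\nfn expander_parent(node: u64, i: u64, num_nodes: u64, exp_degree: u64) -> u64 {\n    let index = node * exp_degree + i; // unique cell in the node x slot matrix\n    let permuted = permute(num_nodes * exp_degree, index, &[1, 2, 3, 4]);\n    permuted / exp_degree // keep the row (the node), drop the column\n}\n\nfn main() {\n    let (n, d) = (1u64 << 10, 8u64);\n    for i in 0..d {\n        println!(\"expander parent {} of node 5: {}\", i, expander_parent(5, i, n, d));\n    }\n}\n"
  },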
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/hash.rs",
    "content": "use neptune::poseidon::Poseidon;\nuse paired::bls12_381::Fr;\nuse storage_proofs_core::hasher::types::{POSEIDON_CONSTANTS_11, POSEIDON_CONSTANTS_2};\n\n/// Hash all elements in the given column.\npub fn hash_single_column(column: &[Fr]) -> Fr {\n    match column.len() {\n        2 => {\n            let mut hasher = Poseidon::new_with_preimage(column, &*POSEIDON_CONSTANTS_2);\n            hasher.hash()\n        }\n        11 => {\n            let mut hasher = Poseidon::new_with_preimage(column, &*POSEIDON_CONSTANTS_11);\n            hasher.hash()\n        }\n        _ => panic!(\"unsupported column size: {}\", column.len()),\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/labeling_proof.rs",
    "content": "use std::marker::PhantomData;\n\nuse log::trace;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\nuse storage_proofs_core::{fr32::bytes_into_fr_repr_safe, hasher::Hasher};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct LabelingProof<H: Hasher> {\n    pub(crate) parents: Vec<H::Domain>,\n    pub(crate) node: u64,\n    #[serde(skip)]\n    _h: PhantomData<H>,\n}\n\nimpl<H: Hasher> LabelingProof<H> {\n    pub fn new(node: u64, parents: Vec<H::Domain>) -> Self {\n        LabelingProof {\n            node,\n            parents,\n            _h: PhantomData,\n        }\n    }\n\n    fn create_label(&self, replica_id: &H::Domain) -> H::Domain {\n        let mut hasher = Sha256::new();\n        let mut buffer = [0u8; 64];\n\n        // replica_id\n        buffer[..32].copy_from_slice(AsRef::<[u8]>::as_ref(replica_id));\n\n        // node id\n        buffer[32..40].copy_from_slice(&(self.node as u64).to_be_bytes());\n\n        hasher.input(&buffer[..]);\n\n        // parents\n        for parent in &self.parents {\n            let data = AsRef::<[u8]>::as_ref(parent);\n            hasher.input(data);\n        }\n\n        bytes_into_fr_repr_safe(hasher.result().as_ref()).into()\n    }\n\n    pub fn verify(&self, replica_id: &H::Domain, expected_label: &H::Domain) -> bool {\n        let label = self.create_label(replica_id);\n        check_eq!(expected_label, &label);\n\n        true\n    }\n}\n"
  },
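  {
    "path": "storage-proofs/porep/src/stacked/vanilla/labeling_proof_sketch.rs",
    "content": "//! Illustrative sketch, not a file from the original crate: spells out the\n//! fixed 64-byte prefix hashed by `LabelingProof::create_label` (the parent\n//! labels are absorbed afterwards). `label_prefix` is a hypothetical helper.\n\n/// Layout: replica_id (32 bytes) || layer_index BE (4) || node BE (8) || zero padding (20).\n#[allow(dead_code)]\nfn label_prefix(replica_id: &[u8; 32], layer_index: u32, node: u64) -> [u8; 64] {\n    let mut buffer = [0u8; 64];\n    buffer[..32].copy_from_slice(replica_id);\n    buffer[32..36].copy_from_slice(&layer_index.to_be_bytes());\n    buffer[36..44].copy_from_slice(&node.to_be_bytes());\n    buffer\n}\n"
  },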
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/macros.rs",
    "content": "/// Checks that the two passed values are equal. If they are not equal it prints a trace and returns `false`.\nmacro_rules! check_eq {\n    ($left:expr , $right:expr,) => ({\n        check_eq!($left, $right)\n    });\n    ($left:expr , $right:expr) => ({\n        match (&($left), &($right)) {\n            (left_val, right_val) => {\n                if !(*left_val == *right_val) {\n                    trace!(\"check failed: `(left == right)`\\\n                          \\n\\\n                          \\n{}\\\n                          \\n\",\n                           pretty_assertions::Comparison::new(left_val, right_val));\n                    return false;\n                }\n            }\n        }\n    });\n    ($left:expr , $right:expr, $($arg:tt)*) => ({\n        match (&($left), &($right)) {\n            (left_val, right_val) => {\n                if !(*left_val == *right_val) {\n                    trace!(\"check failed: `(left == right)`: {}\\\n                          \\n\\\n                          \\n{}\\\n                          \\n\",\n                           format_args!($($arg)*),\n                           pretty_assertions::Comparison::new(left_val, right_val));\n                    return false;\n                }\n            }\n        }\n    });\n}\n\n/// Checks that the passed in value is true. If they are not equal it prints a trace and returns `false`.\nmacro_rules! check {\n    ($val:expr) => {\n        if !$val {\n            trace!(\"expected {:?} to be true\", dbg!($val));\n            return false;\n        }\n    };\n}\n"
  },
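  {
    "path": "storage-proofs/porep/src/stacked/vanilla/macros_usage_sketch.rs",
    "content": "//! Illustrative sketch, not a file from the original crate: shows the intended\n//! use of `check!`/`check_eq!` (in scope via `#[macro_use] mod macros;` in\n//! `mod.rs`). Both only make sense inside functions returning `bool`: a failed\n//! check emits a `trace!` log and early-returns `false` instead of panicking.\n//! `verify_widths` is a hypothetical example.\n\nuse log::trace;\n\n#[allow(dead_code)]\nfn verify_widths(expected: usize, actual: usize) -> bool {\n    // On mismatch, logs a `pretty_assertions` diff and returns `false`.\n    check_eq!(expected, actual);\n    // On a false condition, logs and returns `false`.\n    check!(actual > 0);\n    true\n}\n"
  },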
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/memory_handling.rs",
    "content": "use std::cell::UnsafeCell;\nuse std::fs::File;\nuse std::marker::{PhantomData, Sync};\nuse std::mem::size_of;\nuse std::path::PathBuf;\n\nuse anyhow::Result;\nuse byte_slice_cast::*;\nuse log::*;\nuse mapr::{Mmap, MmapMut, MmapOptions};\nuse std::sync::atomic::{AtomicU64, AtomicUsize, Ordering::SeqCst};\n\npub struct CacheReader<T> {\n    file: File,\n    bufs: UnsafeCell<[Mmap; 2]>,\n    size: usize,\n    degree: usize,\n    window_size: usize,\n    cur_window: AtomicUsize,\n    cur_window_safe: AtomicUsize,\n    _t: PhantomData<T>,\n}\n\nunsafe impl<T> Sync for CacheReader<T> {}\n\nimpl<T: FromByteSlice> CacheReader<T> {\n    pub fn new(filename: &PathBuf, window_size: Option<usize>, degree: usize) -> Result<Self> {\n        info!(\"initializing cache\");\n        let file = File::open(filename)?;\n        let size = File::metadata(&file)?.len() as usize;\n        let window_size = match window_size {\n            Some(s) => {\n                if s < size {\n                    assert_eq!(\n                        0,\n                        size % degree * size_of::<T>(),\n                        \"window size is not multiple of element size\"\n                    );\n                };\n                s\n            }\n            None => {\n                let num_windows = 8;\n                assert_eq!(0, size % num_windows);\n                size / num_windows\n            }\n        };\n\n        let buf0 = Self::map_buf(0, window_size, &file)?;\n        let buf1 = Self::map_buf(window_size as u64, window_size, &file)?;\n        Ok(Self {\n            file,\n            bufs: UnsafeCell::new([buf0, buf1]),\n            size,\n            degree,\n            window_size,\n            // The furthest window from which the cache has yet been read.\n            cur_window: AtomicUsize::new(0),\n            cur_window_safe: AtomicUsize::new(0),\n            _t: PhantomData::<T>,\n        })\n    }\n\n    #[inline]\n    fn get_bufs(&self) -> &[Mmap] {\n        unsafe { &std::slice::from_raw_parts((*self.bufs.get()).as_ptr(), 2) }\n    }\n\n    #[inline]\n    #[allow(clippy::mut_from_ref)]\n    unsafe fn get_mut_bufs(&self) -> &mut [Mmap] {\n        std::slice::from_raw_parts_mut((*self.bufs.get()).as_mut_ptr(), 2)\n    }\n\n    #[allow(dead_code)]\n    // This is unused, but included to document the meaning of its components.\n    // This allows splitting the reset in order to avoid a pause.\n    pub fn reset(&self) -> Result<()> {\n        self.start_reset();\n        self.finish_reset();\n        Ok(())\n    }\n\n    pub fn start_reset(&self) -> Result<()> {\n        let buf0 = Self::map_buf(0, self.window_size, &self.file)?;\n        let bufs = unsafe { self.get_mut_bufs() };\n        bufs[0] = buf0;\n        Ok(())\n    }\n    pub fn finish_reset(&self) -> Result<()> {\n        let buf1 = Self::map_buf(self.window_size as u64, self.window_size, &self.file)?;\n        let bufs = unsafe { self.get_mut_bufs() };\n        bufs[1] = buf1;\n        self.cur_window.store(0, SeqCst);\n        self.cur_window_safe.store(0, SeqCst);\n        Ok(())\n    }\n\n    fn map_buf(offset: u64, len: usize, file: &File) -> Result<Mmap> {\n        match unsafe {\n            MmapOptions::new()\n                .offset(offset)\n                .len(len)\n                .private()\n                .lock()\n                .map(file)\n        }\n        .and_then(|mut parents| {\n            parents.mlock()?;\n            Ok(parents)\n        }) {\n            Ok(parents) => 
Ok(parents),\n            Err(err) => {\n                // fallback to not locked if permissions are not available\n                warn!(\"failed to lock map {:?}, falling back\", err);\n                let parents = unsafe {\n                    MmapOptions::new()\n                        .offset(offset)\n                        .len(len)\n                        .private()\n                        .map(file)?\n                };\n                Ok(parents)\n            }\n        }\n    }\n\n    #[inline]\n    fn window_element_count(&self) -> usize {\n        self.window_size / size_of::<T>()\n    }\n\n    /// `pos` is in units of `T`.\n    #[inline]\n    pub fn consumer_slice_at(&self, pos: usize) -> &[T] {\n        assert!(\n            pos < self.size,\n            \"pos {} out of range for buffer of size {}\",\n            pos,\n            self.size\n        );\n        let window = pos / self.window_element_count();\n        let pos = pos % self.window_element_count();\n        let targeted_buf = &self.get_bufs()[window % 2];\n\n        &targeted_buf.as_slice_of::<T>().unwrap()[pos..]\n    }\n\n    /// `pos` is in units of `T`.\n    #[inline]\n    pub fn slice_at(&self, pos: usize, consumer: &AtomicU64) -> &[T] {\n        assert!(\n            pos < self.size,\n            \"pos {} out of range for buffer of size {}\",\n            pos,\n            self.size\n        );\n        let window = pos / self.window_element_count();\n        if window == 1 {\n            self.cur_window.compare_and_swap(0, 1, SeqCst);\n            self.cur_window_safe.compare_and_swap(0, 1, SeqCst);\n        }\n\n        let pos = pos % self.window_element_count();\n\n        // Check using `cur_window_safe`, to ensure we wait until the window is safe to use.\n        // If we were to instead check `cur_window`, it could have been incremented but the mapping not completed yet.\n        let cur = self.cur_window_safe.load(SeqCst);\n        if window > cur {\n            // Only one producer will successfully increment `cur_window`.\n            // We need this second atomic because we cannot increment `cur_window_safe` until after the window has been advanced.\n            let instant_cur = self.cur_window.compare_and_swap(cur, cur + 1, SeqCst);\n\n            if instant_cur == cur {\n                // We successfully incremented `self.cur_window`, so we are responsible for advancing the window.\n\n                {\n                    // Wait until the consumer has advanced far enough that it is safe to load the unused buffer.\n                    let safe_consumer = (window - 1) * (self.window_element_count() / self.degree);\n                    while (consumer.load(SeqCst) as usize) < safe_consumer {}\n                }\n\n                self.advance_rear_window(window);\n\n                // Now it is safe to use the new window.\n                self.cur_window_safe.fetch_add(1, SeqCst);\n            } else {\n                // We failed to increment `self.cur_window`, so we must wait for the window to be advanced before continuing.\n                // Wait until it is safe to use the new current window.\n                while self.cur_window_safe.load(SeqCst) != cur + 1 {}\n            }\n        }\n\n        let targeted_buf = &self.get_bufs()[window % 2];\n\n        &targeted_buf.as_slice_of::<T>().unwrap()[pos..]\n    }\n\n    fn advance_rear_window(&self, new_window: usize) {\n        assert!(new_window as usize * self.window_size < self.size);\n\n        let replace_idx = (new_window % 
2) as usize;\n\n        let new_buf = Self::map_buf(\n            (new_window * self.window_size) as u64,\n            self.window_size as usize,\n            &self.file,\n        )\n        .unwrap();\n\n        unsafe {\n            self.get_mut_bufs()[replace_idx] = new_buf;\n        }\n    }\n}\n\nfn allocate_layer(sector_size: usize) -> Result<MmapMut> {\n    match MmapOptions::new()\n        .len(sector_size)\n        .private()\n        .clone()\n        .lock()\n        .map_anon()\n        .and_then(|mut layer| {\n            layer.mlock()?;\n            Ok(layer)\n        }) {\n        Ok(layer) => Ok(layer),\n        Err(err) => {\n            // fallback to not locked if permissions are not available\n            warn!(\"failed to lock map {:?}, falling back\", err);\n            let layer = MmapOptions::new().len(sector_size).private().map_anon()?;\n            Ok(layer)\n        }\n    }\n}\n\npub fn setup_create_label_memory(\n    sector_size: usize,\n    degree: usize,\n    window_size: Option<usize>,\n    cache_path: &PathBuf,\n) -> Result<(CacheReader<u32>, MmapMut, MmapMut)> {\n    let parents_cache = CacheReader::new(cache_path, window_size, degree)?;\n    let layer_labels = allocate_layer(sector_size)?;\n    let exp_labels = allocate_layer(sector_size)?;\n\n    Ok((parents_cache, layer_labels, exp_labels))\n}\n"
  },
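  {
    "path": "storage-proofs/porep/src/stacked/vanilla/memory_handling_sketch.rs",
    "content": "//! Illustrative sketch, not a file from the original crate: wires up the\n//! double-buffered `CacheReader` from `memory_handling.rs`. A producer walks\n//! the parent cache with `slice_at`, while the `consumer` counter records how\n//! many nodes have been labeled, which gates remapping of the rear window.\n//! `example_read` and the degree of 6 are hypothetical.\n\nuse std::path::PathBuf;\nuse std::sync::atomic::{AtomicU64, Ordering::SeqCst};\n\nuse anyhow::Result;\n\nuse crate::stacked::vanilla::memory_handling::setup_create_label_memory;\n\n#[allow(dead_code)]\nfn example_read(cache_path: &PathBuf, sector_size: usize) -> Result<()> {\n    // Default window sizing (file size / 8 windows) and a base degree of 6.\n    let (parents_cache, _layer_labels, _exp_labels) =\n        setup_create_label_memory(sector_size, 6, None, cache_path)?;\n\n    let consumer = AtomicU64::new(0);\n    // Producer side: the parent indices of node 0 are the first 6 `u32`s.\n    let parents = &parents_cache.slice_at(0, &consumer)[..6];\n    log::trace!(\"parents of node 0: {:?}\", parents);\n    // Consumer side: after labeling a node, advance the counter so the\n    // producer may remap windows that are no longer needed.\n    consumer.fetch_add(1, SeqCst);\n    Ok(())\n}\n"
  },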
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/mod.rs",
    "content": "mod challenges;\nmod column;\nmod column_proof;\nmod create_label;\nmod encoding_proof;\nmod graph;\npub(crate) mod hash;\nmod labeling_proof;\nmod params;\nmod porep;\nmod proof;\nmod proof_scheme;\n\npub use self::challenges::{ChallengeRequirements, LayerChallenges};\npub use self::column::Column;\npub use self::column_proof::ColumnProof;\npub use self::create_label::*;\npub use self::encoding_proof::EncodingProof;\npub use self::graph::{StackedBucketGraph, StackedGraph, EXP_DEGREE};\npub use self::labeling_proof::LabelingProof;\npub use self::params::*;\npub use self::proof::{StackedDrg, TOTAL_PARENTS};\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/params.rs",
    "content": "use std::marker::PhantomData;\nuse std::path::{Path, PathBuf};\n\nuse anyhow::Context;\nuse generic_array::typenum::{self, Unsigned};\nuse log::trace;\nuse merkletree::merkle::get_merkle_tree_leafs;\nuse merkletree::store::{DiskStore, Store, StoreConfig};\nuse serde::{Deserialize, Serialize};\nuse storage_proofs_core::{\n    drgraph::Graph,\n    error::Result,\n    fr32::bytes_into_fr_repr_safe,\n    hasher::{Domain, Hasher},\n    merkle::*,\n    parameter_cache::ParameterSetMetadata,\n    util::data_at_node,\n};\n\nuse super::{\n    column::Column, column_proof::ColumnProof, graph::StackedBucketGraph, EncodingProof,\n    LabelingProof, LayerChallenges,\n};\n\npub const BINARY_ARITY: usize = 2;\npub const QUAD_ARITY: usize = 4;\npub const OCT_ARITY: usize = 8;\n\n#[derive(Debug, Clone)]\npub struct SetupParams {\n    // Number of nodes\n    pub nodes: usize,\n\n    // Base degree of DRG\n    pub degree: usize,\n\n    pub expansion_degree: usize,\n\n    // Random seed\n    pub seed: [u8; 28],\n\n    pub layer_challenges: LayerChallenges,\n}\n\n#[derive(Debug)]\npub struct PublicParams<Tree>\nwhere\n    Tree: 'static + MerkleTreeTrait,\n{\n    pub graph: StackedBucketGraph<Tree::Hasher>,\n    pub layer_challenges: LayerChallenges,\n    _t: PhantomData<Tree>,\n}\n\nimpl<Tree> Clone for PublicParams<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    fn clone(&self) -> Self {\n        Self {\n            graph: self.graph.clone(),\n            layer_challenges: self.layer_challenges.clone(),\n            _t: Default::default(),\n        }\n    }\n}\n\nimpl<Tree> PublicParams<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    pub fn new(graph: StackedBucketGraph<Tree::Hasher>, layer_challenges: LayerChallenges) -> Self {\n        PublicParams {\n            graph,\n            layer_challenges,\n            _t: PhantomData,\n        }\n    }\n}\n\nimpl<Tree> ParameterSetMetadata for PublicParams<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    fn identifier(&self) -> String {\n        format!(\n            \"layered_drgporep::PublicParams{{ graph: {}, challenges: {:?}, tree: {} }}\",\n            self.graph.identifier(),\n            self.layer_challenges,\n            Tree::display()\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.graph.sector_size()\n    }\n}\n\nimpl<'a, Tree> From<&'a PublicParams<Tree>> for PublicParams<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    fn from(other: &PublicParams<Tree>) -> PublicParams<Tree> {\n        PublicParams::new(other.graph.clone(), other.layer_challenges.clone())\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct PublicInputs<T: Domain, S: Domain> {\n    pub replica_id: T,\n    pub seed: [u8; 32],\n    pub tau: Option<Tau<T, S>>,\n    /// Partition index\n    pub k: Option<usize>,\n}\n\nimpl<T: Domain, S: Domain> PublicInputs<T, S> {\n    pub fn challenges(\n        &self,\n        layer_challenges: &LayerChallenges,\n        leaves: usize,\n        partition_k: Option<usize>,\n    ) -> Vec<usize> {\n        let k = partition_k.unwrap_or(0);\n\n        layer_challenges.derive::<T>(leaves, &self.replica_id, &self.seed, k as u8)\n    }\n}\n\n#[derive(Debug)]\npub struct PrivateInputs<Tree: MerkleTreeTrait, G: Hasher> {\n    pub p_aux: PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n    pub t_aux: TemporaryAuxCache<Tree, G>,\n}\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct Proof<Tree: MerkleTreeTrait, G: Hasher> {\n    #[serde(bound(\n        serialize = \"MerkleProof<G, typenum::U2>: Serialize\",\n        
deserialize = \"MerkleProof<G, typenum::U2>: Deserialize<'de>\"\n    ))]\n    pub comm_d_proofs: MerkleProof<G, typenum::U2>,\n    #[serde(bound(\n        serialize = \"MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>: Serialize\",\n        deserialize = \"MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>: Deserialize<'de>\"\n    ))]\n    pub comm_r_last_proof:\n        MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n    #[serde(bound(\n        serialize = \"ReplicaColumnProof<MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,>: Serialize\",\n        deserialize = \"ReplicaColumnProof<MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>: Deserialize<'de>\"\n    ))]\n    pub replica_column_proofs: ReplicaColumnProof<\n        MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n    >,\n    #[serde(bound(\n        serialize = \"LabelingProof<Tree::Hasher>: Serialize\",\n        deserialize = \"LabelingProof<Tree::Hasher>: Deserialize<'de>\"\n    ))]\n    /// Indexed by layer in 1..layers.\n    pub labeling_proofs: Vec<LabelingProof<Tree::Hasher>>,\n    #[serde(bound(\n        serialize = \"EncodingProof<Tree::Hasher>: Serialize\",\n        deserialize = \"EncodingProof<Tree::Hasher>: Deserialize<'de>\"\n    ))]\n    pub encoding_proof: EncodingProof<Tree::Hasher>,\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> Clone for Proof<Tree, G> {\n    fn clone(&self) -> Self {\n        Self {\n            comm_d_proofs: self.comm_d_proofs.clone(),\n            comm_r_last_proof: self.comm_r_last_proof.clone(),\n            replica_column_proofs: self.replica_column_proofs.clone(),\n            labeling_proofs: self.labeling_proofs.clone(),\n            encoding_proof: self.encoding_proof.clone(),\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> Proof<Tree, G> {\n    pub fn comm_r_last(&self) -> <Tree::Hasher as Hasher>::Domain {\n        self.comm_r_last_proof.root()\n    }\n\n    pub fn comm_c(&self) -> <Tree::Hasher as Hasher>::Domain {\n        self.replica_column_proofs.c_x.root()\n    }\n\n    /// Verify the full proof.\n    pub fn verify(\n        &self,\n        pub_params: &PublicParams<Tree>,\n        pub_inputs: &PublicInputs<<Tree::Hasher as Hasher>::Domain, <G as Hasher>::Domain>,\n        challenge: usize,\n        graph: &StackedBucketGraph<Tree::Hasher>,\n    ) -> bool {\n        let replica_id = &pub_inputs.replica_id;\n\n        check!(challenge < graph.size());\n        check!(pub_inputs.tau.is_some());\n\n        // Verify initial data layer\n        trace!(\"verify initial data layer\");\n\n        check!(self.comm_d_proofs.proves_challenge(challenge));\n\n        if let Some(ref tau) = pub_inputs.tau {\n            check_eq!(&self.comm_d_proofs.root(), &tau.comm_d);\n        } else {\n            return false;\n        }\n\n        // Verify replica column openings\n        trace!(\"verify replica column openings\");\n        let mut parents = vec![0; graph.degree()];\n        graph.parents(challenge, &mut parents).unwrap(); // FIXME: error handling\n        check!(self.replica_column_proofs.verify(challenge, &parents));\n\n        check!(self.verify_final_replica_layer(challenge));\n\n        check!(self.verify_labels(replica_id, &pub_params.layer_challenges));\n\n        trace!(\"verify encoding\");\n\n        check!(self.encoding_proof.verify::<G>(\n            replica_id,\n            
&self.comm_r_last_proof.leaf(),\n            &self.comm_d_proofs.leaf()\n        ));\n\n        true\n    }\n\n    /// Verify all labels.\n    fn verify_labels(\n        &self,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        layer_challenges: &LayerChallenges,\n    ) -> bool {\n        // Verify Labels Layer 1..layers\n        for layer in 1..=layer_challenges.layers() {\n            trace!(\"verify labeling (layer: {})\", layer,);\n\n            check!(self.labeling_proofs.get(layer - 1).is_some());\n            let labeling_proof = self.labeling_proofs.get(layer - 1).unwrap();\n            let labeled_node = self\n                .replica_column_proofs\n                .c_x\n                .get_node_at_layer(layer)\n                .unwrap(); // FIXME: error handling\n            check!(labeling_proof.verify(replica_id, labeled_node));\n        }\n\n        true\n    }\n\n    /// Verify final replica layer openings\n    fn verify_final_replica_layer(&self, challenge: usize) -> bool {\n        trace!(\"verify final replica layer openings\");\n        check!(self.comm_r_last_proof.proves_challenge(challenge));\n\n        true\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ReplicaColumnProof<Proof: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"ColumnProof<Proof>: Serialize\",\n        deserialize = \"ColumnProof<Proof>: Deserialize<'de>\"\n    ))]\n    pub c_x: ColumnProof<Proof>,\n    #[serde(bound(\n        serialize = \"ColumnProof<Proof>: Serialize\",\n        deserialize = \"ColumnProof<Proof>: Deserialize<'de>\"\n    ))]\n    pub drg_parents: Vec<ColumnProof<Proof>>,\n    #[serde(bound(\n        serialize = \"ColumnProof<Proof>: Serialize\",\n        deserialize = \"ColumnProof<Proof>: Deserialize<'de>\"\n    ))]\n    pub exp_parents: Vec<ColumnProof<Proof>>,\n}\n\nimpl<Proof: MerkleProofTrait> ReplicaColumnProof<Proof> {\n    pub fn verify(&self, challenge: usize, parents: &[u32]) -> bool {\n        let expected_comm_c = self.c_x.root();\n\n        trace!(\"  verify c_x\");\n        check!(self.c_x.verify(challenge as u32, &expected_comm_c));\n\n        trace!(\"  verify drg_parents\");\n        for (proof, parent) in self.drg_parents.iter().zip(parents.iter()) {\n            check!(proof.verify(*parent, &expected_comm_c));\n        }\n\n        trace!(\"  verify exp_parents\");\n        for (proof, parent) in self\n            .exp_parents\n            .iter()\n            .zip(parents.iter().skip(self.drg_parents.len()))\n        {\n            check!(proof.verify(*parent, &expected_comm_c));\n        }\n\n        true\n    }\n}\n\npub type TransformedLayers<Tree, G> = (\n    Tau<<<Tree as MerkleTreeTrait>::Hasher as Hasher>::Domain, <G as Hasher>::Domain>,\n    PersistentAux<<<Tree as MerkleTreeTrait>::Hasher as Hasher>::Domain>,\n    TemporaryAux<Tree, G>,\n);\n\n/// Tau for a single partition.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct Tau<D: Domain, E: Domain> {\n    pub comm_d: E,\n    pub comm_r: D,\n}\n\n/// Stored alongside the sector on disk.\n#[derive(Default, Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]\npub struct PersistentAux<D> {\n    pub comm_c: D,\n    pub comm_r_last: D,\n}\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct TemporaryAux<Tree: MerkleTreeTrait, G: Hasher> {\n    /// The encoded nodes for 1..layers.\n    #[serde(bound(\n        serialize = \"StoreConfig: Serialize\",\n        deserialize = \"StoreConfig: Deserialize<'de>\"\n    ))]\n    pub labels: 
Labels<Tree>,\n    pub tree_d_config: StoreConfig,\n    pub tree_r_last_config: StoreConfig,\n    pub tree_c_config: StoreConfig,\n    pub _g: PhantomData<G>,\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> Clone for TemporaryAux<Tree, G> {\n    fn clone(&self) -> Self {\n        Self {\n            labels: self.labels.clone(),\n            tree_d_config: self.tree_d_config.clone(),\n            tree_r_last_config: self.tree_r_last_config.clone(),\n            tree_c_config: self.tree_c_config.clone(),\n            _g: Default::default(),\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> TemporaryAux<Tree, G> {\n    pub fn set_cache_path<P: AsRef<Path>>(&mut self, cache_path: P) {\n        let cp = cache_path.as_ref().to_path_buf();\n        for label in self.labels.labels.iter_mut() {\n            label.path = cp.clone();\n        }\n        self.tree_d_config.path = cp.clone();\n        self.tree_r_last_config.path = cp.clone();\n        self.tree_c_config.path = cp;\n    }\n\n    pub fn labels_for_layer(\n        &self,\n        layer: usize,\n    ) -> Result<DiskStore<<Tree::Hasher as Hasher>::Domain>> {\n        self.labels.labels_for_layer(layer)\n    }\n\n    pub fn domain_node_at_layer(\n        &self,\n        layer: usize,\n        node_index: u32,\n    ) -> Result<<Tree::Hasher as Hasher>::Domain> {\n        Ok(self.labels_for_layer(layer)?.read_at(node_index as usize)?)\n    }\n\n    pub fn column(&self, column_index: u32) -> Result<Column<Tree::Hasher>> {\n        self.labels.column(column_index)\n    }\n\n    // 'clear_temp' will discard all persisted merkle and layer data\n    // that is no longer required.\n    pub fn clear_temp(t_aux: TemporaryAux<Tree, G>) -> Result<()> {\n        let cached = |config: &StoreConfig| {\n            Path::new(&StoreConfig::data_path(&config.path, &config.id)).exists()\n        };\n\n        if cached(&t_aux.tree_d_config) {\n            let tree_d_size = t_aux\n                .tree_d_config\n                .size\n                .context(\"tree_d config has no size\")?;\n            let tree_d_store: DiskStore<G::Domain> =\n                DiskStore::new_from_disk(tree_d_size, BINARY_ARITY, &t_aux.tree_d_config)\n                    .context(\"tree_d\")?;\n            // Note: from_data_store requires the base tree leaf count\n            let tree_d = BinaryMerkleTree::<G>::from_data_store(\n                tree_d_store,\n                get_merkle_tree_leafs(tree_d_size, BINARY_ARITY)?,\n            )\n            .context(\"tree_d\")?;\n\n            tree_d.delete(t_aux.tree_d_config).context(\"tree_d\")?;\n            trace!(\"tree d deleted\");\n        }\n\n        if cached(&t_aux.tree_c_config) {\n            let tree_c_size = t_aux\n                .tree_c_config\n                .size\n                .context(\"tree_c config has no size\")?;\n\n            let tree_count = get_base_tree_count::<Tree>();\n            let configs = split_config(t_aux.tree_c_config.clone(), tree_count)?;\n            for config in &configs {\n                let tree_c_store = DiskStore::<<Tree::Hasher as Hasher>::Domain>::new_from_disk(\n                    tree_c_size,\n                    Tree::Arity::to_usize(),\n                    &config,\n                )\n                .context(\"tree_c\")?;\n                // Note: from_data_store requires the base tree leaf count\n                let tree_c = DiskTree::<\n                    Tree::Hasher,\n                    Tree::Arity,\n                    Tree::SubTreeArity,\n           
         Tree::TopTreeArity,\n                >::from_data_store(\n                    tree_c_store,\n                    get_merkle_tree_leafs(tree_c_size, Tree::Arity::to_usize())?,\n                )\n                .context(\"tree_c\")?;\n                tree_c.delete(config.clone()).context(\"tree_c\")?;\n            }\n            trace!(\"tree c deleted\");\n        }\n\n        for i in 0..t_aux.labels.labels.len() {\n            let cur_config = t_aux.labels.labels[i].clone();\n            if cached(&cur_config) {\n                DiskStore::<<Tree::Hasher as Hasher>::Domain>::delete(cur_config)\n                    .with_context(|| format!(\"labels {}\", i))?;\n                trace!(\"layer {} deleted\", i);\n            }\n        }\n\n        Ok(())\n    }\n}\n\n#[derive(Debug)]\npub struct TemporaryAuxCache<Tree: MerkleTreeTrait, G: Hasher> {\n    /// The encoded nodes for 1..layers.\n    pub labels: LabelsCache<Tree>,\n    pub tree_d: BinaryMerkleTree<G>,\n\n    // Notably this is a LevelCacheTree instead of a full merkle.\n    pub tree_r_last: LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n\n    // Store the 'rows_to_discard' value from the tree_r_last\n    // StoreConfig for later use (i.e. proof generation).\n    pub tree_r_last_config_rows_to_discard: usize,\n\n    pub tree_c: DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n    pub t_aux: TemporaryAux<Tree, G>,\n    pub replica_path: PathBuf,\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> TemporaryAuxCache<Tree, G> {\n    pub fn new(t_aux: &TemporaryAux<Tree, G>, replica_path: PathBuf) -> Result<Self> {\n        // tree_d_size stored in the config is the base tree size\n        let tree_d_size = t_aux.tree_d_config.size.unwrap();\n        let tree_d_leafs = get_merkle_tree_leafs(tree_d_size, BINARY_ARITY)?;\n        trace!(\n            \"Instantiating tree d with size {} and leafs {}\",\n            tree_d_size,\n            tree_d_leafs,\n        );\n        let tree_d_store: DiskStore<G::Domain> =\n            DiskStore::new_from_disk(tree_d_size, BINARY_ARITY, &t_aux.tree_d_config)\n                .context(\"tree_d_store\")?;\n        let tree_d =\n            BinaryMerkleTree::<G>::from_data_store(tree_d_store, tree_d_leafs).context(\"tree_d\")?;\n\n        let tree_count = get_base_tree_count::<Tree>();\n        let configs = split_config(t_aux.tree_c_config.clone(), tree_count)?;\n\n        // tree_c_size stored in the config is the base tree size\n        let tree_c_size = t_aux.tree_c_config.size.unwrap();\n        trace!(\n            \"Instantiating tree c [count {}] with size {} and arity {}\",\n            tree_count,\n            tree_c_size,\n            Tree::Arity::to_usize(),\n        );\n        let tree_c = create_disk_tree::<\n            DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n        >(tree_c_size, &configs)?;\n\n        let (configs, replica_paths) = split_config_and_replica(\n            t_aux.tree_r_last_config.clone(),\n            replica_path.clone(),\n            tree_count,\n        )?;\n\n        // tree_r_last_size stored in the config is the base tree size\n        let tree_r_last_size = t_aux.tree_r_last_config.size.unwrap();\n        let tree_r_last_config_rows_to_discard = t_aux.tree_r_last_config.rows_to_discard;\n        trace!(\n            \"Instantiating tree r last [count {}] with size {} and arity {}, {}, {}\",\n            tree_count,\n            tree_r_last_size,\n            Tree::Arity::to_usize(),\n 
           Tree::SubTreeArity::to_usize(),\n            Tree::TopTreeArity::to_usize(),\n        );\n        let tree_r_last = create_lc_tree::<\n            LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n        >(tree_r_last_size, &configs, &replica_paths)?;\n\n        Ok(TemporaryAuxCache {\n            labels: LabelsCache::new(&t_aux.labels).context(\"labels_cache\")?,\n            tree_d,\n            tree_r_last,\n            tree_r_last_config_rows_to_discard,\n            tree_c,\n            replica_path,\n            t_aux: t_aux.clone(),\n        })\n    }\n\n    pub fn labels_for_layer(&self, layer: usize) -> &DiskStore<<Tree::Hasher as Hasher>::Domain> {\n        self.labels.labels_for_layer(layer)\n    }\n\n    pub fn domain_node_at_layer(\n        &self,\n        layer: usize,\n        node_index: u32,\n    ) -> Result<<Tree::Hasher as Hasher>::Domain> {\n        Ok(self.labels_for_layer(layer).read_at(node_index as usize)?)\n    }\n\n    pub fn column(&self, column_index: u32) -> Result<Column<Tree::Hasher>> {\n        self.labels.column(column_index)\n    }\n}\n\ntype VerifyCallback = fn(&StoreConfig, usize) -> Result<()>;\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct Labels<Tree: MerkleTreeTrait> {\n    #[serde(bound(\n        serialize = \"StoreConfig: Serialize\",\n        deserialize = \"StoreConfig: Deserialize<'de>\"\n    ))]\n    pub labels: Vec<StoreConfig>,\n    pub _h: PhantomData<Tree>,\n}\n\nimpl<Tree: MerkleTreeTrait> Clone for Labels<Tree> {\n    fn clone(&self) -> Self {\n        Self {\n            labels: self.labels.clone(),\n            _h: Default::default(),\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> Labels<Tree> {\n    pub fn new(labels: Vec<StoreConfig>) -> Self {\n        Labels {\n            labels,\n            _h: PhantomData,\n        }\n    }\n\n    pub fn len(&self) -> usize {\n        self.labels.len()\n    }\n\n    pub fn is_empty(&self) -> bool {\n        self.labels.is_empty()\n    }\n\n    pub fn verify_stores(&self, callback: VerifyCallback, cache_dir: &PathBuf) -> Result<()> {\n        let updated_path_labels = self.labels.clone();\n        for mut label in updated_path_labels {\n            label.path = cache_dir.to_path_buf();\n            callback(&label, BINARY_ARITY)?;\n        }\n\n        Ok(())\n    }\n\n    pub fn labels_for_layer(\n        &self,\n        layer: usize,\n    ) -> Result<DiskStore<<Tree::Hasher as Hasher>::Domain>> {\n        assert!(layer != 0, \"Layer cannot be 0\");\n        assert!(\n            layer <= self.layers(),\n            \"Layer {} is not available (only {} layers available)\",\n            layer,\n            self.layers()\n        );\n\n        let row_index = layer - 1;\n        let config = self.labels[row_index].clone();\n        assert!(config.size.is_some());\n\n        DiskStore::new_from_disk(config.size.unwrap(), Tree::Arity::to_usize(), &config)\n    }\n\n    /// Returns label for the last layer.\n    pub fn labels_for_last_layer(&self) -> Result<DiskStore<<Tree::Hasher as Hasher>::Domain>> {\n        self.labels_for_layer(self.labels.len())\n    }\n\n    /// How many layers are available.\n    fn layers(&self) -> usize {\n        self.labels.len()\n    }\n\n    /// Build the column for the given node.\n    pub fn column(&self, node: u32) -> Result<Column<Tree::Hasher>> {\n        let rows = self\n            .labels\n            .iter()\n            .map(|label| {\n                assert!(label.size.is_some());\n                let store =\n  
                  DiskStore::new_from_disk(label.size.unwrap(), Tree::Arity::to_usize(), &label)?;\n                store.read_at(node as usize)\n            })\n            .collect::<Result<_>>()?;\n\n        Column::new(node, rows)\n    }\n\n    /// Update all configs to the new passed in root cache path.\n    pub fn update_root<P: AsRef<Path>>(&mut self, root: P) {\n        for config in &mut self.labels {\n            config.path = root.as_ref().into();\n        }\n    }\n}\n\n#[derive(Debug)]\npub struct LabelsCache<Tree: MerkleTreeTrait> {\n    pub labels: Vec<DiskStore<<Tree::Hasher as Hasher>::Domain>>,\n}\n\nimpl<Tree: MerkleTreeTrait> LabelsCache<Tree> {\n    pub fn new(labels: &Labels<Tree>) -> Result<Self> {\n        let mut disk_store_labels: Vec<DiskStore<<Tree::Hasher as Hasher>::Domain>> =\n            Vec::with_capacity(labels.len());\n        for i in 0..labels.len() {\n            disk_store_labels.push(labels.labels_for_layer(i + 1)?);\n        }\n\n        Ok(LabelsCache {\n            labels: disk_store_labels,\n        })\n    }\n\n    pub fn len(&self) -> usize {\n        self.labels.len()\n    }\n\n    pub fn is_empty(&self) -> bool {\n        self.labels.is_empty()\n    }\n\n    pub fn labels_for_layer(&self, layer: usize) -> &DiskStore<<Tree::Hasher as Hasher>::Domain> {\n        assert!(layer != 0, \"Layer cannot be 0\");\n        assert!(\n            layer <= self.layers(),\n            \"Layer {} is not available (only {} layers available)\",\n            layer,\n            self.layers()\n        );\n\n        let row_index = layer - 1;\n        &self.labels[row_index]\n    }\n\n    /// Returns the labels on the last layer.\n    pub fn labels_for_last_layer(&self) -> Result<&DiskStore<<Tree::Hasher as Hasher>::Domain>> {\n        Ok(&self.labels[self.labels.len() - 1])\n    }\n\n    /// How many layers are available.\n    fn layers(&self) -> usize {\n        self.labels.len()\n    }\n\n    /// Build the column for the given node.\n    pub fn column(&self, node: u32) -> Result<Column<Tree::Hasher>> {\n        let rows = self\n            .labels\n            .iter()\n            .map(|labels| labels.read_at(node as usize))\n            .collect::<Result<_>>()?;\n\n        Column::new(node, rows)\n    }\n}\n\npub fn get_node<H: Hasher>(data: &[u8], index: usize) -> Result<H::Domain> {\n    H::Domain::try_from_bytes(data_at_node(data, index).expect(\"invalid node math\"))\n}\n\n/// Generate the replica id as expected for Stacked DRG.\npub fn generate_replica_id<H: Hasher, T: AsRef<[u8]>>(\n    prover_id: &[u8; 32],\n    sector_id: u64,\n    ticket: &[u8; 32],\n    comm_d: T,\n) -> H::Domain {\n    use sha2::{Digest, Sha256};\n\n    let hash = Sha256::new()\n        .chain(prover_id)\n        .chain(&sector_id.to_be_bytes()[..])\n        .chain(ticket)\n        .chain(AsRef::<[u8]>::as_ref(&comm_d))\n        .result();\n\n    bytes_into_fr_repr_safe(hash.as_ref()).into()\n}\n"
  },
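  {
    "path": "storage-proofs/porep/src/stacked/vanilla/params_sketch.rs",
    "content": "//! Illustrative sketch, not a file from the original crate: the lifecycle of\n//! `TemporaryAux` after replication — rebase its stores onto the directory that\n//! actually holds them, build the `TemporaryAuxCache` for proving, then reclaim\n//! disk space with `clear_temp`. The tree instantiation and `example_lifecycle`\n//! are hypothetical.\n\nuse std::path::{Path, PathBuf};\n\nuse generic_array::typenum::{U0, U8};\nuse storage_proofs_core::error::Result;\nuse storage_proofs_core::hasher::{PoseidonHasher, Sha256Hasher};\nuse storage_proofs_core::merkle::DiskTree;\n\nuse crate::stacked::vanilla::{TemporaryAux, TemporaryAuxCache};\n\n// A common instantiation: oct-trees over Poseidon, Sha256 for the data tree.\ntype ExampleTree = DiskTree<PoseidonHasher, U8, U0, U0>;\n\n#[allow(dead_code)]\nfn example_lifecycle(\n    mut t_aux: TemporaryAux<ExampleTree, Sha256Hasher>,\n    cache_dir: &Path,\n    replica_path: PathBuf,\n) -> Result<()> {\n    // Point all label and tree StoreConfigs at the cache directory.\n    t_aux.set_cache_path(cache_dir);\n    // Restore the on-disk trees and label stores needed for proof generation.\n    let _t_aux_cache = TemporaryAuxCache::<ExampleTree, Sha256Hasher>::new(&t_aux, replica_path)?;\n    // Once proving is done, discard the persisted layers and trees.\n    TemporaryAux::<ExampleTree, Sha256Hasher>::clear_temp(t_aux)\n}\n"
  },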
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/porep.rs",
    "content": "use std::path::PathBuf;\n\nuse merkletree::store::StoreConfig;\nuse storage_proofs_core::{\n    error::Result,\n    hasher::Hasher,\n    merkle::{BinaryMerkleTree, MerkleTreeTrait},\n    Data,\n};\n\nuse super::{\n    params::{PersistentAux, PublicParams, Tau, TemporaryAux},\n    proof::StackedDrg,\n};\nuse crate::PoRep;\n\nimpl<'a, 'c, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> PoRep<'a, Tree::Hasher, G>\n    for StackedDrg<'a, Tree, G>\n{\n    type Tau = Tau<<Tree::Hasher as Hasher>::Domain, <G as Hasher>::Domain>;\n    type ProverAux = (\n        PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n        TemporaryAux<Tree, G>,\n    );\n\n    fn replicate(\n        pp: &'a PublicParams<Tree>,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        data: Data<'a>,\n        data_tree: Option<BinaryMerkleTree<G>>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<(Self::Tau, Self::ProverAux)> {\n        let (tau, p_aux, t_aux) = Self::transform_and_replicate_layers(\n            &pp.graph,\n            &pp.layer_challenges,\n            replica_id,\n            data,\n            data_tree,\n            config,\n            replica_path,\n        )?;\n\n        Ok((tau, (p_aux, t_aux)))\n    }\n\n    fn extract_all<'b>(\n        pp: &'b PublicParams<Tree>,\n        replica_id: &'b <Tree::Hasher as Hasher>::Domain,\n        data: &'b [u8],\n        config: Option<StoreConfig>,\n    ) -> Result<Vec<u8>> {\n        let mut data = data.to_vec();\n\n        Self::extract_and_invert_transform_layers(\n            &pp.graph,\n            &pp.layer_challenges,\n            replica_id,\n            &mut data,\n            config.expect(\"Missing store config\"),\n        )?;\n\n        Ok(data)\n    }\n\n    fn extract(\n        _pp: &PublicParams<Tree>,\n        _replica_id: &<Tree::Hasher as Hasher>::Domain,\n        _data: &[u8],\n        _node: usize,\n        _config: Option<StoreConfig>,\n    ) -> Result<Vec<u8>> {\n        unimplemented!();\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/proof.rs",
    "content": "use std::fs::OpenOptions;\nuse std::io::Write;\nuse std::marker::PhantomData;\nuse std::path::PathBuf;\nuse std::sync::{mpsc, Arc, RwLock};\n\nuse generic_array::typenum::{self, Unsigned};\nuse log::{info, trace};\nuse merkletree::merkle::{\n    get_merkle_tree_cache_size, get_merkle_tree_leafs, get_merkle_tree_len,\n    is_merkle_tree_size_valid,\n};\nuse merkletree::store::{DiskStore, StoreConfig};\nuse paired::bls12_381::Fr;\nuse rayon::prelude::*;\nuse storage_proofs_core::{\n    cache_key::CacheKey,\n    data::Data,\n    drgraph::Graph,\n    error::Result,\n    hasher::{Domain, HashFunction, Hasher, PoseidonArity},\n    measurements::{\n        measure_op,\n        Operation::{CommD, EncodeWindowTimeAll, GenerateTreeC, GenerateTreeRLast},\n    },\n    merkle::*,\n    settings,\n    util::{default_rows_to_discard, NODE_SIZE},\n};\nuse typenum::{U11, U2, U8};\n\nuse super::{\n    challenges::LayerChallenges,\n    column::Column,\n    create_label, create_label_exp,\n    graph::StackedBucketGraph,\n    hash::hash_single_column,\n    params::{\n        get_node, Labels, LabelsCache, PersistentAux, Proof, PublicInputs, PublicParams,\n        ReplicaColumnProof, Tau, TemporaryAux, TemporaryAuxCache, TransformedLayers, BINARY_ARITY,\n    },\n    EncodingProof, LabelingProof,\n};\n\nuse ff::Field;\nuse generic_array::{sequence::GenericSequence, GenericArray};\nuse neptune::batch_hasher::BatcherType;\nuse neptune::column_tree_builder::{ColumnTreeBuilder, ColumnTreeBuilderTrait};\nuse neptune::tree_builder::{TreeBuilder, TreeBuilderTrait};\nuse storage_proofs_core::fr32::fr_into_bytes;\n\nuse crate::encode::{decode, encode};\nuse crate::PoRep;\n\npub const TOTAL_PARENTS: usize = 37;\n\n#[derive(Debug)]\npub struct StackedDrg<'a, Tree: 'a + MerkleTreeTrait, G: 'a + Hasher> {\n    _a: PhantomData<&'a Tree>,\n    _b: PhantomData<&'a G>,\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tree, G> {\n    #[allow(clippy::too_many_arguments)]\n    pub(crate) fn prove_layers(\n        graph: &StackedBucketGraph<Tree::Hasher>,\n        pub_inputs: &PublicInputs<<Tree::Hasher as Hasher>::Domain, <G as Hasher>::Domain>,\n        p_aux: &PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n        t_aux: &TemporaryAuxCache<Tree, G>,\n        layer_challenges: &LayerChallenges,\n        layers: usize,\n        _total_layers: usize,\n        partition_count: usize,\n    ) -> Result<Vec<Vec<Proof<Tree, G>>>> {\n        assert!(layers > 0);\n        assert_eq!(t_aux.labels.len(), layers);\n\n        let graph_size = graph.size();\n\n        // Sanity checks on restored trees.\n        assert!(pub_inputs.tau.is_some());\n        assert_eq!(pub_inputs.tau.as_ref().unwrap().comm_d, t_aux.tree_d.root());\n\n        let get_drg_parents_columns = |x: usize| -> Result<Vec<Column<Tree::Hasher>>> {\n            let base_degree = graph.base_graph().degree();\n\n            let mut columns = Vec::with_capacity(base_degree);\n\n            let mut parents = vec![0; base_degree];\n            graph.base_parents(x, &mut parents)?;\n\n            for parent in &parents {\n                columns.push(t_aux.column(*parent)?);\n            }\n\n            debug_assert!(columns.len() == base_degree);\n\n            Ok(columns)\n        };\n\n        let get_exp_parents_columns = |x: usize| -> Result<Vec<Column<Tree::Hasher>>> {\n            let mut parents = vec![0; graph.expansion_degree()];\n            graph.expanded_parents(x, &mut parents)?;\n\n            
parents.iter().map(|parent| t_aux.column(*parent)).collect()\n        };\n\n        (0..partition_count)\n            .map(|k| {\n                trace!(\"proving partition {}/{}\", k + 1, partition_count);\n\n                // Derive the set of challenges we are proving over.\n                let challenges = pub_inputs.challenges(layer_challenges, graph_size, Some(k));\n\n                // Stacked commitment specifics\n                challenges\n                    .into_par_iter()\n                    .enumerate()\n                    .map(|(challenge_index, challenge)| {\n                        trace!(\" challenge {} ({})\", challenge, challenge_index);\n                        assert!(challenge < graph.size(), \"Invalid challenge\");\n                        assert!(challenge > 0, \"Invalid challenge\");\n\n                        // Initial data layer openings (c_X in Comm_D)\n                        let comm_d_proof = t_aux.tree_d.gen_proof(challenge)?;\n                        assert!(comm_d_proof.validate(challenge));\n\n                        // Stacked replica column openings\n                        let rcp = {\n                            let (c_x, drg_parents, exp_parents) = {\n                                assert_eq!(p_aux.comm_c, t_aux.tree_c.root());\n                                let tree_c = &t_aux.tree_c;\n\n                                // All labels in C_X\n                                trace!(\"  c_x\");\n                                let c_x = t_aux.column(challenge as u32)?.into_proof(tree_c)?;\n\n                                // All labels in the DRG parents.\n                                trace!(\"  drg_parents\");\n                                let drg_parents = get_drg_parents_columns(challenge)?\n                                    .into_iter()\n                                    .map(|column| column.into_proof(tree_c))\n                                    .collect::<Result<_>>()?;\n\n                                // Labels for the expander parents\n                                trace!(\"  exp_parents\");\n                                let exp_parents = get_exp_parents_columns(challenge)?\n                                    .into_iter()\n                                    .map(|column| column.into_proof(tree_c))\n                                    .collect::<Result<_>>()?;\n\n                                (c_x, drg_parents, exp_parents)\n                            };\n\n                            ReplicaColumnProof {\n                                c_x,\n                                drg_parents,\n                                exp_parents,\n                            }\n                        };\n\n                        // Final replica layer openings\n                        trace!(\"final replica layer openings\");\n                        let comm_r_last_proof = t_aux.tree_r_last.gen_cached_proof(\n                            challenge,\n                            Some(t_aux.tree_r_last_config_rows_to_discard),\n                        )?;\n\n                        debug_assert!(comm_r_last_proof.validate(challenge));\n\n                        // Labeling Proofs Layer 1..l\n                        let mut labeling_proofs = Vec::with_capacity(layers);\n                        let mut encoding_proof = None;\n\n                        for layer in 1..=layers {\n                            trace!(\"  encoding proof layer {}\", layer,);\n                            let parents_data: Vec<<Tree::Hasher as 
Hasher>::Domain> = if layer == 1\n                            {\n                                let mut parents = vec![0; graph.base_graph().degree()];\n                                graph.base_parents(challenge, &mut parents)?;\n\n                                parents\n                                    .into_iter()\n                                    .map(|parent| t_aux.domain_node_at_layer(layer, parent))\n                                    .collect::<Result<_>>()?\n                            } else {\n                                let mut parents = vec![0; graph.degree()];\n                                graph.parents(challenge, &mut parents)?;\n                                let base_parents_count = graph.base_graph().degree();\n\n                                parents\n                                    .into_iter()\n                                    .enumerate()\n                                    .map(|(i, parent)| {\n                                        if i < base_parents_count {\n                                            // parents data for base parents is from the current layer\n                                            t_aux.domain_node_at_layer(layer, parent)\n                                        } else {\n                                            // parents data for exp parents is from the previous layer\n                                            t_aux.domain_node_at_layer(layer - 1, parent)\n                                        }\n                                    })\n                                    .collect::<Result<_>>()?\n                            };\n\n                            // repeat parents\n                            let mut parents_data_full = vec![Default::default(); TOTAL_PARENTS];\n                            for chunk in parents_data_full.chunks_mut(parents_data.len()) {\n                                chunk.copy_from_slice(&parents_data[..chunk.len()]);\n                            }\n\n                            let proof = LabelingProof::<Tree::Hasher>::new(\n                                layer as u32,\n                                challenge as u64,\n                                parents_data_full.clone(),\n                            );\n\n                            {\n                                let labeled_node = rcp.c_x.get_node_at_layer(layer)?;\n                                assert!(\n                                    proof.verify(&pub_inputs.replica_id, &labeled_node),\n                                    \"Invalid labeling proof generated at layer {}\",\n                                    layer\n                                );\n                                trace!(\"Valid labeling proof generated at layer {}\", layer);\n                            }\n\n                            labeling_proofs.push(proof);\n\n                            if layer == layers {\n                                encoding_proof = Some(EncodingProof::new(\n                                    layer as u32,\n                                    challenge as u64,\n                                    parents_data_full,\n                                ));\n                            }\n                        }\n\n                        Ok(Proof {\n                            comm_d_proofs: comm_d_proof,\n                            replica_column_proofs: rcp,\n                            comm_r_last_proof,\n                            labeling_proofs,\n                            encoding_proof: encoding_proof.expect(\"invalid 
tapering\"),\n                        })\n                    })\n                    .collect()\n            })\n            .collect()\n    }\n\n    pub(crate) fn extract_and_invert_transform_layers(\n        graph: &StackedBucketGraph<Tree::Hasher>,\n        layer_challenges: &LayerChallenges,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        data: &mut [u8],\n        config: StoreConfig,\n    ) -> Result<()> {\n        trace!(\"extract_and_invert_transform_layers\");\n\n        let layers = layer_challenges.layers();\n        assert!(layers > 0);\n\n        // generate labels\n        let (labels, _) = Self::generate_labels(graph, layer_challenges, replica_id, config)?;\n\n        let last_layer_labels = labels.labels_for_last_layer()?;\n        let size = merkletree::store::Store::len(last_layer_labels);\n\n        for (key, encoded_node_bytes) in last_layer_labels\n            .read_range(0..size)?\n            .into_iter()\n            .zip(data.chunks_mut(NODE_SIZE))\n        {\n            let encoded_node =\n                <Tree::Hasher as Hasher>::Domain::try_from_bytes(encoded_node_bytes)?;\n            let data_node = decode::<<Tree::Hasher as Hasher>::Domain>(key, encoded_node);\n\n            // store result in the data\n            encoded_node_bytes.copy_from_slice(AsRef::<[u8]>::as_ref(&data_node));\n        }\n\n        Ok(())\n    }\n\n    #[allow(clippy::type_complexity)]\n    fn generate_labels(\n        graph: &StackedBucketGraph<Tree::Hasher>,\n        layer_challenges: &LayerChallenges,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        config: StoreConfig,\n    ) -> Result<(LabelsCache<Tree>, Labels<Tree>)> {\n        info!(\"generate labels\");\n\n        let layers = layer_challenges.layers();\n        // For now, we require it due to changes in encodings structure.\n        let mut labels: Vec<DiskStore<<Tree::Hasher as Hasher>::Domain>> =\n            Vec::with_capacity(layers);\n        let mut label_configs: Vec<StoreConfig> = Vec::with_capacity(layers);\n\n        let layer_size = graph.size() * NODE_SIZE;\n        // NOTE: this means we currently keep 2x sector size around, to improve speed.\n        let mut labels_buffer = vec![0u8; 2 * layer_size];\n\n        let use_cache = settings::SETTINGS.lock().unwrap().maximize_caching;\n        let mut cache = if use_cache {\n            Some(graph.parent_cache()?)\n        } else {\n            None\n        };\n\n        for layer in 1..=layers {\n            info!(\"generating layer: {}\", layer);\n            if let Some(ref mut cache) = cache {\n                cache.reset()?;\n            }\n\n            if layer == 1 {\n                let layer_labels = &mut labels_buffer[..layer_size];\n                for node in 0..graph.size() {\n                    create_label(graph, cache.as_mut(), replica_id, layer_labels, layer, node)?;\n                }\n            } else {\n                let (layer_labels, exp_labels) = labels_buffer.split_at_mut(layer_size);\n                for node in 0..graph.size() {\n                    create_label_exp(\n                        graph,\n                        cache.as_mut(),\n                        replica_id,\n                        exp_labels,\n                        layer_labels,\n                        layer,\n                        node,\n                    )?;\n                }\n            }\n\n            info!(\"  setting exp parents\");\n            labels_buffer.copy_within(..layer_size, layer_size);\n\n            // 
Write the result to disk to avoid keeping it in memory all the time.\n            let layer_config =\n                StoreConfig::from_config(&config, CacheKey::label_layer(layer), Some(graph.size()));\n\n            info!(\"  storing labels on disk\");\n            // Construct and persist the layer data.\n            let layer_store: DiskStore<<Tree::Hasher as Hasher>::Domain> =\n                DiskStore::new_from_slice_with_config(\n                    graph.size(),\n                    Tree::Arity::to_usize(),\n                    &labels_buffer[..layer_size],\n                    layer_config.clone(),\n                )?;\n            info!(\n                \"  generated layer {} store with id {}\",\n                layer, layer_config.id\n            );\n\n            // Track the layer specific store and StoreConfig for later retrieval.\n            labels.push(layer_store);\n            label_configs.push(layer_config);\n        }\n\n        assert_eq!(\n            labels.len(),\n            layers,\n            \"invalid number of layers encoded\"\n        );\n\n        Ok((\n            LabelsCache::<Tree> { labels },\n            Labels::<Tree> {\n                labels: label_configs,\n                _h: PhantomData,\n            },\n        ))\n    }\n\n    fn build_binary_tree<K: Hasher>(\n        tree_data: &[u8],\n        config: StoreConfig,\n    ) -> Result<BinaryMerkleTree<K>> {\n        trace!(\"building tree (size: {})\", tree_data.len());\n\n        let leafs = tree_data.len() / NODE_SIZE;\n        assert_eq!(tree_data.len() % NODE_SIZE, 0);\n\n        let tree = MerkleTree::from_par_iter_with_config(\n            (0..leafs)\n                .into_par_iter()\n                // TODO: proper error handling instead of `unwrap()`\n                .map(|i| get_node::<K>(tree_data, i).unwrap()),\n            config,\n        )?;\n        Ok(tree)\n    }\n\n    fn generate_tree_c<ColumnArity, TreeArity>(\n        layers: usize,\n        nodes_count: usize,\n        tree_count: usize,\n        configs: Vec<StoreConfig>,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        ColumnArity: 'static + PoseidonArity,\n        TreeArity: PoseidonArity,\n    {\n        if settings::SETTINGS.lock().unwrap().use_gpu_column_builder {\n            Self::generate_tree_c_gpu::<ColumnArity, TreeArity>(\n                layers,\n                nodes_count,\n                tree_count,\n                configs,\n                labels,\n            )\n        } else {\n            Self::generate_tree_c_cpu::<ColumnArity, TreeArity>(\n                layers,\n                nodes_count,\n                tree_count,\n                configs,\n                labels,\n            )\n        }\n    }\n\n    #[allow(clippy::needless_range_loop)]\n    fn generate_tree_c_gpu<ColumnArity, TreeArity>(\n        layers: usize,\n        nodes_count: usize,\n        tree_count: usize,\n        configs: Vec<StoreConfig>,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        ColumnArity: 'static + PoseidonArity,\n        TreeArity: PoseidonArity,\n    {\n        info!(\"generating tree c using the GPU\");\n        // Build the tree for CommC\n        measure_op(GenerateTreeC, || {\n            info!(\"Building column hashes\");\n\n            // NOTE: The max number of columns we recommend sending to 
the GPU at once is\n            // 400000 for columns and 700000 for trees (conservative soft limits).\n            //\n            // 'column_write_batch_size' is how many nodes to chunk the base layer of data\n            // into when persisting to disk.\n            //\n            // Override these values with care using environment variables:\n            // FIL_PROOFS_MAX_GPU_COLUMN_BATCH_SIZE, FIL_PROOFS_MAX_GPU_TREE_BATCH_SIZE, and\n            // FIL_PROOFS_COLUMN_WRITE_BATCH_SIZE respectively.\n            let max_gpu_column_batch_size =\n                settings::SETTINGS.lock().unwrap().max_gpu_column_batch_size as usize;\n            let max_gpu_tree_batch_size =\n                settings::SETTINGS.lock().unwrap().max_gpu_tree_batch_size as usize;\n            let column_write_batch_size =\n                settings::SETTINGS.lock().unwrap().column_write_batch_size as usize;\n\n            // This channel will receive batches of columns and add them to the ColumnTreeBuilder.\n            let (builder_tx, builder_rx) = mpsc::sync_channel::<(Vec<GenericArray<Fr, ColumnArity>>, bool)>(\n                max_gpu_column_batch_size * ColumnArity::to_usize() * 32,\n            );\n\n            let config_count = configs.len(); // Don't move `configs` into the closure below.\n            rayon::scope(|s| {\n                s.spawn(move |_| {\n                    for i in 0..config_count {\n                        let mut node_index = 0;\n                        let builder_tx = builder_tx.clone();\n                        while node_index != nodes_count {\n                            let chunked_nodes_count =\n                                std::cmp::min(nodes_count - node_index, max_gpu_column_batch_size);\n                            trace!(\n                                \"processing config {}/{} with column nodes {}\",\n                                i + 1,\n                                tree_count,\n                                chunked_nodes_count,\n                            );\n                            let mut columns: Vec<GenericArray<Fr, ColumnArity>> = vec![\n                                GenericArray::<Fr, ColumnArity>::generate(|_i: usize| Fr::zero());\n                                chunked_nodes_count\n                            ];\n\n                            // Allocate layer data array and insert a placeholder for each layer.\n                            let mut layer_data: Vec<Vec<Fr>> =\n                                vec![Vec::with_capacity(chunked_nodes_count); layers];\n\n                            rayon::scope(|s| {\n                                // capture a shadowed version of layer_data.\n                                let layer_data: &mut Vec<_> = &mut layer_data;\n\n                                // gather all layer data in parallel.\n                                s.spawn(move |_| {\n                                    for (layer_index, layer_elements) in\n                                        layer_data.iter_mut().enumerate()\n                                    {\n                                        let store = labels.labels_for_layer(layer_index + 1);\n                                        let start = (i * nodes_count) + node_index;\n                                        let end = start + chunked_nodes_count;\n                                        let elements: Vec<<Tree::Hasher as Hasher>::Domain> = store\n                                            .read_range(std::ops::Range 
{ start, end })\n                                            .expect(\"failed to read store range\");\n                                        layer_elements.extend(elements.into_iter().map(Into::into));\n                                    }\n                                });\n                            });\n\n                            // Copy out all layer data arranged into columns.\n                            for layer_index in 0..layers {\n                                for index in 0..chunked_nodes_count {\n                                    columns[index][layer_index] = layer_data[layer_index][index];\n                                }\n                            }\n\n                            drop(layer_data);\n\n                            node_index += chunked_nodes_count;\n                            trace!(\n                                \"node index {}/{}/{}\",\n                                node_index,\n                                chunked_nodes_count,\n                                nodes_count,\n                            );\n\n                            let is_final = node_index == nodes_count;\n                            builder_tx\n                                .send((columns, is_final))\n                                .expect(\"failed to send columns\");\n                        }\n                    }\n                });\n                let configs = &configs;\n                s.spawn(move |_| {\n                    let mut column_tree_builder = ColumnTreeBuilder::<ColumnArity, TreeArity>::new(\n                        Some(BatcherType::GPU),\n                        nodes_count,\n                        max_gpu_column_batch_size,\n                        max_gpu_tree_batch_size,\n                    )\n                    .expect(\"failed to create ColumnTreeBuilder\");\n\n                    let mut i = 0;\n                    let mut config = &configs[i];\n\n                    // Loop until all trees for all configs have been built.\n                    while i < configs.len() {\n                        let (columns, is_final): (Vec<GenericArray<Fr, ColumnArity>>, bool) =\n                            builder_rx.recv().expect(\"failed to recv columns\");\n\n                        // Just add non-final column batches.\n                        if !is_final {\n                            column_tree_builder\n                                .add_columns(&columns)\n                                .expect(\"failed to add columns\");\n                            continue;\n                        };\n\n                        // If we get here, this is a final column batch: build a sub-tree.\n                        let (base_data, tree_data) = column_tree_builder\n                            .add_final_columns(&columns)\n                            .expect(\"failed to add final columns\");\n                        trace!(\n                            \"base data len {}, tree data len {}\",\n                            base_data.len(),\n                            tree_data.len()\n                        );\n                        let tree_len = base_data.len() + tree_data.len();\n                        info!(\n                            \"persisting base tree_c {}/{} of length {}\",\n                            i + 1,\n                            tree_count,\n                            tree_len,\n                        );\n                        assert_eq!(base_data.len(), nodes_count);\n                        assert_eq!(tree_len, config.size.unwrap());\n\n                        // Persist the base and tree data to disk based on the 
current store config.\n                        let tree_c_store =\n                            DiskStore::<<Tree::Hasher as Hasher>::Domain>::new_with_config(\n                                tree_len,\n                                Tree::Arity::to_usize(),\n                                config.clone(),\n                            ).expect(\"failed to create DiskStore for base tree data\");\n\n                        let store = Arc::new(RwLock::new(tree_c_store));\n                        let batch_size = std::cmp::min(base_data.len(), column_write_batch_size);\n                        let flatten_and_write_store = |data: &Vec<Fr>, offset| {\n                            data.into_par_iter()\n                                .chunks(column_write_batch_size)\n                                .enumerate()\n                                .try_for_each(|(index, fr_elements)| {\n                                    let mut buf = Vec::with_capacity(batch_size * NODE_SIZE);\n\n                                    for fr in fr_elements {\n                                        buf.extend(fr_into_bytes(&fr));\n                                    }\n                                    store\n                                        .write()\n                                        .expect(\"failed to access store for write\")\n                                        .copy_from_slice(&buf[..], offset + (batch_size * index))\n                                })\n                        };\n\n                        trace!(\n                            \"flattening tree_c base data of {} nodes using batch size {}\",\n                            base_data.len(),\n                            batch_size\n                        );\n                        flatten_and_write_store(&base_data, 0).expect(\"failed to flatten and write store\");\n                        trace!(\"done flattening tree_c base data\");\n\n                        let base_offset = base_data.len();\n                        trace!(\"flattening tree_c tree data of {} nodes using batch size {} and base offset {}\", tree_data.len(), batch_size, base_offset);\n                        flatten_and_write_store(&tree_data, base_offset).expect(\"failed to flatten and write store\");\n                        trace!(\"done flattening tree_c tree data\");\n\n                        trace!(\"writing tree_c store data\");\n                        store\n                            .write()\n                            .expect(\"failed to access store for sync\")\n                            .sync().unwrap();\n                        trace!(\"done writing tree_c store data\");\n\n                        // Move on to the next config.\n                        i += 1;\n                        if i == configs.len() {\n                            break;\n                        }\n                        config = &configs[i];\n                    }\n                });\n            });\n\n            create_disk_tree::<\n                DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n            >(configs[0].size.unwrap(), &configs)\n        })\n    }\n\n    fn generate_tree_c_cpu<ColumnArity, TreeArity>(\n        layers: usize,\n        nodes_count: usize,\n        tree_count: usize,\n        configs: Vec<StoreConfig>,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        ColumnArity: PoseidonArity,\n        TreeArity: 
PoseidonArity,\n    {\n        info!(\"generating tree c using the CPU\");\n        measure_op(GenerateTreeC, || {\n            info!(\"Building column hashes\");\n\n            let mut trees = Vec::with_capacity(tree_count);\n            for (i, config) in configs.iter().enumerate() {\n                let mut hashes: Vec<<Tree::Hasher as Hasher>::Domain> =\n                    vec![<Tree::Hasher as Hasher>::Domain::default(); nodes_count];\n\n                rayon::scope(|s| {\n                    let n = num_cpus::get();\n\n                    // only split if we have at least two elements per thread\n                    let num_chunks = if nodes_count < n * 2 { 1 } else { n };\n\n                    // chunk into num_chunks chunks\n                    let chunk_size = (nodes_count as f64 / num_chunks as f64).ceil() as usize;\n\n                    // calculate all chunks in parallel\n                    for (chunk, hashes_chunk) in hashes.chunks_mut(chunk_size).enumerate() {\n                        let labels = &labels;\n\n                        s.spawn(move |_| {\n                            for (j, hash) in hashes_chunk.iter_mut().enumerate() {\n                                let data: Vec<_> = (1..=layers)\n                                    .map(|layer| {\n                                        let store = labels.labels_for_layer(layer);\n                                        let el: <Tree::Hasher as Hasher>::Domain = store\n                                            .read_at((i * nodes_count) + j + chunk * chunk_size)\n                                            .expect(\"failed to read store\");\n                                        el.into()\n                                    })\n                                    .collect();\n\n                                *hash = hash_single_column(&data).into();\n                            }\n                        });\n                    }\n                });\n\n                info!(\"building base tree_c {}/{}\", i + 1, tree_count);\n                trees.push(DiskTree::<\n                    Tree::Hasher,\n                    Tree::Arity,\n                    typenum::U0,\n                    typenum::U0,\n                >::from_par_iter_with_config(\n                    hashes.into_par_iter(),\n                    config.clone(),\n                )?);\n            }\n\n            assert_eq!(tree_count, trees.len());\n            create_disk_tree::<\n                DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n            >(configs[0].size.unwrap(), &configs)\n        })\n    }\n\n    fn generate_tree_r_last<TreeArity>(\n        data: &mut Data,\n        nodes_count: usize,\n        tree_count: usize,\n        tree_r_last_config: StoreConfig,\n        replica_path: PathBuf,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        TreeArity: PoseidonArity,\n    {\n        let (configs, replica_config) = split_config_and_replica(\n            tree_r_last_config.clone(),\n            replica_path,\n            nodes_count,\n            tree_count,\n        )?;\n\n        data.ensure_data()?;\n        let last_layer_labels = labels.labels_for_last_layer()?;\n\n        if settings::SETTINGS.lock().unwrap().use_gpu_tree_builder {\n            info!(\"generating tree r last using the GPU\");\n            let max_gpu_tree_batch_size =\n                settings::SETTINGS.lock().unwrap().max_gpu_tree_batch_size as usize;\n\n            // This channel will 
receive batches of leaf nodes and add them to the TreeBuilder.\n            let (builder_tx, builder_rx) = mpsc::sync_channel::<(Vec<Fr>, bool)>(0);\n            let config_count = configs.len(); // Don't move config into closure below.\n            let configs = &configs;\n            rayon::scope(|s| {\n                s.spawn(move |_| {\n                    for i in 0..config_count {\n                        let mut node_index = 0;\n                        while node_index != nodes_count {\n                            let chunked_nodes_count =\n                                std::cmp::min(nodes_count - node_index, max_gpu_tree_batch_size);\n                            let start = (i * nodes_count) + node_index;\n                            let end = start + chunked_nodes_count;\n                            trace!(\n                                \"processing config {}/{} with leaf nodes {} [{}, {}, {}-{}]\",\n                                i + 1,\n                                tree_count,\n                                chunked_nodes_count,\n                                node_index,\n                                nodes_count,\n                                start,\n                                end,\n                            );\n\n                            let encoded_data = last_layer_labels\n                                .read_range(start..end)\n                                .expect(\"failed to read layer range\")\n                                .into_par_iter()\n                                .zip(\n                                    data.as_mut()[(start * NODE_SIZE)..(end * NODE_SIZE)]\n                                        .par_chunks_mut(NODE_SIZE),\n                                )\n                                .map(|(key, data_node_bytes)| {\n                                    let data_node =\n                                        <Tree::Hasher as Hasher>::Domain::try_from_bytes(\n                                            data_node_bytes,\n                                        )\n                                        .expect(\"try_from_bytes failed\");\n                                    let encoded_node =\n                                        encode::<<Tree::Hasher as Hasher>::Domain>(key, data_node);\n                                    data_node_bytes\n                                        .copy_from_slice(AsRef::<[u8]>::as_ref(&encoded_node));\n\n                                    encoded_node\n                                });\n\n                            node_index += chunked_nodes_count;\n                            trace!(\n                                \"node index {}/{}/{}\",\n                                node_index,\n                                chunked_nodes_count,\n                                nodes_count,\n                            );\n\n                            let encoded: Vec<_> =\n                                encoded_data.into_par_iter().map(|x| x.into()).collect();\n\n                            let is_final = node_index == nodes_count;\n                            builder_tx\n                                .send((encoded, is_final))\n                                .expect(\"failed to send encoded\");\n                        }\n                    }\n                });\n\n                {\n                    let tree_r_last_config = &tree_r_last_config;\n                    s.spawn(move |_| {\n                        let mut tree_builder = TreeBuilder::<Tree::Arity>::new(\n             
               Some(BatcherType::GPU),\n                            nodes_count,\n                            max_gpu_tree_batch_size,\n                            tree_r_last_config.rows_to_discard,\n                        )\n                        .expect(\"failed to create TreeBuilder\");\n\n                        let mut i = 0;\n                        let mut config = &configs[i];\n\n                        // Loop until all trees for all configs have been built.\n                        while i < configs.len() {\n                            let (encoded, is_final) =\n                                builder_rx.recv().expect(\"failed to recv encoded data\");\n\n                            // Just add non-final leaf batches.\n                            if !is_final {\n                                tree_builder\n                                    .add_leaves(&encoded)\n                                    .expect(\"failed to add leaves\");\n                                continue;\n                            };\n\n                            // If we get here, this is a final leaf batch: build a sub-tree.\n                            info!(\n                                \"building base tree_r_last with GPU {}/{}\",\n                                i + 1,\n                                tree_count\n                            );\n                            let (_, tree_data) = tree_builder\n                                .add_final_leaves(&encoded)\n                                .expect(\"failed to add final leaves\");\n                            let tree_data_len = tree_data.len();\n                            let cache_size = get_merkle_tree_cache_size(\n                                get_merkle_tree_leafs(\n                                    config.size.unwrap(),\n                                    Tree::Arity::to_usize(),\n                                )\n                                .expect(\"failed to get merkle tree leaves\"),\n                                Tree::Arity::to_usize(),\n                                config.rows_to_discard,\n                            )\n                            .expect(\"failed to get merkle tree cache size\");\n                            assert_eq!(tree_data_len, cache_size);\n\n                            let flat_tree_data: Vec<_> = tree_data\n                                .into_par_iter()\n                                .flat_map(|el| fr_into_bytes(&el))\n                                .collect();\n\n                            // Persist the data to the store based on the current config.\n                            let tree_r_last_path = StoreConfig::data_path(&config.path, &config.id);\n                            trace!(\n                                \"persisting tree r of len {} with {} rows to discard at path {:?}\",\n                                tree_data_len,\n                                config.rows_to_discard,\n                                tree_r_last_path\n                            );\n                            let mut f = OpenOptions::new()\n                                .create(true)\n                                .write(true)\n                                .open(&tree_r_last_path)\n                                .expect(\"failed to open file for tree_r_last\");\n                            f.write_all(&flat_tree_data)\n                                .expect(\"failed to write tree_r_last data\");\n\n                            // Move on to the next config.\n                 
           i += 1;\n                            if i == configs.len() {\n                                break;\n                            }\n                            config = &configs[i];\n                        }\n                    });\n                }\n            });\n        } else {\n            info!(\"generating tree r last using the CPU\");\n            let size = Store::len(last_layer_labels);\n\n            let mut start = 0;\n            let mut end = size / tree_count;\n\n            for (i, config) in configs.iter().enumerate() {\n                let encoded_data = last_layer_labels\n                    .read_range(start..end)?\n                    .into_par_iter()\n                    .zip(\n                        data.as_mut()[(start * NODE_SIZE)..(end * NODE_SIZE)]\n                            .par_chunks_mut(NODE_SIZE),\n                    )\n                    .map(|(key, data_node_bytes)| {\n                        let data_node =\n                            <Tree::Hasher as Hasher>::Domain::try_from_bytes(data_node_bytes)\n                                .expect(\"try from bytes failed\");\n                        let encoded_node =\n                            encode::<<Tree::Hasher as Hasher>::Domain>(key, data_node);\n                        data_node_bytes.copy_from_slice(AsRef::<[u8]>::as_ref(&encoded_node));\n\n                        encoded_node\n                    });\n\n                info!(\n                    \"building base tree_r_last with CPU {}/{}\",\n                    i + 1,\n                    tree_count\n                );\n                LCTree::<Tree::Hasher, Tree::Arity, typenum::U0, typenum::U0>::from_par_iter_with_config(encoded_data, config.clone())?;\n\n                start = end;\n                end += size / tree_count;\n            }\n        };\n\n        create_lc_tree::<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>(\n            tree_r_last_config.size.unwrap(),\n            &configs,\n            &replica_config,\n        )\n    }\n\n    pub(crate) fn transform_and_replicate_layers(\n        graph: &StackedBucketGraph<Tree::Hasher>,\n        layer_challenges: &LayerChallenges,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        data: Data,\n        data_tree: Option<BinaryMerkleTree<G>>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<TransformedLayers<Tree, G>> {\n        // Generate key layers.\n        let (_, labels) = measure_op(EncodeWindowTimeAll, || {\n            Self::generate_labels(graph, layer_challenges, replica_id, config.clone())\n        })?;\n\n        Self::transform_and_replicate_layers_inner(\n            graph,\n            layer_challenges,\n            data,\n            data_tree,\n            config,\n            replica_path,\n            labels,\n        )\n    }\n\n    pub(crate) fn transform_and_replicate_layers_inner(\n        graph: &StackedBucketGraph<Tree::Hasher>,\n        layer_challenges: &LayerChallenges,\n        mut data: Data,\n        data_tree: Option<BinaryMerkleTree<G>>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n        label_configs: Labels<Tree>,\n    ) -> Result<TransformedLayers<Tree, G>> {\n        trace!(\"transform_and_replicate_layers\");\n        let nodes_count = graph.size();\n\n        assert_eq!(data.len(), nodes_count * NODE_SIZE);\n        trace!(\"nodes count {}, data len {}\", nodes_count, data.len());\n\n        let tree_count = 
get_base_tree_count::<Tree>();\n        let nodes_count = graph.size() / tree_count;\n\n        // Ensure that the node count will work for binary and oct arities.\n        let binary_arity_valid = is_merkle_tree_size_valid(nodes_count, BINARY_ARITY);\n        let other_arity_valid = is_merkle_tree_size_valid(nodes_count, Tree::Arity::to_usize());\n        trace!(\n            \"is_merkle_tree_size_valid({}, BINARY_ARITY) = {}\",\n            nodes_count,\n            binary_arity_valid\n        );\n        trace!(\n            \"is_merkle_tree_size_valid({}, {}) = {}\",\n            nodes_count,\n            Tree::Arity::to_usize(),\n            other_arity_valid\n        );\n        assert!(binary_arity_valid);\n        assert!(other_arity_valid);\n\n        let layers = layer_challenges.layers();\n        assert!(layers > 0);\n\n        // Generate all store configs that we need based on the\n        // cache_path in the specified config.\n        let mut tree_d_config = StoreConfig::from_config(\n            &config,\n            CacheKey::CommDTree.to_string(),\n            Some(get_merkle_tree_len(nodes_count, BINARY_ARITY)?),\n        );\n        tree_d_config.rows_to_discard = default_rows_to_discard(nodes_count, BINARY_ARITY);\n\n        let mut tree_r_last_config = StoreConfig::from_config(\n            &config,\n            CacheKey::CommRLastTree.to_string(),\n            Some(get_merkle_tree_len(nodes_count, Tree::Arity::to_usize())?),\n        );\n\n        // A default 'rows_to_discard' value will be chosen for tree_r_last, unless the user overrides this value via the\n        // environment setting (FIL_PROOFS_ROWS_TO_DISCARD).  If this value is specified, no checking is done on it and it may\n        // result in a broken configuration.  
Use with caution.\n        tree_r_last_config.rows_to_discard =\n            default_rows_to_discard(nodes_count, Tree::Arity::to_usize());\n        trace!(\n            \"tree_r_last using rows_to_discard={}\",\n            tree_r_last_config.rows_to_discard\n        );\n\n        let mut tree_c_config = StoreConfig::from_config(\n            &config,\n            CacheKey::CommCTree.to_string(),\n            Some(get_merkle_tree_len(nodes_count, Tree::Arity::to_usize())?),\n        );\n        tree_c_config.rows_to_discard =\n            default_rows_to_discard(nodes_count, Tree::Arity::to_usize());\n\n        let labels = LabelsCache::<Tree>::new(&label_configs)?;\n        let configs = split_config(tree_c_config.clone(), tree_count)?;\n\n        let tree_c_root = match layers {\n            2 => {\n                let tree_c = Self::generate_tree_c::<U2, Tree::Arity>(\n                    layers,\n                    nodes_count,\n                    tree_count,\n                    configs,\n                    &labels,\n                )?;\n                tree_c.root()\n            }\n            8 => {\n                let tree_c = Self::generate_tree_c::<U8, Tree::Arity>(\n                    layers,\n                    nodes_count,\n                    tree_count,\n                    configs,\n                    &labels,\n                )?;\n                tree_c.root()\n            }\n            11 => {\n                let tree_c = Self::generate_tree_c::<U11, Tree::Arity>(\n                    layers,\n                    nodes_count,\n                    tree_count,\n                    configs,\n                    &labels,\n                )?;\n                tree_c.root()\n            }\n            _ => panic!(\"Unsupported column arity\"),\n        };\n        info!(\"tree_c done\");\n\n        // Build the MerkleTree over the original data (if needed).\n        let tree_d = match data_tree {\n            Some(t) => {\n                trace!(\"using existing original data merkle tree\");\n                assert_eq!(t.len(), 2 * (data.len() / NODE_SIZE) - 1);\n\n                t\n            }\n            None => {\n                trace!(\"building merkle tree for the original data\");\n                data.ensure_data()?;\n                measure_op(CommD, || {\n                    Self::build_binary_tree::<G>(data.as_ref(), tree_d_config.clone())\n                })?\n            }\n        };\n        tree_d_config.size = Some(tree_d.len());\n        assert_eq!(tree_d_config.size.unwrap(), tree_d.len());\n        let tree_d_root = tree_d.root();\n        drop(tree_d);\n\n        // Encode original data into the last layer.\n        info!(\"building tree_r_last\");\n        let tree_r_last = measure_op(GenerateTreeRLast, || {\n            Self::generate_tree_r_last::<Tree::Arity>(\n                &mut data,\n                nodes_count,\n                tree_count,\n                tree_r_last_config.clone(),\n                replica_path.clone(),\n                &labels,\n            )\n        })?;\n        info!(\"tree_r_last done\");\n\n        let tree_r_last_root = tree_r_last.root();\n        drop(tree_r_last);\n\n        data.drop_data();\n\n        // comm_r = H(comm_c || comm_r_last)\n        let comm_r: <Tree::Hasher as Hasher>::Domain =\n            <Tree::Hasher as Hasher>::Function::hash2(&tree_c_root, &tree_r_last_root);\n\n        Ok((\n            Tau {\n                comm_d: tree_d_root,\n                comm_r,\n            },\n          
  PersistentAux {\n                comm_c: tree_c_root,\n                comm_r_last: tree_r_last_root,\n            },\n            TemporaryAux {\n                labels: label_configs,\n                tree_d_config,\n                tree_r_last_config,\n                tree_c_config,\n                _g: PhantomData,\n            },\n        ))\n    }\n\n    /// Phase1 of replication.\n    pub fn replicate_phase1(\n        pp: &'a PublicParams<Tree>,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        config: StoreConfig,\n    ) -> Result<Labels<Tree>> {\n        info!(\"replicate_phase1\");\n\n        let (_, labels) = measure_op(EncodeWindowTimeAll, || {\n            Self::generate_labels(&pp.graph, &pp.layer_challenges, replica_id, config)\n        })?;\n\n        Ok(labels)\n    }\n\n    /// Phase2 of replication.\n    #[allow(clippy::type_complexity)]\n    pub fn replicate_phase2(\n        pp: &'a PublicParams<Tree>,\n        labels: Labels<Tree>,\n        data: Data<'a>,\n        data_tree: BinaryMerkleTree<G>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<(\n        <Self as PoRep<'a, Tree::Hasher, G>>::Tau,\n        <Self as PoRep<'a, Tree::Hasher, G>>::ProverAux,\n    )> {\n        info!(\"replicate_phase2\");\n\n        let (tau, paux, taux) = Self::transform_and_replicate_layers_inner(\n            &pp.graph,\n            &pp.layer_challenges,\n            data,\n            Some(data_tree),\n            config,\n            replica_path,\n            labels,\n        )?;\n\n        Ok((tau, (paux, taux)))\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use ff::Field;\n    use paired::bls12_381::Fr;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        drgraph::BASE_DEGREE,\n        fr32::fr_into_bytes,\n        hasher::{Blake2sHasher, PedersenHasher, PoseidonHasher, Sha256Hasher},\n        merkle::MerkleTreeTrait,\n        proof::ProofScheme,\n        table_tests,\n        test_helper::setup_replica,\n    };\n\n    use crate::stacked::{PrivateInputs, SetupParams, EXP_DEGREE};\n    use crate::PoRep;\n\n    const DEFAULT_STACKED_LAYERS: usize = 11;\n\n    #[test]\n    fn test_calculate_fixed_challenges() {\n        let layer_challenges = LayerChallenges::new(10, 333);\n        let expected = 333;\n\n        let calculated_count = layer_challenges.challenges_count_all();\n        assert_eq!(expected as usize, calculated_count);\n    }\n\n    #[test]\n    fn extract_all_pedersen_8() {\n        test_extract_all::<DiskTree<PedersenHasher, typenum::U8, typenum::U0, typenum::U0>>();\n    }\n\n    #[test]\n    fn extract_all_pedersen_8_2() {\n        test_extract_all::<DiskTree<PedersenHasher, typenum::U8, typenum::U2, typenum::U0>>();\n    }\n\n    #[test]\n    fn extract_all_pedersen_8_8_2() {\n        test_extract_all::<DiskTree<PedersenHasher, typenum::U8, typenum::U8, typenum::U2>>();\n    }\n\n    #[test]\n    fn extract_all_sha256_8() {\n        test_extract_all::<DiskTree<Sha256Hasher, typenum::U8, typenum::U0, typenum::U0>>();\n    }\n\n    #[test]\n    fn extract_all_sha256_8_8() {\n        test_extract_all::<DiskTree<Sha256Hasher, typenum::U8, typenum::U8, typenum::U0>>();\n    }\n\n    #[test]\n    fn extract_all_sha256_8_8_2() {\n        test_extract_all::<DiskTree<Sha256Hasher, typenum::U8, typenum::U8, typenum::U2>>();\n    }\n\n    #[test]\n    fn extract_all_blake2s_8() {\n        
test_extract_all::<DiskTree<Blake2sHasher, typenum::U8, typenum::U0, typenum::U0>>();\n    }\n\n    #[test]\n    fn extract_all_blake2s_8_8() {\n        test_extract_all::<DiskTree<Blake2sHasher, typenum::U8, typenum::U8, typenum::U0>>();\n    }\n\n    #[test]\n    fn extract_all_blake2s_8_8_2() {\n        test_extract_all::<DiskTree<Blake2sHasher, typenum::U8, typenum::U8, typenum::U2>>();\n    }\n\n    #[test]\n    fn extract_all_poseidon_8() {\n        test_extract_all::<DiskTree<PoseidonHasher, typenum::U8, typenum::U0, typenum::U0>>();\n    }\n\n    #[test]\n    fn extract_all_poseidon_8_2() {\n        test_extract_all::<DiskTree<PoseidonHasher, typenum::U8, typenum::U2, typenum::U0>>();\n    }\n\n    #[test]\n    fn extract_all_poseidon_8_8_2() {\n        test_extract_all::<DiskTree<PoseidonHasher, typenum::U8, typenum::U8, typenum::U2>>();\n    }\n\n    fn test_extract_all<Tree: 'static + MerkleTreeTrait>() {\n        // femme::pretty::Logger::new()\n        //     .start(log::LevelFilter::Trace)\n        //     .ok();\n\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n        let replica_id: <Tree::Hasher as Hasher>::Domain =\n            <Tree::Hasher as Hasher>::Domain::random(rng);\n        let nodes = 64 * get_base_tree_count::<Tree>();\n\n        let data: Vec<u8> = (0..nodes)\n            .flat_map(|_| {\n                let v: <Tree::Hasher as Hasher>::Domain =\n                    <Tree::Hasher as Hasher>::Domain::random(rng);\n                v.into_bytes()\n            })\n            .collect();\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempfile::tempdir().unwrap();\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            default_rows_to_discard(nodes, BINARY_ARITY),\n        );\n\n        // Generate a replica path.\n        let replica_path = cache_dir.path().join(\"replica-path\");\n        let mut mmapped_data = setup_replica(&data, &replica_path);\n\n        let challenges = LayerChallenges::new(DEFAULT_STACKED_LAYERS, 5);\n\n        let sp = SetupParams {\n            nodes,\n            degree: BASE_DEGREE,\n            expansion_degree: EXP_DEGREE,\n            porep_id: [32; 32],\n            layer_challenges: challenges.clone(),\n        };\n\n        let pp = StackedDrg::<Tree, Blake2sHasher>::setup(&sp).expect(\"setup failed\");\n\n        StackedDrg::<Tree, Blake2sHasher>::replicate(\n            &pp,\n            &replica_id,\n            (mmapped_data.as_mut()).into(),\n            None,\n            config.clone(),\n            replica_path.clone(),\n        )\n        .expect(\"replication failed\");\n\n        let mut copied = vec![0; data.len()];\n        copied.copy_from_slice(&mmapped_data);\n        assert_ne!(data, copied, \"replication did not change data\");\n\n        let decoded_data = StackedDrg::<Tree, Blake2sHasher>::extract_all(\n            &pp,\n            &replica_id,\n            mmapped_data.as_mut(),\n            Some(config.clone()),\n        )\n        .expect(\"failed to extract data\");\n\n        assert_eq!(data, decoded_data);\n\n        cache_dir.close().expect(\"Failed to remove cache dir\");\n    }\n\n    fn prove_verify_fixed(n: usize) {\n        let challenges = LayerChallenges::new(DEFAULT_STACKED_LAYERS, 5);\n\n        test_prove_verify::<DiskTree<PedersenHasher, typenum::U4, typenum::U0, typenum::U0>>(\n            n,\n    
        challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<PedersenHasher, typenum::U4, typenum::U2, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<PedersenHasher, typenum::U4, typenum::U8, typenum::U2>>(\n            n,\n            challenges.clone(),\n        );\n\n        test_prove_verify::<DiskTree<PedersenHasher, typenum::U8, typenum::U0, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<PedersenHasher, typenum::U8, typenum::U2, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<PedersenHasher, typenum::U8, typenum::U8, typenum::U2>>(\n            n,\n            challenges.clone(),\n        );\n\n        test_prove_verify::<DiskTree<Sha256Hasher, typenum::U8, typenum::U0, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<Sha256Hasher, typenum::U8, typenum::U2, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<Sha256Hasher, typenum::U8, typenum::U8, typenum::U2>>(\n            n,\n            challenges.clone(),\n        );\n\n        test_prove_verify::<DiskTree<Sha256Hasher, typenum::U4, typenum::U0, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<Sha256Hasher, typenum::U4, typenum::U2, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<Sha256Hasher, typenum::U4, typenum::U8, typenum::U2>>(\n            n,\n            challenges.clone(),\n        );\n\n        test_prove_verify::<DiskTree<Blake2sHasher, typenum::U4, typenum::U0, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<Blake2sHasher, typenum::U4, typenum::U2, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<Blake2sHasher, typenum::U4, typenum::U8, typenum::U2>>(\n            n,\n            challenges.clone(),\n        );\n\n        test_prove_verify::<DiskTree<Blake2sHasher, typenum::U8, typenum::U0, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<Blake2sHasher, typenum::U8, typenum::U2, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<Blake2sHasher, typenum::U8, typenum::U8, typenum::U2>>(\n            n,\n            challenges.clone(),\n        );\n\n        test_prove_verify::<DiskTree<PoseidonHasher, typenum::U4, typenum::U0, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<PoseidonHasher, typenum::U4, typenum::U2, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<PoseidonHasher, typenum::U4, typenum::U8, typenum::U2>>(\n            n,\n            challenges.clone(),\n        );\n\n        test_prove_verify::<DiskTree<PoseidonHasher, typenum::U8, typenum::U0, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<PoseidonHasher, typenum::U8, typenum::U2, typenum::U0>>(\n            n,\n            challenges.clone(),\n        );\n        test_prove_verify::<DiskTree<PoseidonHasher, typenum::U8, typenum::U8, 
typenum::U2>>(\n            n,\n            challenges.clone(),\n        );\n    }\n\n    fn test_prove_verify<Tree: 'static + MerkleTreeTrait>(n: usize, challenges: LayerChallenges) {\n        // This will be called multiple times; only the first one succeeds, and that is ok.\n        // femme::pretty::Logger::new()\n        //     .start(log::LevelFilter::Trace)\n        //     .ok();\n\n        let nodes = n * get_base_tree_count::<Tree>();\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let degree = BASE_DEGREE;\n        let expansion_degree = EXP_DEGREE;\n        let replica_id: <Tree::Hasher as Hasher>::Domain =\n            <Tree::Hasher as Hasher>::Domain::random(rng);\n        let data: Vec<u8> = (0..nodes)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempfile::tempdir().unwrap();\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            default_rows_to_discard(nodes, BINARY_ARITY),\n        );\n\n        // Generate a replica path.\n        let replica_path = cache_dir.path().join(\"replica-path\");\n        let mut mmapped_data = setup_replica(&data, &replica_path);\n\n        let partitions = 2;\n\n        let arbitrary_porep_id = [92; 32];\n        let sp = SetupParams {\n            nodes,\n            degree,\n            expansion_degree,\n            porep_id: arbitrary_porep_id,\n            layer_challenges: challenges.clone(),\n        };\n\n        let pp = StackedDrg::<Tree, Blake2sHasher>::setup(&sp).expect(\"setup failed\");\n        let (tau, (p_aux, t_aux)) = StackedDrg::<Tree, Blake2sHasher>::replicate(\n            &pp,\n            &replica_id,\n            (mmapped_data.as_mut()).into(),\n            None,\n            config,\n            replica_path.clone(),\n        )\n        .expect(\"replication failed\");\n\n        let mut copied = vec![0; data.len()];\n        copied.copy_from_slice(&mmapped_data);\n        assert_ne!(data, copied, \"replication did not change data\");\n\n        let seed = rng.gen();\n        let pub_inputs =\n            PublicInputs::<<Tree::Hasher as Hasher>::Domain, <Blake2sHasher as Hasher>::Domain> {\n                replica_id,\n                seed,\n                tau: Some(tau),\n                k: None,\n            };\n\n        // Store a copy of the t_aux for later resource deletion.\n        let t_aux_orig = t_aux.clone();\n\n        // Convert TemporaryAux to TemporaryAuxCache, which instantiates all\n        // elements based on the configs stored in TemporaryAux.\n        let t_aux = TemporaryAuxCache::<Tree, Blake2sHasher>::new(&t_aux, replica_path)\n            .expect(\"failed to restore contents of t_aux\");\n\n        let priv_inputs = PrivateInputs { p_aux, t_aux };\n\n        let all_partition_proofs = &StackedDrg::<Tree, Blake2sHasher>::prove_all_partitions(\n            &pp,\n            &pub_inputs,\n            &priv_inputs,\n            partitions,\n        )\n        .expect(\"failed to generate partition proofs\");\n\n        let proofs_are_valid = StackedDrg::<Tree, Blake2sHasher>::verify_all_partitions(\n            &pp,\n            &pub_inputs,\n            all_partition_proofs,\n        )\n        .expect(\"failed to verify partition proofs\");\n\n        // Discard cached MTs that are no longer needed.\n    
    TemporaryAux::<Tree, Blake2sHasher>::clear_temp(t_aux_orig).expect(\"t_aux delete failed\");\n\n        assert!(proofs_are_valid);\n\n        cache_dir.close().expect(\"Failed to remove cache dir\");\n    }\n\n    table_tests! {\n        prove_verify_fixed {\n            prove_verify_fixed_64_64(64);\n        }\n    }\n\n    #[test]\n    // We are seeing a bug in which setup never terminates for some sector sizes.\n    // This test is to debug that and should remain as a regression test.\n    fn setup_terminates() {\n        let degree = BASE_DEGREE;\n        let expansion_degree = EXP_DEGREE;\n        let nodes = 1024 * 1024 * 32 * 8; // This corresponds to 8GiB sectors (32-byte nodes)\n        let layer_challenges = LayerChallenges::new(10, 333);\n        let sp = SetupParams {\n            nodes,\n            degree,\n            expansion_degree,\n            porep_id: [32; 32],\n            layer_challenges: layer_challenges.clone(),\n        };\n\n        // When this fails, the call to setup should panic, but seems to actually hang (i.e. neither return nor panic) for some reason.\n        // When working as designed, the call to setup returns without error.\n        let _pp = StackedDrg::<\n            DiskTree<PedersenHasher, typenum::U8, typenum::U0, typenum::U0>,\n            Blake2sHasher,\n        >::setup(&sp)\n        .expect(\"setup failed\");\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/proof_scheme.rs",
    "content": "use anyhow::ensure;\nuse log::trace;\nuse rayon::prelude::*;\nuse storage_proofs_core::{\n    drgraph::Graph,\n    error::Result,\n    hasher::{HashFunction, Hasher},\n    merkle::MerkleTreeTrait,\n    proof::ProofScheme,\n};\n\nuse super::{\n    challenges::ChallengeRequirements,\n    graph::StackedBucketGraph,\n    params::{PrivateInputs, Proof, PublicInputs, PublicParams, SetupParams},\n    proof::StackedDrg,\n};\n\nimpl<'a, 'c, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> ProofScheme<'a>\n    for StackedDrg<'c, Tree, G>\n{\n    type PublicParams = PublicParams<Tree>;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<Tree::Hasher as Hasher>::Domain, <G as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<Tree, G>;\n    type Proof = Vec<Proof<Tree, G>>;\n    type Requirements = ChallengeRequirements;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        let graph = StackedBucketGraph::<Tree::Hasher>::new_stacked(\n            sp.nodes,\n            sp.degree,\n            sp.expansion_degree,\n            sp.porep_id,\n        )?;\n\n        Ok(PublicParams::new(graph, sp.layer_challenges.clone()))\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        let proofs = Self::prove_all_partitions(pub_params, pub_inputs, priv_inputs, 1)?;\n        let k = match pub_inputs.k {\n            None => 0,\n            Some(k) => k,\n        };\n        // Because partition proofs require a common setup, the general ProofScheme implementation,\n        // which makes use of `ProofScheme::prove` cannot be used here. Instead, we need to prove all\n        // partitions in one pass, as implemented by `prove_all_partitions` below.\n        assert!(\n            k < 1,\n            \"It is a programmer error to call StackedDrg::prove with more than one partition.\"\n        );\n\n        Ok(proofs[k].to_owned())\n    }\n\n    fn prove_all_partitions<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n        partition_count: usize,\n    ) -> Result<Vec<Self::Proof>> {\n        trace!(\"prove_all_partitions\");\n        ensure!(partition_count > 0, \"partitions must not be 0\");\n\n        Self::prove_layers(\n            &pub_params.graph,\n            pub_inputs,\n            &priv_inputs.p_aux,\n            &priv_inputs.t_aux,\n            &pub_params.layer_challenges,\n            pub_params.layer_challenges.layers(),\n            pub_params.layer_challenges.layers(),\n            partition_count,\n        )\n    }\n\n    fn verify_all_partitions(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        partition_proofs: &[Self::Proof],\n    ) -> Result<bool> {\n        trace!(\"verify_all_partitions\");\n\n        // generate graphs\n        let graph = &pub_params.graph;\n\n        let expected_comm_r = if let Some(ref tau) = pub_inputs.tau {\n            &tau.comm_r\n        } else {\n            return Ok(false);\n        };\n\n        let res = partition_proofs.par_iter().enumerate().all(|(k, proofs)| {\n            trace!(\n                \"verifying partition proof {}/{}\",\n                k + 1,\n                partition_proofs.len()\n            );\n\n            trace!(\"verify comm_r\");\n            let actual_comm_r: 
<Tree::Hasher as Hasher>::Domain = {\n                let comm_c = proofs[0].comm_c();\n                let comm_r_last = proofs[0].comm_r_last();\n                <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last)\n            };\n\n            if expected_comm_r != &actual_comm_r {\n                return false;\n            }\n\n            let challenges =\n                pub_inputs.challenges(&pub_params.layer_challenges, graph.size(), Some(k));\n\n            proofs.par_iter().enumerate().all(|(i, proof)| {\n                trace!(\"verify challenge {}/{}\", i + 1, challenges.len());\n\n                // Validate for this challenge\n                let challenge = challenges[i];\n\n                // make sure all proofs have the same comm_c\n                if proof.comm_c() != proofs[0].comm_c() {\n                    return false;\n                }\n                // make sure all proofs have the same comm_r_last\n                if proof.comm_r_last() != proofs[0].comm_r_last() {\n                    return false;\n                }\n\n                proof.verify(pub_params, pub_inputs, challenge, graph)\n            })\n        });\n\n        Ok(res)\n    }\n\n    fn with_partition(pub_in: Self::PublicInputs, k: Option<usize>) -> Self::PublicInputs {\n        self::PublicInputs {\n            replica_id: pub_in.replica_id,\n            seed: pub_in.seed,\n            tau: pub_in.tau,\n            k,\n        }\n    }\n\n    fn satisfies_requirements(\n        public_params: &PublicParams<Tree>,\n        requirements: &ChallengeRequirements,\n        partitions: usize,\n    ) -> bool {\n        let partition_challenges = public_params.layer_challenges.challenges_count_all();\n\n        partition_challenges * partitions >= requirements.minimum_challenges\n    }\n}\n"
  },
  {
    "path": "storage-proofs/porep/src/stacked/vanilla/utils.rs",
    "content": "use std::cell::UnsafeCell;\n\n/// A slice type which can be shared between threads, but must be fully managed by the caller.\n/// Any synchronization must be ensured by the caller, which is why all access is `unsafe`.\n#[derive(Debug)]\npub struct UnsafeSlice<'a, T> {\n    /// Holds the data to ensure lifetime correctness.\n    data: UnsafeCell<&'a mut [T]>,\n    /// Pointer to the data.\n    ptr: *mut T,\n    /// Number of elements, not bytes.\n    len: usize,\n}\n\nunsafe impl<'a, T> Sync for UnsafeSlice<'a, T> {}\n\nimpl<'a, T> UnsafeSlice<'a, T> {\n    /// Takes a mutable slice, to ensure that `UnsafeSlice` is the only user of this memory until it gets dropped.\n    pub fn from_slice(source: &'a mut [T]) -> Self {\n        let len = source.len();\n        let ptr = source.as_mut_ptr();\n        let data = UnsafeCell::new(source);\n        Self { data, ptr, len }\n    }\n\n    /// Safety: The caller must ensure that there are no unsynchronized parallel accesses to the same regions.\n    #[inline]\n    pub unsafe fn as_mut_slice(&self) -> &'a mut [T] {\n        std::slice::from_raw_parts_mut(self.ptr, self.len)\n    }\n    /// Safety: The caller must ensure that there are no unsynchronized parallel accesses to the same regions.\n    #[inline]\n    pub unsafe fn as_slice(&self) -> &'a [T] {\n        std::slice::from_raw_parts(self.ptr, self.len)\n    }\n\n    /// Safety: The caller must ensure that there are no unsynchronized parallel accesses to the same element.\n    #[inline]\n    pub unsafe fn get(&self, index: usize) -> &'a T {\n        &*self.ptr.add(index)\n    }\n\n    /// Safety: The caller must ensure that there are no unsynchronized parallel accesses to the same element.\n    #[inline]\n    pub unsafe fn get_mut(&self, index: usize) -> &'a mut T {\n        &mut *self.ptr.add(index)\n    }\n}\n\n/// Set all values in the given slice to the provided value.\n#[inline]\npub fn memset(slice: &mut [u8], value: u8) {\n    for v in slice.iter_mut() {\n        *v = value;\n    }\n}\n\n/// Writes the replica id and layer into the first block of `buf`, followed by the SHA-256\n/// padding marker and length field for a 64-byte message.\n#[inline]\npub fn prepare_block(replica_id: &[u8], layer: u32, buf: &mut [u8]) {\n    buf[..32].copy_from_slice(replica_id);\n    buf[35] = (layer & 0xFF) as u8;\n    buf[64] = 0x80; // Padding\n    buf[126] = 0x02; // Length (512 bits = 64B)\n}\n\n#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]\npub struct BitMask(u32);\n\nimpl BitMask {\n    /// Sets the full mask for the first `n` bits.\n    #[inline]\n    pub fn set_upto(&mut self, n: u8) {\n        assert!(n <= 32);\n        // Compute in u64 so that `n == 32` does not overflow the shift.\n        self.0 |= ((1u64 << n) - 1) as u32\n    }\n\n    /// Sets the ith bit.\n    #[inline]\n    pub fn set(&mut self, i: usize) {\n        self.0 |= 1 << i\n    }\n\n    /// Returns true if the ith bit is set, false otherwise.\n    #[inline]\n    pub fn get(self, i: usize) -> bool {\n        self.0 & (1 << i) != 0\n    }\n}\n\n#[derive(Debug)]\npub struct RingBuf {\n    data: UnsafeCell<Box<[u8]>>,\n    slot_size: usize,\n    num_slots: usize,\n}\n\nunsafe impl Sync for RingBuf {}\n\nimpl RingBuf {\n    /// Creates a new `RingBuf` with `num_slots` slots of `slot_size` bytes each.\n    pub fn new(slot_size: usize, num_slots: usize) -> Self {\n        let data = vec![0u8; slot_size * num_slots].into_boxed_slice();\n\n        RingBuf {\n            data: UnsafeCell::from(data),\n            slot_size,\n            num_slots,\n        }\n    }\n\n    #[allow(clippy::mut_from_ref)]\n    unsafe fn slice_mut(&self) -> &mut [u8] {\n        std::slice::from_raw_parts_mut((*self.data.get()).as_mut_ptr(), self.len())\n    }\n\n    fn len(&self) -> usize {\n        self.slot_size * self.num_slots\n    }\n\n    #[allow(clippy::mut_from_ref)]\n    pub unsafe fn slot_mut(&self, slot: usize) -> &mut [u8] {\n        let start = self.slot_size * slot;\n        let end = start + self.slot_size;\n\n        &mut 
self.slice_mut()[start..end]\n    }\n\n    pub fn iter_slot_mut(&mut self) -> std::slice::ChunksExactMut<u8> {\n        // Safety: safe because we are holding &mut self\n        unsafe { self.slice_mut().chunks_exact_mut(self.slot_size) }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/post/Cargo.toml",
    "content": "[package]\nname = \"storage-proofs-post\"\nversion = \"2.0.0\"\nauthors = [\"dignifiedquire <me@dignifiedquire.com>\"]\nlicense = \"MIT OR Apache-2.0\"\ndescription = \"Proofs of Space Time\"\nedition = \"2018\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\nreadme = \"README.md\"\n\n[dependencies]\nstorage-proofs-core = { path = \"../core\", version = \"2.0.0\" }\nrand = \"0.7\"\nmerkletree = \"0.20.0\"\nbyteorder = \"1\"\nsha2 = { version = \"0.8.3\", package = \"sha2ni\" }\nrayon = \"1.0.0\"\nserde = { version = \"1.0\", features = [\"derive\"]}\nblake2b_simd = \"0.5\"\nblake2s_simd = \"0.5\"\nff = { version = \"0.2.1\", package = \"fff\" }\nbellperson = \"0.8.0\"\npaired = { version = \"0.19.0\", features = [\"serde\"] }\nfil-sapling-crypto = \"0.6.0\"\nlog = \"0.4.7\"\nhex = \"0.4.0\"\ngeneric-array = \"0.13.2\"\nanyhow = \"1.0.23\"\nneptune = { git = \"https://github.com/filecoin-project/neptune.git\", branch=\"v1-0-candidate\", features=[\"gpu\"] }\n\n[dev-dependencies]\ntempdir = \"0.3.7\"\ntempfile = \"3\"\npretty_assertions = \"0.6.1\"\nrand_xorshift = \"0.2.0\"\n"
  },
  {
    "path": "storage-proofs/post/README.md",
    "content": "# Storage Proofs PoSt\n\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "storage-proofs/post/src/election/circuit.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::gadgets::num;\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse ff::Field;\nuse generic_array::typenum;\nuse paired::bls12_381::{Bls12, Fr};\nuse typenum::marker_traits::Unsigned;\n\nuse storage_proofs_core::{\n    compound_proof::CircuitComponent,\n    gadgets::constraint,\n    gadgets::por::PoRCircuit,\n    gadgets::variables::Root,\n    hasher::{HashFunction, Hasher, PoseidonFunction, PoseidonMDArity},\n    merkle::MerkleTreeTrait,\n};\n\n/// This is the `ElectionPoSt` circuit.\npub struct ElectionPoStCircuit<Tree: MerkleTreeTrait> {\n    pub comm_r: Option<Fr>,\n    pub comm_c: Option<Fr>,\n    pub comm_r_last: Option<Fr>,\n    pub leafs: Vec<Option<Fr>>,\n    #[allow(clippy::type_complexity)]\n    pub paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n    pub partial_ticket: Option<Fr>,\n    pub randomness: Option<Fr>,\n    pub prover_id: Option<Fr>,\n    pub sector_id: Option<Fr>,\n    pub _t: PhantomData<Tree>,\n}\n\n#[derive(Clone, Default)]\npub struct ComponentPrivateInputs {}\n\nimpl<'a, Tree: MerkleTreeTrait> CircuitComponent for ElectionPoStCircuit<Tree> {\n    type ComponentPrivateInputs = ComponentPrivateInputs;\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait> Circuit<Bls12> for ElectionPoStCircuit<Tree> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let comm_r = self.comm_r;\n        let comm_c = self.comm_c;\n        let comm_r_last = self.comm_r_last;\n        let leafs = self.leafs;\n        let paths = self.paths;\n        let partial_ticket = self.partial_ticket;\n        let randomness = self.randomness;\n        let prover_id = self.prover_id;\n        let sector_id = self.sector_id;\n\n        assert_eq!(paths.len(), leafs.len());\n\n        // 1. Verify comm_r\n\n        let comm_r_last_num = num::AllocatedNum::alloc(cs.namespace(|| \"comm_r_last\"), || {\n            comm_r_last\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        let comm_c_num = num::AllocatedNum::alloc(cs.namespace(|| \"comm_c\"), || {\n            comm_c\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        let comm_r_num = num::AllocatedNum::alloc(cs.namespace(|| \"comm_r\"), || {\n            comm_r\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        comm_r_num.inputize(cs.namespace(|| \"comm_r_input\"))?;\n\n        // Verify H(Comm_C || comm_r_last) == comm_r\n        {\n            let hash_num = <Tree::Hasher as Hasher>::Function::hash2_circuit(\n                cs.namespace(|| \"H_comm_c_comm_r_last\"),\n                &comm_c_num,\n                &comm_r_last_num,\n            )?;\n\n            // Check actual equality\n            constraint::equal(\n                cs,\n                || \"enforce_comm_c_comm_r_last_hash_comm_r\",\n                &comm_r_num,\n                &hash_num,\n            );\n        }\n\n        // 2. 
Verify Inclusion Paths\n        for (i, (leaf, path)) in leafs.iter().zip(paths.iter()).enumerate() {\n            PoRCircuit::<Tree>::synthesize(\n                cs.namespace(|| format!(\"challenge_inclusion{}\", i)),\n                Root::Val(*leaf),\n                path.clone().into(),\n                Root::from_allocated::<CS>(comm_r_last_num.clone()),\n                true,\n            )?;\n        }\n\n        // 3. Verify partial ticket\n\n        // randomness\n        let randomness_num = num::AllocatedNum::alloc(cs.namespace(|| \"randomness\"), || {\n            randomness\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        // prover_id\n        let prover_id_num = num::AllocatedNum::alloc(cs.namespace(|| \"prover_id\"), || {\n            prover_id\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        // sector_id\n        let sector_id_num = num::AllocatedNum::alloc(cs.namespace(|| \"sector_id\"), || {\n            sector_id\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        let mut partial_ticket_nums = vec![randomness_num, prover_id_num, sector_id_num];\n        for (i, leaf) in leafs.iter().enumerate() {\n            let leaf_num =\n                num::AllocatedNum::alloc(cs.namespace(|| format!(\"leaf_{}\", i)), || {\n                    leaf.map(Into::into)\n                        .ok_or_else(|| SynthesisError::AssignmentMissing)\n                })?;\n            partial_ticket_nums.push(leaf_num);\n        }\n\n        // pad to a multiple of md arity\n        let arity = PoseidonMDArity::to_usize();\n        while partial_ticket_nums.len() % arity != 0 {\n            partial_ticket_nums.push(num::AllocatedNum::alloc(\n                cs.namespace(|| format!(\"padding_{}\", partial_ticket_nums.len())),\n                || Ok(Fr::zero()),\n            )?);\n        }\n\n        // hash it\n        let partial_ticket_num = PoseidonFunction::hash_md_circuit::<_>(\n            &mut cs.namespace(|| \"partial_ticket_hash\"),\n            &partial_ticket_nums,\n        )?;\n\n        // allocate expected input\n        let expected_partial_ticket_num =\n            num::AllocatedNum::alloc(cs.namespace(|| \"partial_ticket\"), || {\n                partial_ticket\n                    .map(Into::into)\n                    .ok_or_else(|| SynthesisError::AssignmentMissing)\n            })?;\n\n        expected_partial_ticket_num.inputize(cs.namespace(|| \"partial_ticket_input\"))?;\n\n        // check equality\n        constraint::equal(\n            cs,\n            || \"enforce partial_ticket is correct\",\n            &partial_ticket_num,\n            &expected_partial_ticket_num,\n        );\n\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::collections::BTreeMap;\n\n    use ff::Field;\n    use paired::bls12_381::{Bls12, Fr};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        compound_proof::CompoundProof,\n        gadgets::TestConstraintSystem,\n        hasher::{Domain, HashFunction, Hasher, PedersenHasher, PoseidonHasher},\n        merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait},\n        proof::ProofScheme,\n        sector::SectorId,\n        util::NODE_SIZE,\n    };\n    use typenum::{U0, U8};\n\n    use crate::election::{self, 
ElectionPoSt, ElectionPoStCompound};\n\n    #[test]\n    fn test_election_post_circuit_pedersen() {\n        test_election_post_circuit::<LCTree<PedersenHasher, U8, U0, U0>>(389_883);\n    }\n\n    #[test]\n    fn test_election_post_circuit_poseidon() {\n        test_election_post_circuit::<LCTree<PoseidonHasher, U8, U0, U0>>(24_426);\n    }\n\n    fn test_election_post_circuit<Tree: 'static + MerkleTreeTrait>(expected_constraints: usize) {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n        let sector_size = leaves * NODE_SIZE;\n\n        let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n\n        let pub_params = election::PublicParams {\n            sector_size: sector_size as u64,\n            challenge_count: 20,\n            challenged_nodes: 1,\n        };\n\n        let mut sectors: Vec<SectorId> = Vec::new();\n        let mut trees = BTreeMap::new();\n\n        // Construct and store an MT using a named store.\n        let temp_dir = tempdir::TempDir::new(\"tree\").unwrap();\n        let temp_path = temp_dir.path();\n\n        for i in 0..5 {\n            sectors.push(i.into());\n            let (_data, tree) =\n                generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n            trees.insert(i.into(), tree);\n        }\n\n        let candidates = election::generate_candidates::<Tree>(\n            &pub_params,\n            &sectors,\n            &trees,\n            prover_id,\n            randomness,\n        )\n        .unwrap();\n\n        let candidate = &candidates[0];\n        let tree = trees.remove(&candidate.sector_id).unwrap();\n        let comm_r_last = tree.root();\n        let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n\n        let pub_inputs = election::PublicInputs {\n            randomness,\n            sector_id: candidate.sector_id,\n            prover_id,\n            comm_r,\n            partial_ticket: candidate.partial_ticket,\n            sector_challenge_index: 0,\n        };\n\n        let priv_inputs = election::PrivateInputs::<Tree> {\n            tree,\n            comm_c,\n            comm_r_last,\n        };\n\n        let proof = ElectionPoSt::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs)\n            .expect(\"proving failed\");\n\n        let is_valid = ElectionPoSt::<Tree>::verify(&pub_params, &pub_inputs, &proof)\n            .expect(\"verification failed\");\n        assert!(is_valid);\n\n        // actual circuit test\n\n        let paths = proof\n            .paths()\n            .iter()\n            .map(|p| {\n                p.iter()\n                    .map(|v| {\n                        (\n                            v.0.iter().copied().map(Into::into).map(Some).collect(),\n                            Some(v.1),\n                        )\n                    })\n                    .collect::<Vec<_>>()\n            })\n            .collect();\n        let leafs: Vec<_> = proof.leafs().iter().map(|l| Some((*l).into())).collect();\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        let instance = ElectionPoStCircuit::<Tree> {\n            leafs,\n            paths,\n            comm_r: Some(comm_r.into()),\n            comm_c: Some(comm_c.into()),\n            comm_r_last: Some(comm_r_last.into()),\n            
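// The remaining witnesses mirror the vanilla proof's public inputs: the candidate's partial ticket and sector id, plus the test's randomness and prover id.\n            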
partial_ticket: Some(candidate.partial_ticket.into()),\n            randomness: Some(randomness.into()),\n            prover_id: Some(prover_id.into()),\n            sector_id: Some(candidate.sector_id.into()),\n            _t: PhantomData,\n        };\n\n        instance\n            .synthesize(&mut cs)\n            .expect(\"failed to synthesize circuit\");\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n        assert_eq!(cs.num_inputs(), 23, \"wrong number of inputs\");\n        assert_eq!(\n            cs.num_constraints(),\n            expected_constraints,\n            \"wrong number of constraints\"\n        );\n        assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n        let generated_inputs =\n            ElectionPoStCompound::<Tree>::generate_public_inputs(&pub_inputs, &pub_params, None)\n                .unwrap();\n        let expected_inputs = cs.get_inputs();\n\n        for ((input, label), generated_input) in\n            expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n        {\n            assert_eq!(input, generated_input, \"{}\", label);\n        }\n\n        assert_eq!(\n            generated_inputs.len(),\n            expected_inputs.len() - 1,\n            \"inputs are not the same length\"\n        );\n    }\n}\n"
  },
  {
    "path": "storage-proofs/post/src/election/compound.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::Circuit;\nuse generic_array::typenum;\nuse paired::bls12_381::{Bls12, Fr};\nuse typenum::marker_traits::Unsigned;\n\nuse storage_proofs_core::{\n    compound_proof::{CircuitComponent, CompoundProof},\n    drgraph,\n    error::Result,\n    gadgets::por::PoRCompound,\n    merkle::MerkleTreeTrait,\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    por,\n    proof::ProofScheme,\n    util::NODE_SIZE,\n};\n\nuse crate::election::{self, ElectionPoSt, ElectionPoStCircuit};\n\npub struct ElectionPoStCompound<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    _t: PhantomData<Tree>,\n}\n\nimpl<C: Circuit<Bls12>, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters<C, P>\n    for ElectionPoStCompound<Tree>\n{\n    fn cache_prefix() -> String {\n        format!(\"proof-of-spacetime-election-{}\", Tree::display())\n    }\n}\n\nimpl<'a, Tree> CompoundProof<'a, ElectionPoSt<'a, Tree>, ElectionPoStCircuit<Tree>>\n    for ElectionPoStCompound<Tree>\nwhere\n    Tree: 'static + MerkleTreeTrait,\n{\n    fn generate_public_inputs(\n        pub_inputs: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        let mut inputs = Vec::new();\n\n        let por_pub_params = por::PublicParams {\n            leaves: (pub_params.sector_size as usize / NODE_SIZE),\n            private: true,\n        };\n\n        // 1. Inputs for verifying comm_r = H(comm_c || comm_r_last)\n\n        inputs.push(pub_inputs.comm_r.into());\n\n        // 2. Inputs for verifying inclusion paths\n\n        for n in 0..pub_params.challenge_count {\n            let challenged_leaf_start = election::generate_leaf_challenge(\n                &pub_params,\n                pub_inputs.randomness,\n                pub_inputs.sector_challenge_index,\n                n as u64,\n            )?;\n            for i in 0..pub_params.challenged_nodes {\n                let por_pub_inputs = por::PublicInputs {\n                    commitment: None,\n                    challenge: challenged_leaf_start as usize + i,\n                };\n                let por_inputs = PoRCompound::<Tree>::generate_public_inputs(\n                    &por_pub_inputs,\n                    &por_pub_params,\n                    None,\n                )?;\n\n                inputs.extend(por_inputs);\n            }\n        }\n\n        // 3. 
Inputs for verifying partial_ticket generation\n        inputs.push(pub_inputs.partial_ticket);\n\n        Ok(inputs)\n    }\n\n    fn circuit(\n        pub_in: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        _priv_in: <ElectionPoStCircuit<Tree> as CircuitComponent>::ComponentPrivateInputs,\n        vanilla_proof: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::Proof,\n        _pub_params: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<ElectionPoStCircuit<Tree>> {\n        let comm_r = pub_in.comm_r.into();\n        let comm_c = vanilla_proof.comm_c.into();\n        let comm_r_last = vanilla_proof.comm_r_last().into();\n\n        let leafs: Vec<_> = vanilla_proof\n            .leafs()\n            .iter()\n            .map(|c| Some((*c).into()))\n            .collect();\n\n        let paths: Vec<Vec<_>> = vanilla_proof\n            .paths()\n            .iter()\n            .map(|v| {\n                v.iter()\n                    .map(|p| {\n                        (\n                            (*p).0.iter().copied().map(Into::into).map(Some).collect(),\n                            Some(p.1),\n                        )\n                    })\n                    .collect()\n            })\n            .collect();\n\n        Ok(ElectionPoStCircuit {\n            leafs,\n            comm_r: Some(comm_r),\n            comm_c: Some(comm_c),\n            comm_r_last: Some(comm_r_last),\n            paths,\n            partial_ticket: Some(pub_in.partial_ticket),\n            randomness: Some(pub_in.randomness.into()),\n            prover_id: Some(pub_in.prover_id.into()),\n            sector_id: Some(pub_in.sector_id.into()),\n            _t: PhantomData,\n        })\n    }\n\n    fn blank_circuit(\n        pub_params: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n    ) -> ElectionPoStCircuit<Tree> {\n        let challenges_count = pub_params.challenged_nodes * pub_params.challenge_count;\n        let height =\n            drgraph::graph_height::<Tree::Arity>(pub_params.sector_size as usize / NODE_SIZE);\n\n        let leafs = vec![None; challenges_count];\n        let paths = vec![\n            vec![(vec![None; Tree::Arity::to_usize() - 1], None); height - 1];\n            challenges_count\n        ];\n\n        ElectionPoStCircuit {\n            comm_r: None,\n            comm_c: None,\n            comm_r_last: None,\n            partial_ticket: None,\n            leafs,\n            paths,\n            randomness: None,\n            prover_id: None,\n            sector_id: None,\n            _t: PhantomData,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::collections::BTreeMap;\n\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n\n    use storage_proofs_core::{\n        compound_proof,\n        gadgets::{MetricCS, TestConstraintSystem},\n        hasher::{Domain, HashFunction, Hasher, PedersenHasher, PoseidonHasher},\n        merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait},\n        proof::NoRequirements,\n        sector::SectorId,\n    };\n    use typenum::{U0, U8};\n\n    use crate::election;\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn election_post_test_compound_pedersen() {\n        election_post_test_compound::<LCTree<PedersenHasher, U8, U0, U0>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn 
election_post_test_compound_poseidon() {\n        election_post_test_compound::<LCTree<PoseidonHasher, U8, U0, U0>>();\n    }\n\n    fn election_post_test_compound<Tree: 'static + MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n        let sector_size = (leaves * NODE_SIZE) as u64;\n        let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: election::SetupParams {\n                sector_size,\n                challenge_count: 20,\n                challenged_nodes: 1,\n            },\n            partitions: None,\n            priority: true,\n        };\n\n        let mut sectors: Vec<SectorId> = Vec::new();\n        let mut trees = BTreeMap::new();\n\n        // Construct and store an MT using a named store.\n        let temp_dir = tempdir::TempDir::new(\"tree\").unwrap();\n        let temp_path = temp_dir.path();\n\n        for i in 0..5 {\n            sectors.push(i.into());\n            let (_data, tree) =\n                generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n            trees.insert(i.into(), tree);\n        }\n\n        let pub_params = ElectionPoStCompound::<Tree>::setup(&setup_params).expect(\"setup failed\");\n\n        let candidates = election::generate_candidates::<Tree>(\n            &pub_params.vanilla_params,\n            &sectors,\n            &trees,\n            prover_id,\n            randomness,\n        )\n        .unwrap();\n\n        let candidate = &candidates[0];\n        let tree = trees.remove(&candidate.sector_id).unwrap();\n        let comm_r_last = tree.root();\n        let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n\n        let pub_inputs = election::PublicInputs {\n            randomness,\n            sector_id: candidate.sector_id,\n            prover_id,\n            comm_r,\n            partial_ticket: candidate.partial_ticket,\n            sector_challenge_index: 0,\n        };\n\n        let priv_inputs = election::PrivateInputs::<Tree> {\n            tree,\n            comm_c,\n            comm_r_last,\n        };\n\n        {\n            let (circuit, inputs) =\n                ElectionPoStCompound::circuit_for_test(&pub_params, &pub_inputs, &priv_inputs)\n                    .unwrap();\n\n            let mut cs = TestConstraintSystem::new();\n\n            circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n\n            if !cs.is_satisfied() {\n                panic!(\n                    \"failed to satisfy: {:?}\",\n                    cs.which_is_unsatisfied().unwrap()\n                );\n            }\n            assert!(\n                cs.verify(&inputs),\n                \"verification failed with TestConstraintSystem and generated inputs\"\n            );\n        }\n\n        // Use this to debug differences between blank and regular circuit generation.\n        {\n            let (circuit1, _inputs) =\n                ElectionPoStCompound::circuit_for_test(&pub_params, &pub_inputs, &priv_inputs)\n                    .unwrap();\n            let blank_circuit =\n                ElectionPoStCompound::<Tree>::blank_circuit(&pub_params.vanilla_params);\n\n            let mut cs_blank = MetricCS::new();\n            
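// Synthesize the blank circuit into a metrics-only constraint system so its shape can be compared against the real circuit below.\n            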
blank_circuit\n                .synthesize(&mut cs_blank)\n                .expect(\"failed to synthesize\");\n\n            let a = cs_blank.pretty_print_list();\n\n            let mut cs1 = TestConstraintSystem::new();\n            circuit1.synthesize(&mut cs1).expect(\"failed to synthesize\");\n            let b = cs1.pretty_print_list();\n\n            for (i, (a, b)) in a.chunks(100).zip(b.chunks(100)).enumerate() {\n                assert_eq!(a, b, \"failed at chunk {}\", i);\n            }\n        }\n        let blank_groth_params =\n            ElectionPoStCompound::<Tree>::groth_params(Some(rng), &pub_params.vanilla_params)\n                .expect(\"failed to generate groth params\");\n\n        let proof = ElectionPoStCompound::prove(\n            &pub_params,\n            &pub_inputs,\n            &priv_inputs,\n            &blank_groth_params,\n        )\n        .expect(\"failed while proving\");\n\n        let verified =\n            ElectionPoStCompound::verify(&pub_params, &pub_inputs, &proof, &NoRequirements)\n                .expect(\"failed while verifying\");\n\n        assert!(verified);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/post/src/election/mod.rs",
    "content": "mod circuit;\nmod compound;\nmod vanilla;\n\npub use circuit::*;\npub use compound::*;\npub use vanilla::*;\n"
  },
  {
    "path": "storage-proofs/post/src/election/vanilla.rs",
    "content": "use std::collections::BTreeMap;\nuse std::fmt;\nuse std::marker::PhantomData;\n\nuse anyhow::{bail, ensure, Context};\nuse byteorder::{ByteOrder, LittleEndian};\nuse generic_array::typenum;\nuse log::trace;\nuse merkletree::store::StoreConfig;\nuse paired::bls12_381::Fr;\nuse rayon::prelude::*;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\nuse typenum::Unsigned;\n\nuse storage_proofs_core::{\n    error::{Error, Result},\n    fr32::fr_into_bytes,\n    hasher::{Domain, HashFunction, Hasher, PoseidonDomain, PoseidonFunction, PoseidonMDArity},\n    measurements::{measure_op, Operation},\n    merkle::{MerkleProof, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper},\n    parameter_cache::ParameterSetMetadata,\n    proof::{NoRequirements, ProofScheme},\n    sector::*,\n    util::NODE_SIZE,\n};\n\n#[derive(Debug, Clone)]\npub struct SetupParams {\n    /// Size of the sector in bytes.\n    pub sector_size: u64,\n    pub challenge_count: usize,\n    pub challenged_nodes: usize,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams {\n    /// Size of the sector in bytes.\n    pub sector_size: u64,\n    pub challenge_count: usize,\n    pub challenged_nodes: usize,\n}\n\nimpl ParameterSetMetadata for PublicParams {\n    fn identifier(&self) -> String {\n        format!(\n            \"ElectionPoSt::PublicParams{{sector_size: {}, count: {}, nodes: {}}}\",\n            self.sector_size(),\n            self.challenge_count,\n            self.challenged_nodes,\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.sector_size\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct PublicInputs<T: Domain> {\n    pub randomness: T,\n    pub sector_id: SectorId,\n    pub prover_id: T,\n    pub comm_r: T,\n    pub partial_ticket: Fr,\n    pub sector_challenge_index: u64,\n}\n\n#[derive(Debug)]\npub struct PrivateInputs<Tree: MerkleTreeTrait> {\n    pub tree: MerkleTreeWrapper<\n        Tree::Hasher,\n        Tree::Store,\n        Tree::Arity,\n        Tree::SubTreeArity,\n        Tree::TopTreeArity,\n    >,\n    pub comm_c: <Tree::Hasher as Hasher>::Domain,\n    pub comm_r_last: <Tree::Hasher as Hasher>::Domain,\n}\n\n/// The candidate data, that is needed for ticket generation.\n#[derive(Clone, Serialize, Deserialize)]\npub struct Candidate {\n    pub sector_id: SectorId,\n    pub partial_ticket: Fr,\n    pub ticket: [u8; 32],\n    pub sector_challenge_index: u64,\n}\n\nimpl fmt::Debug for Candidate {\n    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"Candidate\")\n            .field(\"sector_id\", &self.sector_id)\n            .field(\"partial_ticket\", &self.partial_ticket)\n            .field(\"ticket\", &hex::encode(&self.ticket))\n            .field(\"sector_challenge_index\", &self.sector_challenge_index)\n            .finish()\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Proof<P: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>: Serialize\",\n        deserialize = \"MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>: serde::de::DeserializeOwned\"\n    ))]\n    inclusion_proofs: Vec<MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>>,\n    pub ticket: [u8; 32],\n    pub comm_c: <P::Hasher as Hasher>::Domain,\n}\n\nimpl<P: MerkleProofTrait> Proof<P> {\n    pub fn leafs(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n        self.inclusion_proofs\n            .iter()\n            
.map(MerkleProof::leaf)\n            .collect()\n    }\n\n    pub fn comm_r_last(&self) -> <P::Hasher as Hasher>::Domain {\n        self.inclusion_proofs[0].root()\n    }\n\n    pub fn commitments(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProof::root)\n            .collect()\n    }\n\n    #[allow(clippy::type_complexity)]\n    pub fn paths(&self) -> Vec<Vec<(Vec<<P::Hasher as Hasher>::Domain>, usize)>> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProof::path)\n            .collect()\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct ElectionPoSt<'a, Tree>\nwhere\n    Tree: 'a + MerkleTreeTrait,\n{\n    _t: PhantomData<&'a Tree>,\n}\n\n#[allow(clippy::type_complexity)]\npub fn generate_candidates<Tree: MerkleTreeTrait>(\n    pub_params: &PublicParams,\n    challenged_sectors: &[SectorId],\n    trees: &BTreeMap<\n        SectorId,\n        MerkleTreeWrapper<\n            Tree::Hasher,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >,\n    >,\n    prover_id: <Tree::Hasher as Hasher>::Domain,\n    randomness: <Tree::Hasher as Hasher>::Domain,\n) -> Result<Vec<Candidate>> {\n    challenged_sectors\n        .par_iter()\n        .enumerate()\n        .map(|(sector_challenge_index, sector_id)| {\n            let tree = match trees.get(sector_id) {\n                Some(tree) => tree,\n                None => bail!(Error::MissingPrivateInput(\"tree\", (*sector_id).into())),\n            };\n\n            generate_candidate::<Tree>(\n                pub_params,\n                tree,\n                prover_id,\n                *sector_id,\n                randomness,\n                sector_challenge_index as u64,\n            )\n        })\n        .collect()\n}\n\nfn generate_candidate<Tree: MerkleTreeTrait>(\n    pub_params: &PublicParams,\n    tree: &MerkleTreeWrapper<\n        Tree::Hasher,\n        Tree::Store,\n        Tree::Arity,\n        Tree::SubTreeArity,\n        Tree::TopTreeArity,\n    >,\n    prover_id: <Tree::Hasher as Hasher>::Domain,\n    sector_id: SectorId,\n    randomness: <Tree::Hasher as Hasher>::Domain,\n    sector_challenge_index: u64,\n) -> Result<Candidate> {\n    let randomness_fr: Fr = randomness.into();\n    let prover_id_fr: Fr = prover_id.into();\n    let mut data: Vec<PoseidonDomain> = vec![\n        randomness_fr.into(),\n        prover_id_fr.into(),\n        Fr::from(sector_id).into(),\n    ];\n\n    for n in 0..pub_params.challenge_count {\n        let challenge =\n            generate_leaf_challenge(pub_params, randomness, sector_challenge_index, n as u64)?;\n\n        let val: Fr = measure_op(Operation::PostReadChallengedRange, || {\n            tree.read_at(challenge as usize)\n        })?\n        .into();\n        data.push(val.into());\n    }\n\n    // pad for md\n    let arity = PoseidonMDArity::to_usize();\n    while data.len() % arity != 0 {\n        data.push(PoseidonDomain::default());\n    }\n\n    let partial_ticket: Fr = measure_op(Operation::PostPartialTicketHash, || {\n        PoseidonFunction::hash_md(&data)\n    })\n    .into();\n\n    // ticket = sha256(partial_ticket)\n    let ticket = finalize_ticket(&partial_ticket);\n\n    Ok(Candidate {\n        sector_challenge_index,\n        sector_id,\n        partial_ticket,\n        ticket,\n    })\n}\n\npub fn finalize_ticket(partial_ticket: &Fr) -> [u8; 32] {\n    let bytes = fr_into_bytes(partial_ticket);\n  
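// The final ticket is the SHA-256 digest of the partial ticket serialized to bytes.\n  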
  let ticket_hash = Sha256::digest(&bytes);\n    let mut ticket = [0u8; 32];\n    ticket.copy_from_slice(&ticket_hash[..]);\n    ticket\n}\n\npub fn is_valid_sector_challenge_index(challenge_count: u64, index: u64) -> bool {\n    index < challenge_count\n}\n\npub fn generate_sector_challenges<T: Domain>(\n    randomness: T,\n    challenge_count: u64,\n    sectors: &OrderedSectorSet,\n) -> Result<Vec<SectorId>> {\n    (0..challenge_count)\n        .into_par_iter()\n        .map(|n| generate_sector_challenge(randomness, n as usize, sectors))\n        .collect()\n}\n\npub fn generate_sector_challenge<T: Domain>(\n    randomness: T,\n    n: usize,\n    sectors: &OrderedSectorSet,\n) -> Result<SectorId> {\n    let mut hasher = Sha256::new();\n    hasher.input(AsRef::<[u8]>::as_ref(&randomness));\n    hasher.input(&n.to_le_bytes()[..]);\n    let hash = hasher.result();\n\n    let sector_challenge = LittleEndian::read_u64(&hash.as_ref()[..8]);\n    let sector_index = (sector_challenge % sectors.len() as u64) as usize;\n    let sector = *sectors\n        .iter()\n        .nth(sector_index)\n        .context(\"invalid challenge generated\")?;\n\n    Ok(sector)\n}\n\n/// Generate all challenged leaf ranges for a single sector, such that the range fits into the sector.\npub fn generate_leaf_challenges<T: Domain>(\n    pub_params: &PublicParams,\n    randomness: T,\n    sector_challenge_index: u64,\n    challenge_count: usize,\n) -> Result<Vec<u64>> {\n    let mut challenges = Vec::with_capacity(challenge_count);\n\n    for leaf_challenge_index in 0..challenge_count {\n        let challenge = generate_leaf_challenge(\n            pub_params,\n            randomness,\n            sector_challenge_index,\n            leaf_challenge_index as u64,\n        )?;\n        challenges.push(challenge)\n    }\n\n    Ok(challenges)\n}\n\n/// Generates a challenge such that the range fits into the sector.\npub fn generate_leaf_challenge<T: Domain>(\n    pub_params: &PublicParams,\n    randomness: T,\n    sector_challenge_index: u64,\n    leaf_challenge_index: u64,\n) -> Result<u64> {\n    ensure!(\n        pub_params.sector_size > pub_params.challenged_nodes as u64 * NODE_SIZE as u64,\n        \"sector size {} is too small\",\n        pub_params.sector_size\n    );\n\n    let mut hasher = Sha256::new();\n    hasher.input(AsRef::<[u8]>::as_ref(&randomness));\n    hasher.input(&sector_challenge_index.to_le_bytes()[..]);\n    hasher.input(&leaf_challenge_index.to_le_bytes()[..]);\n    let hash = hasher.result();\n\n    let leaf_challenge = LittleEndian::read_u64(&hash.as_ref()[..8]);\n\n    let challenged_range_index = leaf_challenge\n        % (pub_params.sector_size / (pub_params.challenged_nodes * NODE_SIZE) as u64);\n\n    Ok(challenged_range_index * pub_params.challenged_nodes as u64)\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait> ProofScheme<'a> for ElectionPoSt<'a, Tree> {\n    type PublicParams = PublicParams;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<Tree::Hasher as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<Tree>;\n    type Proof = Proof<Tree::Proof>;\n    type Requirements = NoRequirements;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        Ok(PublicParams {\n            sector_size: sp.sector_size,\n            challenge_count: sp.challenge_count,\n            challenged_nodes: sp.challenged_nodes,\n        })\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n 
       priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        // 1. Inclusion proofs of all challenged leafs in all challenged ranges\n        let tree = &priv_inputs.tree;\n        let tree_leafs = tree.leafs();\n\n        let config_levels =\n            StoreConfig::default_cached_above_base_layer(tree_leafs, Tree::Arity::to_usize());\n\n        trace!(\n            \"Generating proof for tree of len {} with leafs {}, and cached_above_base_layers {}\",\n            tree.len(),\n            tree_leafs,\n            config_levels,\n        );\n\n        let inclusion_proofs = measure_op(Operation::PostInclusionProofs, || {\n            (0..pub_params.challenge_count)\n                .into_par_iter()\n                .flat_map(|n| {\n                    // TODO: replace unwrap with proper error handling\n                    let challenged_leaf_start = generate_leaf_challenge(\n                        pub_params,\n                        pub_inputs.randomness,\n                        pub_inputs.sector_challenge_index,\n                        n as u64,\n                    )\n                    .unwrap();\n                    (0..pub_params.challenged_nodes)\n                        .into_par_iter()\n                        .map(move |i| {\n                            tree.gen_cached_proof(challenged_leaf_start as usize + i, config_levels)\n                        })\n                })\n                .collect::<Result<Vec<_>>>()\n        })?;\n\n        // 2. Correct generation of the ticket from the partial_ticket (add this to the candidate)\n        let ticket = measure_op(Operation::PostFinalizeTicket, || {\n            finalize_ticket(&pub_inputs.partial_ticket)\n        });\n\n        Ok(Proof {\n            inclusion_proofs,\n            ticket,\n            comm_c: priv_inputs.comm_c,\n        })\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        // verify that H(Comm_c || Comm_r_last) == Comm_R\n        // comm_r_last is the root of the proof\n        let comm_r_last = proof.inclusion_proofs[0].root();\n        let comm_c = proof.comm_c;\n        let comm_r = &pub_inputs.comm_r;\n\n        if AsRef::<[u8]>::as_ref(&<Tree::Hasher as Hasher>::Function::hash2(\n            &comm_c,\n            &comm_r_last,\n        )) != AsRef::<[u8]>::as_ref(comm_r)\n        {\n            return Ok(false);\n        }\n\n        for n in 0..pub_params.challenge_count {\n            let challenged_leaf_start = generate_leaf_challenge(\n                pub_params,\n                pub_inputs.randomness,\n                pub_inputs.sector_challenge_index,\n                n as u64,\n            )?;\n            for i in 0..pub_params.challenged_nodes {\n                let merkle_proof = &proof.inclusion_proofs[n * pub_params.challenged_nodes + i];\n\n                // validate all comm_r_lasts match\n                if merkle_proof.root() != comm_r_last {\n                    return Ok(false);\n                }\n\n                // validate the path length\n                let expected_path_length =\n                    merkle_proof.expected_len(pub_params.sector_size as usize / NODE_SIZE);\n\n                if expected_path_length != merkle_proof.path().len() {\n                    return Ok(false);\n        
        }\n            }\n        }\n\n        Ok(true)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use typenum::{U0, U2, U8};\n\n    use storage_proofs_core::{\n        hasher::{PedersenHasher, PoseidonHasher},\n        merkle::{generate_tree, get_base_tree_count, LCTree},\n    };\n\n    fn test_election_post<Tree: 'static + MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n        let sector_size = leaves * NODE_SIZE;\n\n        let pub_params = PublicParams {\n            sector_size: sector_size as u64,\n            challenge_count: 40,\n            challenged_nodes: 1,\n        };\n\n        let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n\n        let mut sectors: Vec<SectorId> = Vec::new();\n        let mut trees = BTreeMap::new();\n\n        // Construct and store an MT using a named store.\n        let temp_dir = tempdir::TempDir::new(\"tree\").unwrap();\n        let temp_path = temp_dir.path();\n\n        for i in 0..5 {\n            sectors.push(i.into());\n            let (_data, tree) =\n                generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n            trees.insert(i.into(), tree);\n        }\n\n        let candidates =\n            generate_candidates::<Tree>(&pub_params, &sectors, &trees, prover_id, randomness)\n                .unwrap();\n\n        let candidate = &candidates[0];\n        let tree = trees.remove(&candidate.sector_id).unwrap();\n        let comm_r_last = tree.root();\n        let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n\n        let pub_inputs = PublicInputs {\n            randomness,\n            sector_id: candidate.sector_id,\n            prover_id,\n            comm_r,\n            partial_ticket: candidate.partial_ticket,\n            sector_challenge_index: 0,\n        };\n\n        let priv_inputs = PrivateInputs::<Tree> {\n            tree,\n            comm_c,\n            comm_r_last,\n        };\n\n        let proof = ElectionPoSt::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs)\n            .expect(\"proving failed\");\n\n        let is_valid = ElectionPoSt::<Tree>::verify(&pub_params, &pub_inputs, &proof)\n            .expect(\"verification failed\");\n\n        assert!(is_valid);\n    }\n\n    #[test]\n    fn election_post_pedersen() {\n        test_election_post::<LCTree<PedersenHasher, U8, U0, U0>>();\n    }\n\n    #[test]\n    fn election_post_poseidon() {\n        test_election_post::<LCTree<PoseidonHasher, U8, U0, U0>>();\n    }\n\n    #[test]\n    fn election_post_poseidon_8_8() {\n        test_election_post::<LCTree<PoseidonHasher, U8, U8, U0>>();\n    }\n\n    #[test]\n    fn election_post_poseidon_8_8_2() {\n        test_election_post::<LCTree<PoseidonHasher, U8, U8, U2>>();\n    }\n}\n"
  },
  {
    "path": "storage-proofs/post/src/fallback/circuit.rs",
    "content": "use bellperson::gadgets::num;\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse paired::bls12_381::{Bls12, Fr};\n\nuse storage_proofs_core::{\n    compound_proof::CircuitComponent,\n    error::Result,\n    gadgets::constraint,\n    gadgets::por::{AuthPath, PoRCircuit},\n    gadgets::variables::Root,\n    hasher::{HashFunction, Hasher},\n    merkle::MerkleTreeTrait,\n    por,\n    util::NODE_SIZE,\n};\n\nuse super::vanilla::{PublicParams, PublicSector, SectorProof};\n\n/// This is the `FallbackPoSt` circuit.\npub struct FallbackPoStCircuit<Tree: MerkleTreeTrait> {\n    pub prover_id: Option<Fr>,\n    pub sectors: Vec<Sector<Tree>>,\n}\n\n#[derive(Clone)]\npub struct Sector<Tree: MerkleTreeTrait> {\n    pub comm_r: Option<Fr>,\n    pub comm_c: Option<Fr>,\n    pub comm_r_last: Option<Fr>,\n    pub leafs: Vec<Option<Fr>>,\n    pub paths: Vec<AuthPath<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>,\n    pub id: Option<Fr>,\n}\n\nimpl<Tree: 'static + MerkleTreeTrait> Sector<Tree> {\n    pub fn circuit(\n        sector: &PublicSector<<Tree::Hasher as Hasher>::Domain>,\n        vanilla_proof: &SectorProof<Tree::Proof>,\n    ) -> Result<Self> {\n        let leafs = vanilla_proof\n            .leafs()\n            .iter()\n            .map(|l| Some((*l).into()))\n            .collect();\n\n        let paths = vanilla_proof\n            .as_options()\n            .into_iter()\n            .map(Into::into)\n            .collect();\n\n        Ok(Sector {\n            leafs,\n            id: Some(sector.id.into()),\n            comm_r: Some(sector.comm_r.into()),\n            comm_c: Some(vanilla_proof.comm_c.into()),\n            comm_r_last: Some(vanilla_proof.comm_r_last.into()),\n            paths,\n        })\n    }\n\n    pub fn blank_circuit(pub_params: &PublicParams) -> Self {\n        let challenges_count = pub_params.challenge_count;\n        let leaves = pub_params.sector_size as usize / NODE_SIZE;\n\n        let por_params = por::PublicParams {\n            leaves,\n            private: true,\n        };\n        let leafs = vec![None; challenges_count];\n        let paths = vec![AuthPath::blank(por_params.leaves); challenges_count];\n\n        Sector {\n            id: None,\n            comm_r: None,\n            comm_c: None,\n            comm_r_last: None,\n            leafs,\n            paths,\n        }\n    }\n}\n\nimpl<Tree: 'static + MerkleTreeTrait> Circuit<Bls12> for &Sector<Tree> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let Sector {\n            comm_r,\n            comm_c,\n            comm_r_last,\n            leafs,\n            paths,\n            ..\n        } = self;\n\n        assert_eq!(paths.len(), leafs.len());\n\n        // 1. 
Verify comm_r\n        let comm_r_last_num = num::AllocatedNum::alloc(cs.namespace(|| \"comm_r_last\"), || {\n            comm_r_last\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        let comm_c_num = num::AllocatedNum::alloc(cs.namespace(|| \"comm_c\"), || {\n            comm_c\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        let comm_r_num = num::AllocatedNum::alloc(cs.namespace(|| \"comm_r\"), || {\n            comm_r\n                .map(Into::into)\n                .ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        comm_r_num.inputize(cs.namespace(|| \"comm_r_input\"))?;\n\n        // Verify H(Comm_C || comm_r_last) == comm_r\n        {\n            let hash_num = <Tree::Hasher as Hasher>::Function::hash2_circuit(\n                cs.namespace(|| \"H_comm_c_comm_r_last\"),\n                &comm_c_num,\n                &comm_r_last_num,\n            )?;\n\n            // Check actual equality\n            constraint::equal(\n                cs,\n                || \"enforce_comm_c_comm_r_last_hash_comm_r\",\n                &comm_r_num,\n                &hash_num,\n            );\n        }\n\n        // 2. Verify Inclusion Paths\n        for (i, (leaf, path)) in leafs.iter().zip(paths.iter()).enumerate() {\n            PoRCircuit::<Tree>::synthesize(\n                cs.namespace(|| format!(\"challenge_inclusion_{}\", i)),\n                Root::Val(*leaf),\n                path.clone(),\n                Root::from_allocated::<CS>(comm_r_last_num.clone()),\n                true,\n            )?;\n        }\n\n        Ok(())\n    }\n}\n\n#[derive(Clone, Default)]\npub struct ComponentPrivateInputs {}\n\nimpl<Tree: MerkleTreeTrait> CircuitComponent for FallbackPoStCircuit<Tree> {\n    type ComponentPrivateInputs = ComponentPrivateInputs;\n}\n\nimpl<Tree: 'static + MerkleTreeTrait> Circuit<Bls12> for FallbackPoStCircuit<Tree> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        for (i, sector) in self.sectors.iter().enumerate() {\n            let cs = &mut cs.namespace(|| format!(\"sector_{}\", i));\n\n            sector.synthesize(cs)?;\n        }\n\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use ff::Field;\n    use generic_array::typenum::{U0, U2, U4, U8};\n    use paired::bls12_381::{Bls12, Fr};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        compound_proof::CompoundProof,\n        gadgets::TestConstraintSystem,\n        hasher::{Domain, HashFunction, Hasher, PedersenHasher, PoseidonHasher},\n        merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait, OctMerkleTree},\n        proof::ProofScheme,\n        util::NODE_SIZE,\n    };\n\n    use crate::fallback::{\n        self, FallbackPoSt, FallbackPoStCompound, PrivateInputs, PrivateSector, PublicInputs,\n        PublicSector,\n    };\n\n    #[test]\n    fn fallback_post_pedersen_single_partition_matching_base_8() {\n        fallback_post::<LCTree<PedersenHasher, U8, U0, U0>>(3, 3, 1, 19, 293_439);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_single_partition_matching_base_8() {\n        fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(3, 3, 1, 19, 16_869);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_single_partition_matching_sub_8_4() {\n        
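// Args: total sectors, sectors per partition, partitions, expected public input count, expected constraint count.\n        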
fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(3, 3, 1, 19, 22_674);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_single_partition_matching_top_8_4_2() {\n        fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(3, 3, 1, 19, 27_384);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_single_partition_smaller_base_8() {\n        fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(2, 3, 1, 19, 16_869);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_two_partitions_matching_base_8() {\n        fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(4, 2, 2, 13, 11_246);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_two_partitions_smaller_base_8() {\n        fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 3, 2, 19, 16_869);\n    }\n\n    #[test]\n    #[ignore]\n    fn metric_fallback_post_circuit_poseidon() {\n        use storage_proofs_core::gadgets::BenchCS;\n\n        let params = fallback::SetupParams {\n            sector_size: 1024 * 1024 * 1024 * 32 as u64,\n            challenge_count: 10,\n            sector_count: 5,\n        };\n\n        let pp = FallbackPoSt::<OctMerkleTree<PoseidonHasher>>::setup(&params).unwrap();\n\n        let mut cs = BenchCS::<Bls12>::new();\n        FallbackPoStCompound::<OctMerkleTree<PoseidonHasher>>::blank_circuit(&pp)\n            .synthesize(&mut cs)\n            .unwrap();\n\n        assert_eq!(cs.num_constraints(), 266_665);\n    }\n\n    fn fallback_post<Tree: 'static + MerkleTreeTrait>(\n        total_sector_count: usize,\n        sector_count: usize,\n        partitions: usize,\n        expected_num_inputs: usize,\n        expected_constraints: usize,\n    ) where\n        Tree::Store: 'static,\n    {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n        let sector_size = leaves * NODE_SIZE;\n        let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n\n        let pub_params = fallback::PublicParams {\n            sector_size: sector_size as u64,\n            challenge_count: 5,\n            sector_count,\n        };\n\n        // Construct and store an MT using a named DiskStore.\n        let temp_dir = tempdir::TempDir::new(\"level_cache_tree_v1\").unwrap();\n        let temp_path = temp_dir.path();\n\n        let mut pub_sectors = Vec::new();\n        let mut priv_sectors = Vec::new();\n        let mut trees = Vec::new();\n\n        for _i in 0..total_sector_count {\n            let (_data, tree) =\n                generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n            trees.push(tree);\n        }\n\n        for (i, tree) in trees.iter().enumerate() {\n            let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n            let comm_r_last = tree.root();\n\n            priv_sectors.push(PrivateSector {\n                tree,\n                comm_c,\n                comm_r_last,\n            });\n\n            let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n            pub_sectors.push(PublicSector {\n                id: (i as u64).into(),\n                comm_r,\n            });\n        }\n\n        let pub_inputs = PublicInputs {\n            randomness,\n            prover_id,\n            sectors: &pub_sectors,\n            k: None,\n        };\n\n        let priv_inputs = PrivateInputs::<Tree> {\n            sectors: &priv_sectors,\n        };\n\n        let proofs = 
FallbackPoSt::<Tree>::prove_all_partitions(\n            &pub_params,\n            &pub_inputs,\n            &priv_inputs,\n            partitions,\n        )\n        .expect(\"proving failed\");\n        assert_eq!(proofs.len(), partitions);\n\n        let is_valid =\n            FallbackPoSt::<Tree>::verify_all_partitions(&pub_params, &pub_inputs, &proofs)\n                .expect(\"verification failed\");\n        assert!(is_valid);\n\n        // actual circuit test\n\n        for (j, proof) in proofs.iter().enumerate() {\n            // iterates over each partition\n            let circuit_sectors = proof\n                .sectors\n                .iter()\n                .enumerate()\n                .map(|(i, proof)| {\n                    // index into sectors by the correct offset\n                    let i = j * sector_count + i;\n\n                    if i < pub_sectors.len() {\n                        Sector::circuit(&pub_sectors[i], proof)\n                    } else {\n                        // duplicate the last one\n                        let k = pub_sectors.len() - 1;\n                        Sector::circuit(&pub_sectors[k], proof)\n                    }\n                })\n                .collect::<Result<_>>()\n                .unwrap();\n\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let instance = FallbackPoStCircuit::<Tree> {\n                sectors: circuit_sectors,\n                prover_id: Some(prover_id.into()),\n            };\n\n            instance\n                .synthesize(&mut cs)\n                .expect(\"failed to synthesize circuit\");\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n            assert_eq!(\n                cs.num_inputs(),\n                expected_num_inputs,\n                \"wrong number of inputs\"\n            );\n            assert_eq!(\n                cs.num_constraints(),\n                expected_constraints,\n                \"wrong number of constraints\"\n            );\n            assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n            let generated_inputs = FallbackPoStCompound::<Tree>::generate_public_inputs(\n                &pub_inputs,\n                &pub_params,\n                Some(j),\n            )\n            .unwrap();\n            let expected_inputs = cs.get_inputs();\n\n            for ((input, label), generated_input) in\n                expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n            {\n                assert_eq!(input, generated_input, \"{}\", label);\n            }\n\n            assert_eq!(\n                generated_inputs.len(),\n                expected_inputs.len() - 1,\n                \"inputs are not the same length\"\n            );\n\n            assert!(\n                cs.verify(&generated_inputs),\n                \"verification failed with TestConstraintSystem and generated inputs\"\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/post/src/fallback/compound.rs",
    "content": "use std::marker::PhantomData;\n\nuse anyhow::{anyhow, ensure};\nuse bellperson::Circuit;\nuse paired::bls12_381::{Bls12, Fr};\n\nuse storage_proofs_core::{\n    compound_proof::{CircuitComponent, CompoundProof},\n    error::Result,\n    gadgets::por::PoRCompound,\n    merkle::MerkleTreeTrait,\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    por,\n    proof::ProofScheme,\n    util::NODE_SIZE,\n};\n\nuse super::circuit::Sector;\nuse crate::fallback::{self, FallbackPoSt, FallbackPoStCircuit};\n\npub struct FallbackPoStCompound<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    _t: PhantomData<Tree>,\n}\n\nimpl<C: Circuit<Bls12>, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters<C, P>\n    for FallbackPoStCompound<Tree>\n{\n    fn cache_prefix() -> String {\n        format!(\"proof-of-spacetime-fallback-{}\", Tree::display())\n    }\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait>\n    CompoundProof<'a, FallbackPoSt<'a, Tree>, FallbackPoStCircuit<Tree>>\n    for FallbackPoStCompound<Tree>\n{\n    fn generate_public_inputs(\n        pub_inputs: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        partition_k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        let mut inputs = Vec::new();\n\n        let por_pub_params = por::PublicParams {\n            leaves: (pub_params.sector_size as usize / NODE_SIZE),\n            private: true,\n        };\n\n        let num_sectors_per_chunk = pub_params.sector_count;\n\n        let partition_index = partition_k.unwrap_or(0);\n\n        let sectors = pub_inputs\n            .sectors\n            .chunks(num_sectors_per_chunk)\n            .nth(partition_index)\n            .ok_or_else(|| anyhow!(\"invalid number of sectors/partition index\"))?;\n\n        for (i, sector) in sectors.iter().enumerate() {\n            // 1. Inputs for verifying comm_r = H(comm_c || comm_r_last)\n            inputs.push(sector.comm_r.into());\n\n            // 2. 
Inputs for verifying inclusion paths\n            for n in 0..pub_params.challenge_count {\n                let challenge_index = ((partition_index * pub_params.sector_count + i)\n                    * pub_params.challenge_count\n                    + n) as u64;\n                let challenged_leaf_start = fallback::generate_leaf_challenge(\n                    &pub_params,\n                    pub_inputs.randomness,\n                    sector.id.into(),\n                    challenge_index,\n                )?;\n\n                let por_pub_inputs = por::PublicInputs {\n                    commitment: None,\n                    challenge: challenged_leaf_start as usize,\n                };\n                let por_inputs = PoRCompound::<Tree>::generate_public_inputs(\n                    &por_pub_inputs,\n                    &por_pub_params,\n                    partition_k,\n                )?;\n\n                inputs.extend(por_inputs);\n            }\n        }\n        let num_inputs_per_sector = inputs.len() / sectors.len();\n\n        // duplicate the last one if too few sectors are available\n        while inputs.len() / num_inputs_per_sector < num_sectors_per_chunk {\n            let s = inputs[inputs.len() - num_inputs_per_sector..].to_vec();\n            inputs.extend_from_slice(&s);\n        }\n        assert_eq!(inputs.len(), num_inputs_per_sector * num_sectors_per_chunk);\n\n        Ok(inputs)\n    }\n\n    fn circuit(\n        pub_in: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        _priv_in: <FallbackPoStCircuit<Tree> as CircuitComponent>::ComponentPrivateInputs,\n        vanilla_proof: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::Proof,\n        pub_params: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        partition_k: Option<usize>,\n    ) -> Result<FallbackPoStCircuit<Tree>> {\n        let num_sectors_per_chunk = pub_params.sector_count;\n        ensure!(\n            pub_params.sector_count == vanilla_proof.sectors.len(),\n            \"vanilla proofs must equal sector_count: {} != {}\",\n            num_sectors_per_chunk,\n            vanilla_proof.sectors.len(),\n        );\n\n        let partition_index = partition_k.unwrap_or(0);\n        let sectors = pub_in\n            .sectors\n            .chunks(num_sectors_per_chunk)\n            .nth(partition_index)\n            .ok_or_else(|| anyhow!(\"invalid number of sectors/partition index\"))?;\n\n        let mut res_sectors = Vec::with_capacity(vanilla_proof.sectors.len());\n\n        for (i, vanilla_proof) in vanilla_proof.sectors.iter().enumerate() {\n            let pub_sector = if i < sectors.len() {\n                &sectors[i]\n            } else {\n                // Repeat the last sector if there are too few inputs to fill the circuit.\n                &sectors[sectors.len() - 1]\n            };\n\n            res_sectors.push(Sector::circuit(pub_sector, vanilla_proof)?);\n        }\n\n        assert_eq!(res_sectors.len(), num_sectors_per_chunk);\n\n        Ok(FallbackPoStCircuit {\n            prover_id: Some(pub_in.prover_id.into()),\n            sectors: res_sectors,\n        })\n    }\n\n    fn blank_circuit(\n        pub_params: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n    ) -> FallbackPoStCircuit<Tree> {\n        let sectors = (0..pub_params.sector_count)\n            .map(|_| Sector::blank_circuit(pub_params))\n            .collect();\n\n        FallbackPoStCircuit {\n            prover_id: None,\n            sectors,\n        }\n    
}\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use generic_array::typenum::{U0, U2, U4, U8};\n    use pretty_assertions::assert_eq;\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        compound_proof,\n        gadgets::{MetricCS, TestConstraintSystem},\n        hasher::{Domain, HashFunction, Hasher, PedersenHasher, PoseidonHasher},\n        merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait},\n    };\n\n    use crate::fallback::{\n        self, ChallengeRequirements, PrivateInputs, PrivateSector, PublicInputs, PublicSector,\n    };\n\n    #[ignore]\n    #[test]\n    fn fallback_post_pedersen_single_partition_matching_base_8() {\n        fallback_post::<LCTree<PedersenHasher, U8, U0, U0>>(3, 3, 1);\n    }\n\n    #[ignore]\n    #[test]\n    fn fallback_post_poseidon_single_partition_matching_base_8() {\n        fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(3, 3, 1);\n    }\n\n    #[ignore]\n    #[test]\n    fn fallback_post_poseidon_single_partition_matching_sub_8_4() {\n        fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(3, 3, 1);\n    }\n\n    #[ignore]\n    #[test]\n    fn fallback_post_poseidon_single_partition_matching_top_8_4_2() {\n        fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(3, 3, 1);\n    }\n\n    #[ignore]\n    #[test]\n    fn fallback_post_poseidon_single_partition_smaller_base_8() {\n        fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(2, 3, 1);\n    }\n\n    #[ignore]\n    #[test]\n    fn fallback_post_poseidon_two_partitions_matching_base_8() {\n        fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(4, 2, 2);\n    }\n\n    #[ignore]\n    #[test]\n    fn fallback_post_poseidon_two_partitions_smaller_base_8() {\n        fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 3, 2);\n    }\n\n    fn fallback_post<Tree: 'static + MerkleTreeTrait>(\n        total_sector_count: usize,\n        sector_count: usize,\n        partitions: usize,\n    ) where\n        Tree::Store: 'static,\n    {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n        let sector_size = (leaves * NODE_SIZE) as u64;\n        let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let challenge_count = 2;\n\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: fallback::SetupParams {\n                sector_size: sector_size as u64,\n                challenge_count,\n                sector_count,\n            },\n            partitions: Some(partitions),\n            priority: false,\n        };\n\n        // Construct and store an MT using a named DiskStore.\n        let temp_dir = tempdir::TempDir::new(\"level_cache_tree_v1\").unwrap();\n        let temp_path = temp_dir.path();\n\n        let mut pub_sectors = Vec::new();\n        let mut priv_sectors = Vec::new();\n        let mut trees = Vec::new();\n\n        for _i in 0..total_sector_count {\n            let (_data, tree) =\n                generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n            trees.push(tree);\n        }\n        for (i, tree) in trees.iter().enumerate() {\n            let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n            let comm_r_last = tree.root();\n\n            priv_sectors.push(PrivateSector {\n                tree,\n                comm_c,\n                
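// comm_r_last is the root of this sector's merkle tree (computed above).\n                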
comm_r_last,\n            });\n\n            let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n            pub_sectors.push(PublicSector {\n                id: (i as u64).into(),\n                comm_r,\n            });\n        }\n\n        let pub_params = FallbackPoStCompound::<Tree>::setup(&setup_params).expect(\"setup failed\");\n\n        let pub_inputs = PublicInputs {\n            randomness,\n            prover_id,\n            sectors: &pub_sectors,\n            k: None,\n        };\n\n        let priv_inputs = PrivateInputs::<Tree> {\n            sectors: &priv_sectors,\n        };\n\n        // Use this to debug differences between blank and regular circuit generation.\n        {\n            let circuits =\n                FallbackPoStCompound::circuit_for_test_all(&pub_params, &pub_inputs, &priv_inputs)\n                    .unwrap();\n            let blank_circuit =\n                FallbackPoStCompound::<Tree>::blank_circuit(&pub_params.vanilla_params);\n\n            let mut cs_blank = MetricCS::new();\n            blank_circuit\n                .synthesize(&mut cs_blank)\n                .expect(\"failed to synthesize\");\n\n            let a = cs_blank.pretty_print_list();\n\n            for (circuit1, _inputs) in circuits.into_iter() {\n                let mut cs1 = TestConstraintSystem::new();\n                circuit1.synthesize(&mut cs1).expect(\"failed to synthesize\");\n                let b = cs1.pretty_print_list();\n\n                for (i, (a, b)) in a.chunks(100).zip(b.chunks(100)).enumerate() {\n                    assert_eq!(a, b, \"failed at chunk {}\", i);\n                }\n            }\n        }\n\n        {\n            let circuits =\n                FallbackPoStCompound::circuit_for_test_all(&pub_params, &pub_inputs, &priv_inputs)\n                    .unwrap();\n\n            for (circuit, inputs) in circuits.into_iter() {\n                let mut cs = TestConstraintSystem::new();\n\n                circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n\n                if !cs.is_satisfied() {\n                    panic!(\n                        \"failed to satisfy: {:?}\",\n                        cs.which_is_unsatisfied().unwrap()\n                    );\n                }\n                assert!(\n                    cs.verify(&inputs),\n                    \"verification failed with TestConstraintSystem and generated inputs\"\n                );\n            }\n        }\n\n        let blank_groth_params =\n            FallbackPoStCompound::<Tree>::groth_params(Some(rng), &pub_params.vanilla_params)\n                .expect(\"failed to generate groth params\");\n\n        let proof = FallbackPoStCompound::prove(\n            &pub_params,\n            &pub_inputs,\n            &priv_inputs,\n            &blank_groth_params,\n        )\n        .expect(\"failed while proving\");\n\n        let verified = FallbackPoStCompound::verify(\n            &pub_params,\n            &pub_inputs,\n            &proof,\n            &ChallengeRequirements {\n                minimum_challenge_count: total_sector_count * challenge_count,\n            },\n        )\n        .expect(\"failed while verifying\");\n\n        assert!(verified);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/post/src/fallback/mod.rs",
    "content": "mod circuit;\nmod compound;\nmod vanilla;\n\npub use circuit::*;\npub use compound::*;\npub use vanilla::*;\n"
  },
  {
    "path": "storage-proofs/post/src/fallback/vanilla.rs",
    "content": "use std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse byteorder::{ByteOrder, LittleEndian};\nuse generic_array::typenum::Unsigned;\nuse log::trace;\nuse merkletree::store::StoreConfig;\nuse paired::bls12_381::Fr;\nuse rayon::prelude::*;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\n\nuse storage_proofs_core::{\n    error::Result,\n    hasher::{Domain, HashFunction, Hasher},\n    merkle::{MerkleProof, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper},\n    parameter_cache::ParameterSetMetadata,\n    proof::ProofScheme,\n    sector::*,\n    util::NODE_SIZE,\n};\n\n#[derive(Debug, Clone)]\npub struct SetupParams {\n    /// Size of the sector in bytes.\n    pub sector_size: u64,\n    /// Number of challenges per sector.\n    pub challenge_count: usize,\n    /// Number of challenged sectors.\n    pub sector_count: usize,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams {\n    /// Size of the sector in bytes.\n    pub sector_size: u64,\n    /// Number of challenges per sector.\n    pub challenge_count: usize,\n    /// Number of challenged sectors.\n    pub sector_count: usize,\n}\n\n#[derive(Debug, Default)]\npub struct ChallengeRequirements {\n    /// The sum of challenges across all challenged sectors. (even across partitions)\n    pub minimum_challenge_count: usize,\n}\n\nimpl ParameterSetMetadata for PublicParams {\n    fn identifier(&self) -> String {\n        format!(\n            \"FallbackPoSt::PublicParams{{sector_size: {}, challenge_count: {}, sector_count: {}}}\",\n            self.sector_size(),\n            self.challenge_count,\n            self.sector_count,\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.sector_size\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct PublicInputs<'a, T: Domain> {\n    pub randomness: T,\n    pub prover_id: T,\n    pub sectors: &'a [PublicSector<T>],\n    /// Partition index\n    pub k: Option<usize>,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicSector<T: Domain> {\n    pub id: SectorId,\n    pub comm_r: T,\n}\n\n#[derive(Debug)]\npub struct PrivateSector<'a, Tree: MerkleTreeTrait> {\n    pub tree: &'a MerkleTreeWrapper<\n        Tree::Hasher,\n        Tree::Store,\n        Tree::Arity,\n        Tree::SubTreeArity,\n        Tree::TopTreeArity,\n    >,\n    pub comm_c: <Tree::Hasher as Hasher>::Domain,\n    pub comm_r_last: <Tree::Hasher as Hasher>::Domain,\n}\n\n#[derive(Debug)]\npub struct PrivateInputs<'a, Tree: MerkleTreeTrait> {\n    pub sectors: &'a [PrivateSector<'a, Tree>],\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Proof<P: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"SectorProof<P>: Serialize\",\n        deserialize = \"SectorProof<P>: Deserialize<'de>\"\n    ))]\n    pub sectors: Vec<SectorProof<P>>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SectorProof<Proof: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"MerkleProof<Proof::Hasher, Proof::Arity, Proof::SubTreeArity, Proof::TopTreeArity>: Serialize\",\n        deserialize = \"MerkleProof<Proof::Hasher, Proof::Arity, Proof::SubTreeArity, Proof::TopTreeArity>: serde::de::DeserializeOwned\"\n    ))]\n    inclusion_proofs:\n        Vec<MerkleProof<Proof::Hasher, Proof::Arity, Proof::SubTreeArity, Proof::TopTreeArity>>,\n    pub comm_c: <Proof::Hasher as Hasher>::Domain,\n    pub comm_r_last: <Proof::Hasher as Hasher>::Domain,\n}\n\nimpl<P: MerkleProofTrait> SectorProof<P> {\n    pub fn leafs(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n 
       self.inclusion_proofs\n            .iter()\n            .map(MerkleProofTrait::leaf)\n            .collect()\n    }\n\n    pub fn comm_r_last(&self) -> <P::Hasher as Hasher>::Domain {\n        self.inclusion_proofs[0].root()\n    }\n\n    pub fn commitments(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProofTrait::root)\n            .collect()\n    }\n\n    #[allow(clippy::type_complexity)]\n    pub fn paths(&self) -> Vec<Vec<(Vec<<P::Hasher as Hasher>::Domain>, usize)>> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProofTrait::path)\n            .collect()\n    }\n\n    pub fn as_options(&self) -> Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProofTrait::as_options)\n            .collect()\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct FallbackPoSt<'a, Tree>\nwhere\n    Tree: 'a + MerkleTreeTrait,\n{\n    _t: PhantomData<&'a Tree>,\n}\n\n/// Generate all sector challenges.\npub fn generate_sector_challenges<T: Domain>(\n    randomness: T,\n    challenge_count: usize,\n    sector_set_len: u64,\n    prover_id: T,\n) -> Result<Vec<u64>> {\n    (0..challenge_count)\n        .map(|n| generate_sector_challenge(randomness, n, sector_set_len, prover_id))\n        .collect()\n}\n\n/// Generate a single sector challenge.\npub fn generate_sector_challenge<T: Domain>(\n    randomness: T,\n    n: usize,\n    sector_set_len: u64,\n    prover_id: T,\n) -> Result<u64> {\n    let mut hasher = Sha256::new();\n    hasher.input(AsRef::<[u8]>::as_ref(&prover_id));\n    hasher.input(AsRef::<[u8]>::as_ref(&randomness));\n    hasher.input(&n.to_le_bytes()[..]);\n\n    let hash = hasher.result();\n\n    let sector_challenge = LittleEndian::read_u64(&hash.as_ref()[..8]);\n    let sector_index = sector_challenge % sector_set_len;\n\n    Ok(sector_index)\n}\n\n/// Generate all challenged leaf ranges for a single sector, such that each range fits into the sector.\npub fn generate_leaf_challenges<T: Domain>(\n    pub_params: &PublicParams,\n    randomness: T,\n    sector_id: u64,\n    challenge_count: usize,\n) -> Result<Vec<u64>> {\n    let mut challenges = Vec::with_capacity(challenge_count);\n\n    for leaf_challenge_index in 0..challenge_count {\n        let challenge = generate_leaf_challenge(\n            pub_params,\n            randomness,\n            sector_id,\n            leaf_challenge_index as u64,\n        )?;\n        challenges.push(challenge)\n    }\n\n    Ok(challenges)\n}\n\n/// Generate a single leaf challenge, such that the challenged range fits into the sector.\npub fn generate_leaf_challenge<T: Domain>(\n    pub_params: &PublicParams,\n    randomness: T,\n    sector_id: u64,\n    leaf_challenge_index: u64,\n) -> Result<u64> {\n    let mut hasher = Sha256::new();\n    hasher.input(AsRef::<[u8]>::as_ref(&randomness));\n    hasher.input(&sector_id.to_le_bytes()[..]);\n    hasher.input(&leaf_challenge_index.to_le_bytes()[..]);\n    let hash = hasher.result();\n\n    let leaf_challenge = LittleEndian::read_u64(&hash.as_ref()[..8]);\n\n    let challenged_range_index = leaf_challenge % (pub_params.sector_size / NODE_SIZE as u64);\n\n    Ok(challenged_range_index)\n}\n\nimpl<'a, Tree: 'a + MerkleTreeTrait> ProofScheme<'a> for FallbackPoSt<'a, Tree> {\n    type PublicParams = PublicParams;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<'a, <Tree::Hasher as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<'a, Tree>;\n    type Proof = 
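// One proof is produced per partition; each one pads its sector proofs up to `sector_count`.\n    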
Proof<Tree::Proof>;\n    type Requirements = ChallengeRequirements;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        Ok(PublicParams {\n            sector_size: sp.sector_size,\n            challenge_count: sp.challenge_count,\n            sector_count: sp.sector_count,\n        })\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        // Because partition proofs require a common setup, the general `ProofScheme`\n        // implementation, which makes use of `ProofScheme::prove`, cannot be used here.\n        // Instead, we prove all partitions in one pass, as implemented by\n        // `prove_all_partitions` below.\n        let proofs = Self::prove_all_partitions(pub_params, pub_inputs, priv_inputs, 1)?;\n        let k = match pub_inputs.k {\n            None => 0,\n            Some(k) => k,\n        };\n        assert!(\n            k < 1,\n            \"It is a programmer error to call FallbackPoSt::prove with more than one partition.\"\n        );\n\n        Ok(proofs[k].to_owned())\n    }\n\n    fn prove_all_partitions<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n        partition_count: usize,\n    ) -> Result<Vec<Self::Proof>> {\n        ensure!(\n            priv_inputs.sectors.len() == pub_inputs.sectors.len(),\n            \"inconsistent number of private and public sectors {} != {}\",\n            priv_inputs.sectors.len(),\n            pub_inputs.sectors.len(),\n        );\n\n        let num_sectors_per_chunk = pub_params.sector_count;\n        let num_sectors = pub_inputs.sectors.len();\n\n        ensure!(\n            num_sectors <= partition_count * num_sectors_per_chunk,\n            \"cannot prove the provided number of sectors: {} > {} * {}\",\n            num_sectors,\n            partition_count,\n            num_sectors_per_chunk,\n        );\n\n        let mut partition_proofs = Vec::new();\n\n        for (j, (pub_sectors_chunk, priv_sectors_chunk)) in pub_inputs\n            .sectors\n            .chunks(num_sectors_per_chunk)\n            .zip(priv_inputs.sectors.chunks(num_sectors_per_chunk))\n            .enumerate()\n        {\n            trace!(\"proving partition {}\", j);\n\n            let mut proofs = Vec::with_capacity(num_sectors_per_chunk);\n\n            for (i, (pub_sector, priv_sector)) in pub_sectors_chunk\n                .iter()\n                .zip(priv_sectors_chunk.iter())\n                .enumerate()\n            {\n                let tree = priv_sector.tree;\n                let sector_id = pub_sector.id;\n                let tree_leafs = tree.leafs();\n                let levels = StoreConfig::default_cached_above_base_layer(\n                    tree_leafs,\n                    Tree::Arity::to_usize(),\n                );\n\n                trace!(\n                    \"Generating proof for tree leafs {} and cached layers {}\",\n                    tree_leafs,\n                    levels,\n                );\n\n                let inclusion_proofs = (0..pub_params.challenge_count)\n                    .into_par_iter()\n                    .map(|n| {\n                        // Each challenge gets a globally unique index across partitions and sectors.\n                        let challenge_index = ((j * num_sectors_per_chunk + i)\n                            * pub_params.challenge_count\n                            + n) as u64;\n                        let\n                        
challenged_leaf_start = generate_leaf_challenge(\n                            pub_params,\n                            pub_inputs.randomness,\n                            sector_id.into(),\n                            challenge_index,\n                        )?;\n\n                        tree.gen_cached_proof(challenged_leaf_start as usize, levels)\n                    })\n                    .collect::<Result<Vec<_>>>()?;\n\n                proofs.push(SectorProof {\n                    inclusion_proofs,\n                    comm_c: priv_sector.comm_c,\n                    comm_r_last: priv_sector.comm_r_last,\n                });\n            }\n\n            // If fewer than the required number of sectors were provided, we duplicate the last one\n            // to pad the proof out, such that it works in the circuit part.\n            while proofs.len() < num_sectors_per_chunk {\n                proofs.push(proofs[proofs.len() - 1].clone());\n            }\n\n            partition_proofs.push(Proof { sectors: proofs });\n        }\n\n        Ok(partition_proofs)\n    }\n\n    fn verify_all_partitions(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        partition_proofs: &[Self::Proof],\n    ) -> Result<bool> {\n        let challenge_count = pub_params.challenge_count;\n        let num_sectors_per_chunk = pub_params.sector_count;\n        let num_sectors = pub_inputs.sectors.len();\n\n        ensure!(\n            num_sectors <= num_sectors_per_chunk * partition_proofs.len(),\n            \"inconsistent number of sectors: {} > {} * {}\",\n            num_sectors,\n            num_sectors_per_chunk,\n            partition_proofs.len(),\n        );\n\n        for (j, (proof, pub_sectors_chunk)) in partition_proofs\n            .iter()\n            .zip(pub_inputs.sectors.chunks(num_sectors_per_chunk))\n            .enumerate()\n        {\n            ensure!(\n                pub_sectors_chunk.len() <= num_sectors_per_chunk,\n                \"inconsistent number of public sectors: {} > {}\",\n                pub_sectors_chunk.len(),\n                num_sectors_per_chunk,\n            );\n            ensure!(\n                proof.sectors.len() == num_sectors_per_chunk,\n                \"invalid number of sectors in the partition proof {}: {} != {}\",\n                j,\n                proof.sectors.len(),\n                num_sectors_per_chunk,\n            );\n            for (i, (pub_sector, sector_proof)) in pub_sectors_chunk\n                .iter()\n                .zip(proof.sectors.iter())\n                .enumerate()\n            {\n                let sector_id = pub_sector.id;\n                let comm_r = &pub_sector.comm_r;\n                let comm_c = sector_proof.comm_c;\n                let inclusion_proofs = &sector_proof.inclusion_proofs;\n\n                // Verify that H(Comm_c || Comm_r_last) == Comm_R\n\n                // comm_r_last is the root of the proof\n                let comm_r_last = inclusion_proofs[0].root();\n\n                if AsRef::<[u8]>::as_ref(&<Tree::Hasher as Hasher>::Function::hash2(\n                    &comm_c,\n                    &comm_r_last,\n                )) != AsRef::<[u8]>::as_ref(comm_r)\n                {\n                    return Ok(false);\n                }\n\n                ensure!(\n                    challenge_count == inclusion_proofs.len(),\n                    \"unexpected number of inclusion proofs: {} != {}\",\n                    challenge_count,\n               
     inclusion_proofs.len()\n                );\n\n                for (n, inclusion_proof) in inclusion_proofs.iter().enumerate() {\n                    let challenge_index =\n                        ((j * num_sectors_per_chunk + i) * pub_params.challenge_count + n) as u64;\n                    let challenged_leaf_start = generate_leaf_challenge(\n                        pub_params,\n                        pub_inputs.randomness,\n                        sector_id.into(),\n                        challenge_index,\n                    )?;\n\n                    // validate all comm_r_lasts match\n                    if inclusion_proof.root() != comm_r_last {\n                        return Ok(false);\n                    }\n\n                    // validate the path length\n                    let expected_path_length =\n                        inclusion_proof.expected_len(pub_params.sector_size as usize / NODE_SIZE);\n\n                    if expected_path_length != inclusion_proof.path().len() {\n                        return Ok(false);\n                    }\n\n                    if !inclusion_proof.validate(challenged_leaf_start as usize) {\n                        return Ok(false);\n                    }\n                }\n            }\n        }\n\n        Ok(true)\n    }\n\n    fn satisfies_requirements(\n        public_params: &Self::PublicParams,\n        requirements: &Self::Requirements,\n        partitions: usize,\n    ) -> bool {\n        partitions * public_params.sector_count * public_params.challenge_count\n            >= requirements.minimum_challenge_count\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use generic_array::typenum::{U0, U2, U4, U8};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n\n    use storage_proofs_core::{\n        hasher::{PedersenHasher, PoseidonHasher},\n        merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait},\n    };\n\n    fn test_fallback_post<Tree: MerkleTreeTrait>(\n        total_sector_count: usize,\n        sector_count: usize,\n        partitions: usize,\n    ) where\n        Tree::Store: 'static,\n    {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n        let sector_size = leaves * NODE_SIZE;\n\n        let pub_params = PublicParams {\n            sector_size: sector_size as u64,\n            challenge_count: 10,\n            sector_count,\n        };\n\n        let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n\n        // Construct and store an MT using a named DiskStore.\n        let temp_dir = tempdir::TempDir::new(\"level_cache_tree\").unwrap();\n        let temp_path = temp_dir.path();\n\n        let mut pub_sectors = Vec::new();\n        let mut priv_sectors = Vec::new();\n        let mut trees = Vec::new();\n\n        for _i in 0..total_sector_count {\n            let (_data, tree) =\n                generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n            trees.push(tree);\n        }\n        for (i, tree) in trees.iter().enumerate() {\n            let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n            let comm_r_last = tree.root();\n\n            priv_sectors.push(PrivateSector {\n                tree,\n                comm_c,\n                comm_r_last,\n            });\n\n            let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, 
&comm_r_last);\n            pub_sectors.push(PublicSector {\n                id: (i as u64).into(),\n                comm_r,\n            });\n        }\n\n        let pub_inputs = PublicInputs {\n            randomness,\n            prover_id,\n            sectors: &pub_sectors,\n            k: None,\n        };\n\n        let priv_inputs = PrivateInputs::<Tree> {\n            sectors: &priv_sectors[..],\n        };\n\n        let proof = FallbackPoSt::<Tree>::prove_all_partitions(\n            &pub_params,\n            &pub_inputs,\n            &priv_inputs,\n            partitions,\n        )\n        .expect(\"proving failed\");\n\n        let is_valid =\n            FallbackPoSt::<Tree>::verify_all_partitions(&pub_params, &pub_inputs, &proof)\n                .expect(\"verification failed\");\n\n        assert!(is_valid);\n    }\n\n    #[test]\n    fn fallback_post_pedersen_single_partition_matching_base_8() {\n        test_fallback_post::<LCTree<PedersenHasher, U8, U0, U0>>(5, 5, 1);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_single_partition_matching_base_8() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 5, 1);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_single_partition_smaller_base_8() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(3, 5, 1);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_two_partitions_matching_base_8() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(4, 2, 2);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_two_partitions_smaller_base_8() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 3, 2);\n    }\n\n    #[test]\n    fn fallback_post_pedersen_single_partition_matching_sub_8_4() {\n        test_fallback_post::<LCTree<PedersenHasher, U8, U4, U0>>(5, 5, 1);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_single_partition_matching_sub_8_4() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(5, 5, 1);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_single_partition_smaller_sub_8_4() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(3, 5, 1);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_two_partitions_matching_sub_8_4() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(4, 2, 2);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_two_partitions_matching_sub_8_8() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U8, U0>>(4, 2, 2);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_two_partitions_smaller_sub_8_4() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(5, 3, 2);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_two_partitions_smaller_sub_8_8() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U8, U0>>(5, 3, 2);\n    }\n\n    #[test]\n    fn fallback_post_pedersen_single_partition_matching_top_8_4_2() {\n        test_fallback_post::<LCTree<PedersenHasher, U8, U4, U2>>(5, 5, 1);\n    }\n    #[test]\n    fn fallback_post_pedersen_single_partition_matching_top_8_8_2() {\n        test_fallback_post::<LCTree<PedersenHasher, U8, U8, U2>>(5, 5, 1);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_single_partition_matching_top_8_4_2() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(5, 5, 1);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_single_partition_matching_top_8_8_2() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U8, U2>>(5, 5, 1);\n    }\n\n    #[test]\n    fn 
fallback_post_poseidon_single_partition_smaller_top_8_4_2() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(3, 5, 1);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_two_partitions_matching_top_8_4_2() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(4, 2, 2);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_two_partitions_smaller_top_8_4_2() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(5, 3, 2);\n    }\n\n    #[test]\n    fn fallback_post_poseidon_two_partitions_smaller_top_8_8_2() {\n        test_fallback_post::<LCTree<PoseidonHasher, U8, U8, U2>>(5, 3, 2);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/post/src/lib.rs",
    "content": "pub mod election;\npub mod fallback;\npub mod rational;\n\n#[cfg(test)]\npub(crate) const TEST_SEED: [u8; 16] = [\n    0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5,\n];\n"
  },
  {
    "path": "storage-proofs/post/src/rational/circuit.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::gadgets::num;\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse paired::bls12_381::{Bls12, Fr};\n\nuse storage_proofs_core::{\n    compound_proof::CircuitComponent,\n    error::Result,\n    gadgets::constraint,\n    gadgets::por::PoRCircuit,\n    gadgets::variables::Root,\n    hasher::{HashFunction, Hasher},\n    merkle::MerkleTreeTrait,\n};\n\n/// This is the `RationalPoSt` circuit.\npub struct RationalPoStCircuit<Tree: MerkleTreeTrait> {\n    /// Paramters for the engine.\n    pub comm_rs: Vec<Option<Fr>>,\n    pub comm_cs: Vec<Option<Fr>>,\n    pub comm_r_lasts: Vec<Option<Fr>>,\n    pub leafs: Vec<Option<Fr>>,\n    #[allow(clippy::type_complexity)]\n    pub paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n    pub _t: PhantomData<Tree>,\n}\n\n#[derive(Clone, Default)]\npub struct ComponentPrivateInputs {}\n\nimpl<'a, Tree: MerkleTreeTrait> CircuitComponent for RationalPoStCircuit<Tree> {\n    type ComponentPrivateInputs = ComponentPrivateInputs;\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait> Circuit<Bls12> for RationalPoStCircuit<Tree> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let comm_rs = self.comm_rs;\n        let comm_cs = self.comm_cs;\n        let comm_r_lasts = self.comm_r_lasts;\n        let leafs = self.leafs;\n        let paths = self.paths;\n\n        assert_eq!(paths.len(), leafs.len());\n        assert_eq!(paths.len(), comm_rs.len());\n        assert_eq!(paths.len(), comm_cs.len());\n        assert_eq!(paths.len(), comm_r_lasts.len());\n\n        for (((i, comm_r_last), comm_c), comm_r) in comm_r_lasts\n            .iter()\n            .enumerate()\n            .zip(comm_cs.iter())\n            .zip(comm_rs.iter())\n        {\n            let comm_r_last_num =\n                num::AllocatedNum::alloc(cs.namespace(|| format!(\"comm_r_last_{}\", i)), || {\n                    comm_r_last\n                        .map(Into::into)\n                        .ok_or_else(|| SynthesisError::AssignmentMissing)\n                })?;\n\n            let comm_c_num =\n                num::AllocatedNum::alloc(cs.namespace(|| format!(\"comm_c_{}\", i)), || {\n                    comm_c\n                        .map(Into::into)\n                        .ok_or_else(|| SynthesisError::AssignmentMissing)\n                })?;\n\n            let comm_r_num =\n                num::AllocatedNum::alloc(cs.namespace(|| format!(\"comm_r_{}\", i)), || {\n                    comm_r\n                        .map(Into::into)\n                        .ok_or_else(|| SynthesisError::AssignmentMissing)\n                })?;\n\n            comm_r_num.inputize(cs.namespace(|| format!(\"comm_r_{}_input\", i)))?;\n\n            // Verify H(Comm_C || comm_r_last) == comm_r\n            {\n                let hash_num = <Tree::Hasher as Hasher>::Function::hash2_circuit(\n                    cs.namespace(|| format!(\"H_comm_c_comm_r_last_{}\", i)),\n                    &comm_c_num,\n                    &comm_r_last_num,\n                )?;\n\n                // Check actual equality\n                constraint::equal(\n                    cs,\n                    || format!(\"enforce_comm_c_comm_r_last_hash_comm_r_{}\", i),\n                    &comm_r_num,\n                    &hash_num,\n                );\n            }\n\n            PoRCircuit::<Tree>::synthesize(\n                cs.namespace(|| format!(\"challenge_inclusion{}\", i)),\n                
Root::Val(leafs[i]),\n                paths[i].clone().into(),\n                Root::from_allocated::<CS>(comm_r_last_num),\n                true,\n            )?;\n        }\n\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::collections::BTreeMap;\n\n    use ff::Field;\n    use paired::bls12_381::{Bls12, Fr};\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    use storage_proofs_core::{\n        compound_proof::CompoundProof,\n        gadgets::TestConstraintSystem,\n        hasher::{Domain, HashFunction, Hasher, PedersenHasher, PoseidonHasher},\n        merkle::{generate_tree, get_base_tree_count, BinaryMerkleTree},\n        proof::ProofScheme,\n        sector::OrderedSectorSet,\n        util::NODE_SIZE,\n    };\n\n    use crate::rational::{self, derive_challenges, RationalPoSt, RationalPoStCompound};\n\n    #[test]\n    fn test_rational_post_circuit_pedersen() {\n        test_rational_post_circuit::<BinaryMerkleTree<PedersenHasher>>(16_490);\n    }\n\n    #[test]\n    fn test_rational_post_circuit_poseidon() {\n        test_rational_post_circuit::<BinaryMerkleTree<PoseidonHasher>>(3_770);\n    }\n\n    fn test_rational_post_circuit<Tree: 'static + MerkleTreeTrait>(expected_constraints: usize) {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 32 * get_base_tree_count::<Tree>();\n        let sector_size = (leaves * NODE_SIZE) as u64;\n        let challenges_count = 2;\n\n        let pub_params = rational::PublicParams {\n            sector_size,\n            challenges_count,\n        };\n\n        // Construct and store an MT using a named DiskStore.\n        let temp_dir = tempdir::TempDir::new(\"tree\").unwrap();\n        let temp_path = temp_dir.path();\n\n        let (_data1, tree1) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n        let (_data2, tree2) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n\n        let faults = OrderedSectorSet::new();\n        let mut sectors = OrderedSectorSet::new();\n        sectors.insert(0.into());\n        sectors.insert(1.into());\n\n        let seed = (0..leaves).map(|_| rng.gen()).collect::<Vec<u8>>();\n        let challenges =\n            derive_challenges(challenges_count, sector_size, &sectors, &seed, &faults).unwrap();\n        let comm_r_lasts_raw = vec![tree1.root(), tree2.root()];\n        let comm_r_lasts: Vec<_> = challenges\n            .iter()\n            .map(|c| comm_r_lasts_raw[u64::from(c.sector) as usize])\n            .collect();\n\n        let comm_cs: Vec<<Tree::Hasher as Hasher>::Domain> = challenges\n            .iter()\n            .map(|_c| <Tree::Hasher as Hasher>::Domain::random(rng))\n            .collect();\n\n        let comm_rs: Vec<_> = comm_cs\n            .iter()\n            .zip(comm_r_lasts.iter())\n            .map(|(comm_c, comm_r_last)| {\n                <Tree::Hasher as Hasher>::Function::hash2(comm_c, comm_r_last)\n            })\n            .collect();\n\n        let pub_inputs = rational::PublicInputs {\n            challenges: &challenges,\n            faults: &faults,\n            comm_rs: &comm_rs,\n        };\n\n        let mut trees = BTreeMap::new();\n        trees.insert(0.into(), &tree1);\n        trees.insert(1.into(), &tree2);\n\n        let priv_inputs = rational::PrivateInputs::<Tree> {\n            trees: &trees,\n            comm_cs: &comm_cs,\n            comm_r_lasts: &comm_r_lasts,\n        };\n\n        let proof = 
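// Generate and check the vanilla proof first; the circuit below re-synthesizes the same openings.\n            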
RationalPoSt::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs)\n            .expect(\"proving failed\");\n\n        let is_valid = RationalPoSt::<Tree>::verify(&pub_params, &pub_inputs, &proof)\n            .expect(\"verification failed\");\n        assert!(is_valid);\n\n        // actual circuit test\n\n        let paths: Vec<_> = proof\n            .paths()\n            .iter()\n            .map(|p| {\n                p.iter()\n                    .map(|v| {\n                        (\n                            v.0.iter().copied().map(Into::into).map(Some).collect(),\n                            Some(v.1),\n                        )\n                    })\n                    .collect::<Vec<_>>()\n            })\n            .collect();\n        let leafs: Vec<_> = proof.leafs().iter().map(|l| Some((*l).into())).collect();\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        let instance = RationalPoStCircuit::<Tree> {\n            leafs,\n            paths,\n            comm_rs: comm_rs.iter().copied().map(|c| Some(c.into())).collect(),\n            comm_cs: comm_cs.into_iter().map(|c| Some(c.into())).collect(),\n            comm_r_lasts: comm_r_lasts.into_iter().map(|c| Some(c.into())).collect(),\n            _t: PhantomData,\n        };\n\n        instance\n            .synthesize(&mut cs)\n            .expect(\"failed to synthesize circuit\");\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n        assert_eq!(cs.num_inputs(), 5, \"wrong number of inputs\");\n        assert_eq!(\n            cs.num_constraints(),\n            expected_constraints,\n            \"wrong number of constraints\"\n        );\n        assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n        let generated_inputs =\n            RationalPoStCompound::<Tree>::generate_public_inputs(&pub_inputs, &pub_params, None)\n                .unwrap();\n        let expected_inputs = cs.get_inputs();\n\n        for ((input, label), generated_input) in\n            expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n        {\n            assert_eq!(input, generated_input, \"{}\", label);\n        }\n\n        assert_eq!(\n            generated_inputs.len(),\n            expected_inputs.len() - 1,\n            \"inputs are not the same length\"\n        );\n    }\n}\n"
  },
  {
    "path": "storage-proofs/post/src/rational/compound.rs",
    "content": "use std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse generic_array::typenum;\nuse paired::bls12_381::{Bls12, Fr};\n\nuse storage_proofs_core::{\n    compound_proof::{CircuitComponent, CompoundProof},\n    drgraph,\n    error::Result,\n    gadgets::por::PoRCompound,\n    merkle::MerkleTreeTrait,\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    por,\n    proof::ProofScheme,\n    util::NODE_SIZE,\n};\n\nuse super::{RationalPoSt, RationalPoStCircuit};\n\npub struct RationalPoStCompound<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    _t: PhantomData<Tree>,\n}\n\nimpl<C: Circuit<Bls12>, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters<C, P>\n    for RationalPoStCompound<Tree>\n{\n    fn cache_prefix() -> String {\n        format!(\"proof-of-spacetime-rational-{}\", Tree::display())\n    }\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait>\n    CompoundProof<'a, RationalPoSt<'a, Tree>, RationalPoStCircuit<Tree>>\n    for RationalPoStCompound<Tree>\nwhere\n    Tree: 'static + MerkleTreeTrait,\n{\n    fn generate_public_inputs(\n        pub_in: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        let mut inputs = Vec::new();\n\n        let por_pub_params = por::PublicParams {\n            leaves: (pub_params.sector_size as usize / NODE_SIZE),\n            private: true,\n        };\n\n        ensure!(\n            pub_in.challenges.len() == pub_in.comm_rs.len(),\n            \"Missmatch in challenges and comm_rs\"\n        );\n\n        for (challenge, comm_r) in pub_in.challenges.iter().zip(pub_in.comm_rs.iter()) {\n            inputs.push((*comm_r).into());\n\n            let por_pub_inputs = por::PublicInputs {\n                commitment: None,\n                challenge: challenge.leaf as usize,\n            };\n            let por_inputs = PoRCompound::<Tree>::generate_public_inputs(\n                &por_pub_inputs,\n                &por_pub_params,\n                None,\n            )?;\n\n            inputs.extend(por_inputs);\n        }\n\n        Ok(inputs)\n    }\n\n    fn circuit(\n        pub_in: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        _priv_in: <RationalPoStCircuit<Tree> as CircuitComponent>::ComponentPrivateInputs,\n        vanilla_proof: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::Proof,\n        _pub_params: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<RationalPoStCircuit<Tree>> {\n        let comm_rs: Vec<_> = pub_in.comm_rs.iter().map(|c| Some((*c).into())).collect();\n        let comm_cs: Vec<_> = vanilla_proof\n            .comm_cs\n            .iter()\n            .map(|c| Some((*c).into()))\n            .collect();\n\n        let comm_r_lasts: Vec<_> = vanilla_proof\n            .commitments()\n            .into_iter()\n            .map(|c| Some(c.into()))\n            .collect();\n\n        let leafs: Vec<_> = vanilla_proof\n            .leafs()\n            .iter()\n            .map(|c| Some((*c).into()))\n            .collect();\n\n        let paths: Vec<Vec<_>> = vanilla_proof\n            .paths()\n            .iter()\n            .map(|v| {\n                v.iter()\n                    .map(|p| {\n                        (\n                            
(*p).0.iter().copied().map(Into::into).map(Some).collect(),\n                            Some(p.1),\n                        )\n                    })\n                    .collect()\n            })\n            .collect();\n\n        Ok(RationalPoStCircuit {\n            leafs,\n            comm_rs,\n            comm_cs,\n            comm_r_lasts,\n            paths,\n            _t: PhantomData,\n        })\n    }\n\n    fn blank_circuit(\n        pub_params: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n    ) -> RationalPoStCircuit<Tree> {\n        let challenges_count = pub_params.challenges_count;\n        let height =\n            drgraph::graph_height::<typenum::U2>(pub_params.sector_size as usize / NODE_SIZE);\n\n        let comm_rs = vec![None; challenges_count];\n        let comm_cs = vec![None; challenges_count];\n        let comm_r_lasts = vec![None; challenges_count];\n        let leafs = vec![None; challenges_count];\n        let paths = vec![vec![(vec![None; 1], None); height - 1]; challenges_count];\n\n        RationalPoStCircuit {\n            comm_rs,\n            comm_cs,\n            comm_r_lasts,\n            leafs,\n            paths,\n            _t: PhantomData,\n        }\n    }\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait> RationalPoStCircuit<Tree> {\n    #[allow(clippy::type_complexity)]\n    pub fn synthesize<CS: ConstraintSystem<Bls12>>(\n        cs: &mut CS,\n        leafs: Vec<Option<Fr>>,\n        comm_rs: Vec<Option<Fr>>,\n        comm_cs: Vec<Option<Fr>>,\n        comm_r_lasts: Vec<Option<Fr>>,\n        paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n    ) -> Result<(), SynthesisError> {\n        Self {\n            leafs,\n            comm_rs,\n            comm_cs,\n            comm_r_lasts,\n            paths,\n            _t: PhantomData,\n        }\n        .synthesize(cs)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::collections::BTreeMap;\n\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        compound_proof,\n        gadgets::TestConstraintSystem,\n        hasher::{Domain, HashFunction, Hasher, PedersenHasher, PoseidonHasher},\n        merkle::{generate_tree, get_base_tree_count, BinaryMerkleTree},\n        proof::NoRequirements,\n        sector::OrderedSectorSet,\n    };\n\n    use crate::rational::{self, derive_challenges};\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn rational_post_test_compound_pedersen() {\n        rational_post_test_compound::<BinaryMerkleTree<PedersenHasher>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn rational_post_test_compound_poseidon() {\n        rational_post_test_compound::<BinaryMerkleTree<PoseidonHasher>>();\n    }\n\n    fn rational_post_test_compound<Tree: 'static + MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 32 * get_base_tree_count::<Tree>();\n        let sector_size = (leaves * NODE_SIZE) as u64;\n        let challenges_count = 2;\n\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: rational::SetupParams {\n                sector_size,\n                challenges_count,\n            },\n            partitions: None,\n            priority: true,\n        };\n\n        let pub_params = RationalPoStCompound::<Tree>::setup(&setup_params).expect(\"setup failed\");\n\n        // Construct and store an MT using a 
named DiskStore.\n        let temp_dir = tempdir::TempDir::new(\"tree\").unwrap();\n        let temp_path = temp_dir.path();\n\n        let (_data1, tree1) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n        let (_data2, tree2) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n\n        let faults = OrderedSectorSet::new();\n        let mut sectors = OrderedSectorSet::new();\n        sectors.insert(0.into());\n        sectors.insert(1.into());\n\n        let seed = (0..leaves).map(|_| rng.gen()).collect::<Vec<u8>>();\n        let challenges =\n            derive_challenges(challenges_count, sector_size, &sectors, &seed, &faults).unwrap();\n\n        let comm_r_lasts_raw = vec![tree1.root(), tree2.root()];\n        let comm_r_lasts: Vec<_> = challenges\n            .iter()\n            .map(|c| comm_r_lasts_raw[u64::from(c.sector) as usize])\n            .collect();\n\n        let comm_cs: Vec<<Tree::Hasher as Hasher>::Domain> = challenges\n            .iter()\n            .map(|_c| <Tree::Hasher as Hasher>::Domain::random(rng))\n            .collect();\n\n        let comm_rs: Vec<_> = comm_cs\n            .iter()\n            .zip(comm_r_lasts.iter())\n            .map(|(comm_c, comm_r_last)| {\n                <Tree::Hasher as Hasher>::Function::hash2(comm_c, comm_r_last)\n            })\n            .collect();\n\n        let pub_inputs = rational::PublicInputs {\n            challenges: &challenges,\n            faults: &faults,\n            comm_rs: &comm_rs,\n        };\n\n        let mut trees = BTreeMap::new();\n        trees.insert(0.into(), &tree1);\n        trees.insert(1.into(), &tree2);\n\n        let priv_inputs = rational::PrivateInputs::<Tree> {\n            trees: &trees,\n            comm_r_lasts: &comm_r_lasts,\n            comm_cs: &comm_cs,\n        };\n\n        let gparams =\n            RationalPoStCompound::<Tree>::groth_params(Some(rng), &pub_params.vanilla_params)\n                .expect(\"failed to create groth params\");\n\n        let proof =\n            RationalPoStCompound::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs, &gparams)\n                .expect(\"proving failed\");\n\n        let (circuit, inputs) =\n            RationalPoStCompound::<Tree>::circuit_for_test(&pub_params, &pub_inputs, &priv_inputs)\n                .unwrap();\n\n        {\n            let mut cs = TestConstraintSystem::new();\n\n            circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n            assert!(cs.is_satisfied());\n            assert!(cs.verify(&inputs));\n        }\n\n        let verified =\n            RationalPoStCompound::<Tree>::verify(&pub_params, &pub_inputs, &proof, &NoRequirements)\n                .expect(\"failed while verifying\");\n\n        assert!(verified);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/post/src/rational/mod.rs",
    "content": "mod circuit;\nmod compound;\nmod vanilla;\n\npub use self::circuit::*;\npub use self::compound::*;\npub use self::vanilla::*;\n"
  },
  {
    "path": "storage-proofs/post/src/rational/vanilla.rs",
    "content": "use std::collections::{BTreeMap, HashSet};\nuse std::marker::PhantomData;\n\nuse anyhow::{bail, ensure, Context};\nuse byteorder::{ByteOrder, LittleEndian};\nuse serde::{Deserialize, Serialize};\n\nuse storage_proofs_core::{\n    error::{Error, Result},\n    hasher::{Domain, HashFunction, Hasher},\n    merkle::{MerkleProof, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper},\n    parameter_cache::ParameterSetMetadata,\n    proof::{NoRequirements, ProofScheme},\n    sector::*,\n    util::NODE_SIZE,\n};\n\n#[derive(Debug, Clone)]\npub struct SetupParams {\n    /// The size of a sector.\n    pub sector_size: u64,\n    // TODO: can we drop this?\n    /// How many challenges there are in total.\n    pub challenges_count: usize,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams {\n    /// The size of a sector.\n    pub sector_size: u64,\n    /// How many challenges there are in total.\n    pub challenges_count: usize,\n}\n\nimpl ParameterSetMetadata for PublicParams {\n    fn identifier(&self) -> String {\n        format!(\n            \"RationalPoSt::PublicParams{{sector_size: {} challenges_count: {}}}\",\n            self.sector_size(),\n            self.challenges_count,\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.sector_size\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct PublicInputs<'a, T: 'a + Domain> {\n    /// The challenges, which leafs to prove.\n    pub challenges: &'a [Challenge],\n    pub faults: &'a OrderedSectorSet,\n    pub comm_rs: &'a [T],\n}\n\n#[derive(Debug, Clone)]\n#[allow(clippy::type_complexity)]\npub struct PrivateInputs<'a, Tree: 'a + MerkleTreeTrait> {\n    pub trees: &'a BTreeMap<\n        SectorId,\n        &'a MerkleTreeWrapper<\n            Tree::Hasher,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >,\n    >,\n    pub comm_cs: &'a Vec<<Tree::Hasher as Hasher>::Domain>,\n    pub comm_r_lasts: &'a Vec<<Tree::Hasher as Hasher>::Domain>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Proof<P: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>: Serialize\",\n        deserialize = \"MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>: serde::de::DeserializeOwned\"\n    ))]\n    inclusion_proofs: Vec<MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>>,\n    pub comm_cs: Vec<<P::Hasher as Hasher>::Domain>,\n}\n\nimpl<P: MerkleProofTrait> Proof<P> {\n    pub fn leafs(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProof::leaf)\n            .collect()\n    }\n\n    pub fn commitments(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProof::root)\n            .collect()\n    }\n\n    #[allow(clippy::type_complexity)]\n    pub fn paths(&self) -> Vec<Vec<(Vec<<P::Hasher as Hasher>::Domain>, usize)>> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProof::path)\n            .collect()\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct RationalPoSt<'a, Tree>\nwhere\n    Tree: 'a + MerkleTreeTrait,\n{\n    _t: PhantomData<&'a Tree>,\n}\n\nimpl<'a, Tree: 'a + MerkleTreeTrait> ProofScheme<'a> for RationalPoSt<'a, Tree> {\n    type PublicParams = PublicParams;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<'a, <Tree::Hasher 
as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<'a, Tree>;\n    type Proof = Proof<Tree::Proof>;\n    type Requirements = NoRequirements;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        Ok(PublicParams {\n            sector_size: sp.sector_size,\n            challenges_count: sp.challenges_count,\n        })\n    }\n\n    fn prove<'b>(\n        _pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        ensure!(\n            pub_inputs.challenges.len() == pub_inputs.comm_rs.len(),\n            \"mismatched challenges and comm_rs\"\n        );\n        ensure!(\n            pub_inputs.challenges.len() == priv_inputs.comm_cs.len(),\n            \"mismatched challenges and comm_cs\"\n        );\n        ensure!(\n            pub_inputs.challenges.len() == priv_inputs.comm_r_lasts.len(),\n            \"mismatched challenges and comm_r_lasts\"\n        );\n        let challenges = pub_inputs.challenges;\n\n        let proofs = challenges\n            .iter()\n            .zip(priv_inputs.comm_r_lasts.iter())\n            .map(|(challenge, comm_r_last)| {\n                let challenged_leaf = challenge.leaf;\n\n                if let Some(tree) = priv_inputs.trees.get(&challenge.sector) {\n                    ensure!(comm_r_last == &tree.root(), Error::InvalidCommitment);\n\n                    tree.gen_cached_proof(challenged_leaf as usize, None)\n                } else {\n                    bail!(Error::MalformedInput);\n                }\n            })\n            .collect::<Result<Vec<_>>>()?;\n\n        Ok(Proof {\n            inclusion_proofs: proofs,\n            comm_cs: priv_inputs.comm_cs.to_vec(),\n        })\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        let challenges = pub_inputs.challenges;\n\n        ensure!(\n            challenges.len() == pub_inputs.comm_rs.len() as usize,\n            Error::MalformedInput\n        );\n\n        ensure!(\n            challenges.len() == proof.inclusion_proofs.len(),\n            Error::MalformedInput\n        );\n\n        // validate each proof\n        for (((merkle_proof, challenge), comm_r), comm_c) in proof\n            .inclusion_proofs\n            .iter()\n            .zip(challenges.iter())\n            .zip(pub_inputs.comm_rs.iter())\n            .zip(proof.comm_cs.iter())\n        {\n            let challenged_leaf = challenge.leaf;\n\n            // verify that H(Comm_c || Comm_r_last) == Comm_R\n            // comm_r_last is the root of the proof\n            let comm_r_last = merkle_proof.root();\n\n            if AsRef::<[u8]>::as_ref(&<Tree::Hasher as Hasher>::Function::hash2(\n                comm_c,\n                &comm_r_last,\n            )) != AsRef::<[u8]>::as_ref(&comm_r)\n            {\n                return Ok(false);\n            }\n\n            // validate the path length\n            let expected_path_length =\n                merkle_proof.expected_len(pub_params.sector_size as usize / NODE_SIZE);\n\n            if expected_path_length != merkle_proof.path().len() {\n                return Ok(false);\n            }\n\n            if !merkle_proof.validate(challenged_leaf as usize) {\n                return Ok(false);\n            }\n        }\n\n        Ok(true)\n    }\n}\n\n/// A challenge specifying a sector and 
leaf.\n#[derive(Debug, Clone, PartialEq, Eq)]\npub struct Challenge {\n    /// The identifier of the challenged sector.\n    pub sector: SectorId,\n    /// The leaf index this challenge points at.\n    pub leaf: u64,\n}\n\n/// Rational PoSt specific challenge derivation.\npub fn derive_challenges(\n    challenge_count: usize,\n    sector_size: u64,\n    sectors: &OrderedSectorSet,\n    seed: &[u8],\n    faults: &OrderedSectorSet,\n) -> Result<Vec<Challenge>> {\n    (0..challenge_count)\n        .map(|n| {\n            let mut attempt = 0;\n            let mut attempted_sectors = HashSet::new();\n            loop {\n                let c = derive_challenge(seed, n as u64, attempt, sector_size, sectors)?;\n\n                // check for faulty sector\n                if !faults.contains(&c.sector) {\n                    // valid challenge found: the sector is not faulty\n                    return Ok(c);\n                } else {\n                    attempt += 1;\n                    attempted_sectors.insert(c.sector);\n\n                    ensure!(\n                        attempted_sectors.len() < sectors.len(),\n                        \"all sectors are faulty\"\n                    );\n                }\n            }\n        })\n        .collect()\n}\n\nfn derive_challenge(\n    seed: &[u8],\n    n: u64,\n    attempt: u64,\n    sector_size: u64,\n    sectors: &OrderedSectorSet,\n) -> Result<Challenge> {\n    let mut data = seed.to_vec();\n    data.extend_from_slice(&n.to_le_bytes()[..]);\n    data.extend_from_slice(&attempt.to_le_bytes()[..]);\n\n    let hash = blake2b_simd::blake2b(&data);\n    let challenge_bytes = hash.as_bytes();\n    let sector_challenge = LittleEndian::read_u64(&challenge_bytes[..8]);\n    let leaf_challenge = LittleEndian::read_u64(&challenge_bytes[8..16]);\n\n    let sector_index = (sector_challenge % sectors.len() as u64) as usize;\n    let sector = *sectors\n        .iter()\n        .nth(sector_index)\n        .context(\"invalid challenge generated\")?;\n\n    Ok(Challenge {\n        sector,\n        leaf: leaf_challenge % (sector_size / NODE_SIZE as u64),\n    })\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use generic_array::typenum;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n    use typenum::{U0, U2, U8};\n\n    use storage_proofs_core::{\n        hasher::{Blake2sHasher, Domain, Hasher, PedersenHasher, PoseidonHasher, Sha256Hasher},\n        merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait},\n    };\n\n    fn test_rational_post<Tree: MerkleTreeTrait>()\n    where\n        Tree::Store: 'static,\n    {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n        let sector_size = leaves as u64 * 32;\n        let challenges_count = 8;\n\n        let pub_params = PublicParams {\n            sector_size,\n            challenges_count,\n        };\n\n        // Construct and store an MT using a named store.\n        let temp_dir = tempdir::TempDir::new(\"tree\").unwrap();\n        let temp_path = temp_dir.path();\n\n        let (_data1, tree1) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n        let (_data2, tree2) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n\n        let seed = (0..leaves).map(|_| rng.gen()).collect::<Vec<u8>>();\n        let mut faults = OrderedSectorSet::new();\n        faults.insert(139.into());\n        faults.insert(1.into());\n        faults.insert(32.into());\n\n        
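// Sector 891 is the only sector that is not faulty, so every challenge must target it.\n        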
let mut sectors = OrderedSectorSet::new();\n        sectors.insert(891.into());\n        sectors.insert(139.into());\n        sectors.insert(32.into());\n        sectors.insert(1.into());\n\n        let mut trees = BTreeMap::new();\n        trees.insert(139.into(), &tree1); // faulty with tree\n        trees.insert(891.into(), &tree2);\n        // other two faults don't have a tree available\n\n        let challenges =\n            derive_challenges(challenges_count, sector_size, &sectors, &seed, &faults).unwrap();\n\n        // the only valid sector to challenge is 891\n        assert!(\n            challenges.iter().all(|c| c.sector == 891.into()),\n            \"invalid challenge generated\"\n        );\n\n        let comm_r_lasts = challenges\n            .iter()\n            .map(|c| trees.get(&c.sector).unwrap().root())\n            .collect::<Vec<_>>();\n\n        let comm_cs: Vec<<Tree::Hasher as Hasher>::Domain> = challenges\n            .iter()\n            .map(|_c| <Tree::Hasher as Hasher>::Domain::random(rng))\n            .collect();\n\n        let comm_rs: Vec<<Tree::Hasher as Hasher>::Domain> = comm_cs\n            .iter()\n            .zip(comm_r_lasts.iter())\n            .map(|(comm_c, comm_r_last)| {\n                <Tree::Hasher as Hasher>::Function::hash2(comm_c, comm_r_last)\n            })\n            .collect();\n\n        let pub_inputs = PublicInputs {\n            challenges: &challenges,\n            comm_rs: &comm_rs,\n            faults: &faults,\n        };\n\n        let priv_inputs = PrivateInputs::<Tree> {\n            trees: &trees,\n            comm_cs: &comm_cs,\n            comm_r_lasts: &comm_r_lasts,\n        };\n\n        let proof = RationalPoSt::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs)\n            .expect(\"proving failed\");\n\n        let is_valid = RationalPoSt::<Tree>::verify(&pub_params, &pub_inputs, &proof)\n            .expect(\"verification failed\");\n\n        assert!(is_valid);\n    }\n\n    #[test]\n    fn rational_post_pedersen() {\n        test_rational_post::<LCTree<PedersenHasher, U8, U0, U0>>();\n    }\n\n    #[test]\n    fn rational_post_sha256() {\n        test_rational_post::<LCTree<Sha256Hasher, U8, U0, U0>>();\n    }\n\n    #[test]\n    fn rational_post_blake2s() {\n        test_rational_post::<LCTree<Blake2sHasher, U8, U0, U0>>();\n    }\n\n    #[test]\n    fn rational_post_poseidon() {\n        test_rational_post::<LCTree<PoseidonHasher, U8, U0, U0>>();\n    }\n\n    #[test]\n    fn rational_post_poseidon_8_8() {\n        test_rational_post::<LCTree<PoseidonHasher, U8, U8, U0>>();\n    }\n\n    #[test]\n    fn rational_post_poseidon_8_8_2() {\n        test_rational_post::<LCTree<PoseidonHasher, U8, U8, U2>>();\n    }\n\n    fn test_rational_post_validates_challenge_identity<Tree: 'static + MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(crate::TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n        let sector_size = leaves as u64 * 32;\n        let challenges_count = 2;\n\n        let pub_params = PublicParams {\n            sector_size,\n            challenges_count,\n        };\n\n        // Construct and store an MT using a named store.\n        let temp_dir = tempdir::TempDir::new(\"tree\").unwrap();\n        let temp_path = temp_dir.path();\n\n        let (_data, tree) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n        let seed = (0..leaves).map(|_| rng.gen()).collect::<Vec<u8>>();\n        let mut faults = 
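// Mark sector 1 as faulty so that all challenges target sector 0.\n            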
OrderedSectorSet::new();\n        faults.insert(1.into());\n        let mut sectors = OrderedSectorSet::new();\n        sectors.insert(0.into());\n        sectors.insert(1.into());\n\n        let mut trees = BTreeMap::new();\n        trees.insert(0.into(), &tree);\n\n        let challenges =\n            derive_challenges(challenges_count, sector_size, &sectors, &seed, &faults).unwrap();\n        let comm_r_lasts = challenges\n            .iter()\n            .map(|c| trees.get(&c.sector).unwrap().root())\n            .collect::<Vec<_>>();\n\n        let comm_cs: Vec<<Tree::Hasher as Hasher>::Domain> = challenges\n            .iter()\n            .map(|_c| <Tree::Hasher as Hasher>::Domain::random(rng))\n            .collect();\n\n        let comm_rs: Vec<<Tree::Hasher as Hasher>::Domain> = comm_cs\n            .iter()\n            .zip(comm_r_lasts.iter())\n            .map(|(comm_c, comm_r_last)| {\n                <Tree::Hasher as Hasher>::Function::hash2(comm_c, comm_r_last)\n            })\n            .collect();\n\n        let pub_inputs = PublicInputs {\n            challenges: &challenges,\n            faults: &faults,\n            comm_rs: &comm_rs,\n        };\n\n        let priv_inputs = PrivateInputs::<Tree> {\n            trees: &trees,\n            comm_cs: &comm_cs,\n            comm_r_lasts: &comm_r_lasts,\n        };\n\n        let proof = RationalPoSt::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs)\n            .expect(\"proving failed\");\n\n        let seed = (0..leaves).map(|_| rng.gen()).collect::<Vec<u8>>();\n        let challenges =\n            derive_challenges(challenges_count, sector_size, &sectors, &seed, &faults).unwrap();\n        let comm_r_lasts = challenges.iter().map(|_c| tree.root()).collect::<Vec<_>>();\n\n        let comm_cs: Vec<<Tree::Hasher as Hasher>::Domain> = challenges\n            .iter()\n            .map(|_c| <Tree::Hasher as Hasher>::Domain::random(rng))\n            .collect();\n\n        let comm_rs: Vec<<Tree::Hasher as Hasher>::Domain> = comm_cs\n            .iter()\n            .zip(comm_r_lasts.iter())\n            .map(|(comm_c, comm_r_last)| {\n                <Tree::Hasher as Hasher>::Function::hash2(comm_c, comm_r_last)\n            })\n            .collect();\n\n        let different_pub_inputs = PublicInputs {\n            challenges: &challenges,\n            faults: &faults,\n            comm_rs: &comm_rs,\n        };\n\n        let verified = RationalPoSt::<Tree>::verify(&pub_params, &different_pub_inputs, &proof)\n            .expect(\"verification failed\");\n\n        // A proof created with the wrong challenge must not verify!\n        assert!(!verified);\n    }\n\n    #[test]\n    fn rational_post_actually_validates_challenge_identity_sha256() {\n        test_rational_post_validates_challenge_identity::<LCTree<Sha256Hasher, U8, U0, U0>>();\n    }\n\n    #[test]\n    fn rational_post_actually_validates_challenge_identity_blake2s() {\n        test_rational_post_validates_challenge_identity::<LCTree<Blake2sHasher, U8, U0, U0>>();\n    }\n\n    #[test]\n    fn rational_post_actually_validates_challenge_identity_pedersen() {\n        test_rational_post_validates_challenge_identity::<LCTree<PedersenHasher, U8, U0, U0>>();\n    }\n\n    #[test]\n    fn rational_post_actually_validates_challenge_identity_poseidon() {\n        test_rational_post_validates_challenge_identity::<LCTree<PoseidonHasher, U8, U0, U0>>();\n    }\n\n    #[test]\n    fn rational_post_actually_validates_challenge_identity_poseidon_8_8() {\n        
test_rational_post_validates_challenge_identity::<LCTree<PoseidonHasher, U8, U8, U0>>();\n    }\n\n    #[test]\n    fn rational_post_actually_validates_challenge_identity_poseidon_8_8_2() {\n        test_rational_post_validates_challenge_identity::<LCTree<PoseidonHasher, U8, U8, U2>>();\n    }\n\n    #[test]\n    fn test_derive_challenges_fails_on_all_faulty() {\n        use std::collections::BTreeSet;\n\n        let mut sectors = BTreeSet::new();\n        sectors.insert(SectorId::from(1));\n        sectors.insert(SectorId::from(2));\n\n        let mut faults = BTreeSet::new();\n        faults.insert(SectorId::from(1));\n        faults.insert(SectorId::from(2));\n\n        let seed = vec![0u8];\n\n        assert!(derive_challenges(10, 1024, &sectors, &seed, &faults).is_err());\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/batchpost.rs",
    "content": "use std::marker::PhantomData;\n\nuse byteorder::{LittleEndian, WriteBytesExt};\nuse num_bigint::BigUint;\nuse num_traits::cast::ToPrimitive;\nuse serde::de::Deserialize;\nuse serde::ser::Serialize;\n\nuse crate::crypto::blake2s::blake2s;\nuse crate::error::Result;\nuse crate::hasher::{Domain, Hasher};\nuse crate::merkle::MerkleTree;\nuse crate::merklepor;\nuse crate::proof::ProofScheme;\nuse crate::util::data_at_node;\n\n#[derive(Clone, Debug)]\npub struct PublicParams {\n    /// The public params passed for the individual merklepors.\n    pub params: merklepor::PublicParams,\n    /// How many pors `prove` runs.\n    pub batch_count: usize,\n}\n\n#[derive(Debug)]\npub struct SetupParams {}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Proof<H: Hasher> {\n    #[serde(bound(\n        serialize = \"merklepor::Proof<H>: Serialize\",\n        deserialize = \"merklepor::Proof<H>: Deserialize<'de>\"\n    ))]\n    pub proofs: Vec<merklepor::Proof<H>>,\n    pub challenges: Vec<usize>,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicInputs<'a, T: 'a + Domain> {\n    /// The root hash of the underlying merkle tree.\n    pub commitment: T,\n    /// The inital challenge, which leaf to prove.\n    pub challenge: usize,\n    /// The prover id.\n    pub replica_id: &'a T,\n}\n\n/// The inputs that are only available to the prover.\n#[derive(Debug)]\npub struct PrivateInputs<'a, H: 'a + Hasher> {\n    /// The underlying data.\n    pub data: &'a [u8],\n    /// The underlying merkle tree.\n    pub tree: &'a MerkleTree<H::Domain, H::Function>,\n}\n\nimpl<'a, H: Hasher> PrivateInputs<'a, H> {\n    pub fn new(data: &'a [u8], tree: &'a MerkleTree<H::Domain, H::Function>) -> Self {\n        PrivateInputs { data, tree }\n    }\n}\n\n#[derive(Default, Debug)]\npub struct BatchPoST<H: Hasher> {\n    _h: PhantomData<H>,\n}\n\nimpl<'a, H: 'a + Hasher> ProofScheme<'a> for BatchPoST<H> {\n    type PublicParams = PublicParams;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<'a, H::Domain>;\n    type PrivateInputs = PrivateInputs<'a, H>;\n    type Proof = Proof<H>;\n\n    fn setup(_sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        // merklepor does not have a setup currently\n        unimplemented!(\"not used\")\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        // initalize challenge\n        let mut challenge = pub_inputs.challenge;\n        let count = pub_params.batch_count;\n\n        let mut proofs = Vec::with_capacity(count);\n        let mut challenges = Vec::with_capacity(count);\n\n        // push the first challenge\n        challenges.push(challenge);\n\n        for i in 0..count {\n            // execute a single merklepor\n            let proof = merklepor::MerklePoR::prove(\n                &pub_params.params,\n                &merklepor::PublicInputs {\n                    commitment: Some(pub_inputs.commitment),\n                    challenge,\n                },\n                &merklepor::PrivateInputs::new(\n                    H::Domain::try_from_bytes(data_at_node(priv_inputs.data, challenge)?)?,\n                    priv_inputs.tree,\n                ),\n            )?;\n\n            challenge = derive_challenge(\n                pub_inputs.replica_id,\n                i,\n                challenge,\n                &proof,\n                pub_params.params.leaves,\n     
       )?;\n\n            challenges.push(challenge);\n            proofs.push(proof);\n        }\n\n        Ok(Proof { proofs, challenges })\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        let count = pub_params.batch_count;\n\n        // ensure lengths match\n        if proof.proofs.len() + 1 != proof.challenges.len() {\n            println!(\n                \"invalid lengths {} != {}\",\n                proof.proofs.len() + 1,\n                proof.challenges.len()\n            );\n            return Ok(false);\n        }\n\n        for i in 0..count {\n            // verify the proof\n            if !merklepor::MerklePoR::verify(\n                &pub_params.params,\n                &merklepor::PublicInputs {\n                    challenge: proof.challenges[i],\n                    commitment: Some(pub_inputs.commitment),\n                },\n                &proof.proofs[i],\n            )? {\n                println!(\"proof does not verify\");\n                return Ok(false);\n            }\n            // verify the challenges are correct\n            let challenge = derive_challenge(\n                pub_inputs.replica_id,\n                i,\n                proof.challenges[i],\n                &proof.proofs[i],\n                pub_params.params.leaves,\n            )?;\n\n            if challenge != proof.challenges[i + 1] {\n                println!(\"challenges don't match\");\n                return Ok(false);\n            }\n        }\n\n        Ok(true)\n    }\n}\n\n#[cfg(target_pointer_width = \"64\")]\nfn write_usize(target: &mut Vec<u8>, value: usize) -> ::std::result::Result<(), ::std::io::Error> {\n    target.write_u64::<LittleEndian>(value as u64)\n}\n\n#[cfg(target_pointer_width = \"32\")]\nfn write_usize(target: &mut Vec<u8>, value: usize) -> ::std::result::Result<(), ::std::io::Error> {\n    target.write_u32::<LittleEndian>(value as u32)\n}\n\n/// Derives a new challenge, given the inputs, by concatenating the `replica_id`, the round `i`, the current `challenge` and the serialized `proof` and hashing them.\nfn derive_challenge<H: Hasher>(\n    replica_id: &H::Domain,\n    i: usize,\n    challenge: usize,\n    proof: &merklepor::Proof<H>,\n    leaves: usize,\n) -> Result<usize> {\n    let mut bytes = replica_id.into_bytes();\n\n    write_usize(&mut bytes, i)?;\n    write_usize(&mut bytes, challenge)?;\n    bytes.extend(proof.serialize());\n\n    let hash = blake2s(bytes.as_slice());\n\n    // challenge is created by interpreting the hash as a biguint in little endian\n    // and then running mod leaves on it.\n\n    let big_challenge = BigUint::from_bytes_le(hash.as_slice());\n    let big_mod_challenge = big_challenge % leaves;\n\n    Ok(big_mod_challenge\n        .to_usize()\n        .expect(\"must fit into usize after mod operation\"))\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use pairing::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n\n    use crate::drgraph::{new_seed, BucketGraph, Graph};\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher};\n    use crate::merklepor;\n\n    fn test_batchpost<H: Hasher>() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let replica_id: H::Domain = rng.gen();\n        let pub_params = PublicParams {\n            params: merklepor::PublicParams {\n     
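           // `private: false` treats the commitment as publicly known (cf. drgporep's\n                // PoR params, which set `private: comm_d.is_none()`)\n     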
           leaves: 32,\n                private: false,\n            },\n            batch_count: 10,\n        };\n\n        let data: Vec<u8> = (0..32)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        let graph = BucketGraph::<H>::new(32, 16, 0, new_seed());\n        let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs::<H::Domain> {\n            challenge: 3,\n            commitment: tree.root(),\n            replica_id: &replica_id,\n        };\n\n        let priv_inputs = PrivateInputs::<H>::new(data.as_slice(), &tree);\n\n        let proof = BatchPoST::<H>::prove(&pub_params, &pub_inputs, &priv_inputs).unwrap();\n\n        assert!(\n            BatchPoST::<H>::verify(&pub_params, &pub_inputs, &proof).unwrap(),\n            \"failed to verify\"\n        );\n\n        // mess with a single part of the proof\n        {\n            let mut proof = proof;\n            proof.challenges[0] = proof.challenges[0] + 1;\n            assert!(\n                !BatchPoST::<H>::verify(&pub_params, &pub_inputs, &proof).unwrap(),\n                \"verified invalid proof\"\n            );\n        }\n    }\n\n    #[test]\n    fn batchpost_pedersen() {\n        test_batchpost::<PedersenHasher>();\n    }\n\n    #[test]\n    fn batchpost_sha256() {\n        test_batchpost::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn batchpost_blake2s() {\n        test_batchpost::<Blake2sHasher>();\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/beacon_post.rs",
    "content": "use std::marker::PhantomData;\nuse std::{thread, time};\n\nuse byteorder::{ByteOrder, LittleEndian};\nuse serde::de::Deserialize;\nuse serde::ser::Serialize;\n\nuse crate::error::{Error, Result};\nuse crate::hasher::{Domain, Hasher};\nuse crate::merkle::MerkleTree;\nuse crate::parameter_cache::ParameterSetIdentifier;\nuse crate::proof::ProofScheme;\nuse crate::vdf::Vdf;\nuse crate::vdf_post;\n\n#[derive(Clone, Debug)]\npub struct SetupParams<T: Domain, V: Vdf<T>> {\n    pub vdf_post_setup_params: vdf_post::SetupParams<T, V>,\n    pub post_periods_count: usize,\n}\n\n#[derive(Clone, Debug)]\npub struct PublicParams<T: Domain, V: Vdf<T>> {\n    pub vdf_post_pub_params: vdf_post::PublicParams<T, V>,\n    pub post_periods_count: usize,\n}\n\nimpl<T: Domain, V: Vdf<T>> ParameterSetIdentifier for PublicParams<T, V> {\n    fn parameter_set_identifier(&self) -> String {\n        format!(\n            \"beacon_post::PublicParams{{vdf_post_pub_params: {}, post_periods_count: {}\",\n            self.vdf_post_pub_params.parameter_set_identifier(),\n            self.post_periods_count\n        )\n    }\n}\n\n#[derive(Clone, Debug)]\npub struct PublicInputs<T: Domain> {\n    /// The root hashes of the merkle trees of the sealed sectors.\n    pub commitments: Vec<T>,\n}\n\n#[derive(Clone, Debug)]\npub struct PrivateInputs<'a, H: 'a + Hasher> {\n    pub replicas: &'a [&'a [u8]],\n    pub trees: &'a [&'a MerkleTree<H::Domain, H::Function>],\n    _h: PhantomData<H>,\n}\n\nimpl<'a, H: 'a + Hasher> PrivateInputs<'a, H> {\n    pub fn new(\n        replicas: &'a [&'a [u8]],\n        trees: &'a [&'a MerkleTree<H::Domain, H::Function>],\n    ) -> Self {\n        PrivateInputs {\n            replicas,\n            trees,\n            _h: PhantomData,\n        }\n    }\n}\n\n/// Beacon-PoSt\n/// This is one construction of a Proof-of-Spacetime.\n/// It currently only supports proving over a single sector.\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct Proof<'a, H: Hasher + 'a, V: Vdf<H::Domain>>(\n    #[serde(bound(\n        serialize = \"vdf_post::Proof<'a, H, V>: Serialize\",\n        deserialize = \"vdf_post::Proof<'a, H, V>: Deserialize<'de>\"\n    ))]\n    Vec<vdf_post::Proof<'a, H, V>>,\n);\n\nimpl<'a, H: Hasher + 'a, V: Vdf<H::Domain>> Proof<'a, H, V> {\n    pub fn proofs(&self) -> &[vdf_post::Proof<'a, H, V>] {\n        &self.0\n    }\n}\n\n#[derive(Clone, Debug, Default)]\npub struct BeaconPoSt<H: Hasher, V: Vdf<H::Domain>> {\n    _t: PhantomData<H>,\n    _v: PhantomData<V>,\n}\n\n#[derive(Clone, Debug, Default)]\npub struct Beacon {\n    count: usize,\n}\n\n// TODO: We should make Beacon a trait and parameterize BeaconPoSt on that trait.\n// This will allow for multiple Beacon implementations, particularly for tests.\n// `Beacon::get(…)` should never block for values of `t` which are in the past.\nimpl Beacon {\n    pub fn get<T: Domain>(&mut self, t: usize) -> T {\n        // TODO: actual beacon\n\n        if self.count < t {\n            // sleep a bit, to simulate delay\n            thread::sleep(time::Duration::from_millis(10));\n            self.count += 1;\n        }\n\n        let mut bytes = [0u8; 32];\n        LittleEndian::write_u32(&mut bytes, t as u32);\n        T::try_from_bytes(&bytes).expect(\"invalid beacon element\")\n    }\n}\n\nimpl<'a, H: Hasher, V: Vdf<H::Domain>> ProofScheme<'a> for BeaconPoSt<H, V>\nwhere\n    H: 'a,\n{\n    type PublicParams = PublicParams<H::Domain, V>;\n    type SetupParams = SetupParams<H::Domain, V>;\n    type PublicInputs = 
PublicInputs<H::Domain>;\n    type PrivateInputs = PrivateInputs<'a, H>;\n    type Proof = Proof<'a, H, V>;\n\n    fn setup(sp: &SetupParams<H::Domain, V>) -> Result<PublicParams<H::Domain, V>> {\n        Ok(PublicParams {\n            vdf_post_pub_params: vdf_post::VDFPoSt::<H, V>::setup(&sp.vdf_post_setup_params)?,\n            post_periods_count: sp.post_periods_count,\n        })\n    }\n\n    fn prove<'b>(\n        pub_params: &'b PublicParams<H::Domain, V>,\n        pub_inputs: &'b PublicInputs<H::Domain>,\n        priv_inputs: &'b PrivateInputs<'a, H>,\n    ) -> Result<Proof<'a, H, V>> {\n        let sectors_count = pub_params.vdf_post_pub_params.sectors_count;\n        let post_periods_count = pub_params.post_periods_count;\n\n        if priv_inputs.replicas.len() != sectors_count {\n            return Err(Error::MalformedInput);\n        }\n\n        if priv_inputs.trees.len() != sectors_count {\n            return Err(Error::MalformedInput);\n        }\n\n        let mut proofs_vdf_post = Vec::with_capacity(post_periods_count);\n\n        let mut beacon = Beacon::default();\n\n        for t in 0..post_periods_count {\n            // Run Beacon\n            let r = beacon.get::<H::Domain>(t);\n\n            // Generate proof\n            // TODO: avoid cloning\n            let pub_inputs_vdf_post = vdf_post::PublicInputs {\n                challenge_seed: r,\n                commitments: pub_inputs.commitments.clone(),\n            };\n\n            let priv_inputs_vdf_post = vdf_post::PrivateInputs::new(priv_inputs.trees);\n\n            proofs_vdf_post.push(vdf_post::VDFPoSt::prove(\n                &pub_params.vdf_post_pub_params,\n                &pub_inputs_vdf_post,\n                &priv_inputs_vdf_post,\n            )?);\n        }\n\n        Ok(Proof(proofs_vdf_post))\n    }\n\n    fn verify(\n        pub_params: &PublicParams<H::Domain, V>,\n        pub_inputs: &PublicInputs<H::Domain>,\n        proof: &Proof<H, V>,\n    ) -> Result<bool> {\n        let post_periods_count = pub_params.post_periods_count;\n\n        // VDF PoSt Verification\n\n        let mut beacon = Beacon::default();\n\n        for t in 0..post_periods_count {\n            // Generate challenges\n            let r = beacon.get::<H::Domain>(t);\n\n            // TODO: avoid cloning\n            let pub_inputs_vdf_post = vdf_post::PublicInputs {\n                challenge_seed: r,\n                commitments: pub_inputs.commitments.clone(),\n            };\n\n            if !vdf_post::VDFPoSt::verify(\n                &pub_params.vdf_post_pub_params,\n                &pub_inputs_vdf_post,\n                &proof.0[t],\n            )? 
{\n                return Ok(false);\n            }\n        }\n\n        Ok(true)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use pairing::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n\n    use crate::drgraph::{new_seed, BucketGraph, Graph};\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::pedersen::{PedersenDomain, PedersenHasher};\n    use crate::vdf_sloth;\n\n    #[test]\n    fn test_beacon_post_basics() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let sp = SetupParams::<PedersenDomain, vdf_sloth::Sloth> {\n            vdf_post_setup_params: vdf_post::SetupParams::<PedersenDomain, vdf_sloth::Sloth> {\n                challenge_count: 10,\n                sector_size: 1024 * 32,\n                post_epochs: 3,\n                setup_params_vdf: vdf_sloth::SetupParams {\n                    key: rng.gen(),\n                    rounds: 1,\n                },\n                sectors_count: 2,\n            },\n            post_periods_count: 3,\n        };\n\n        let pub_params = BeaconPoSt::<PedersenHasher, vdf_sloth::Sloth>::setup(&sp).unwrap();\n\n        let data0: Vec<u8> = (0..1024)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        let data1: Vec<u8> = (0..1024)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph0 = BucketGraph::<PedersenHasher>::new(1024, 5, 0, new_seed());\n        let tree0 = graph0.merkle_tree(data0.as_slice()).unwrap();\n        let graph1 = BucketGraph::<PedersenHasher>::new(1024, 5, 0, new_seed());\n        let tree1 = graph1.merkle_tree(data1.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs {\n            commitments: vec![tree0.root(), tree1.root()],\n        };\n\n        let priv_inputs = PrivateInputs::<PedersenHasher> {\n            trees: &[&tree0, &tree1],\n            replicas: &[&data0, &data1],\n            _h: PhantomData,\n        };\n\n        let proof = BeaconPoSt::prove(&pub_params, &pub_inputs, &priv_inputs).unwrap();\n\n        assert!(BeaconPoSt::verify(&pub_params, &pub_inputs, &proof).unwrap());\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/challenge_derivation.rs",
    "content": "use crate::crypto::blake2s::blake2s;\nuse crate::hasher::Domain;\nuse byteorder::{LittleEndian, WriteBytesExt};\nuse num_bigint::BigUint;\nuse num_traits::cast::ToPrimitive;\n\npub fn derive_challenges<D: Domain>(\n    n: usize,\n    layer: u8,\n    leaves: usize,\n    replica_id: &D,\n    commitment: &D,\n    k: u8,\n) -> Vec<usize> {\n    (0..n)\n        .map(|i| {\n            let mut bytes = replica_id.into_bytes();\n            let j = ((n * k as usize) + i) as u32;\n            bytes.extend(commitment.into_bytes());\n            bytes.push(layer);\n            bytes.write_u32::<LittleEndian>(j).unwrap();\n\n            let hash = blake2s(bytes.as_slice());\n            let big_challenge = BigUint::from_bytes_le(hash.as_slice());\n\n            // For now, we cannot try to prove the first or last node, so make sure the challenge can never be 0 or leaves - 1.\n            let big_mod_challenge = big_challenge % (leaves - 2);\n            big_mod_challenge.to_usize().unwrap() + 1\n        })\n        .collect()\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n    use crate::hasher::pedersen::PedersenDomain;\n    use rand::{thread_rng, Rng};\n    use std::collections::HashMap;\n\n    #[test]\n    fn challenge_derivation() {\n        let n = 200;\n        let leaves = 1 << 30;\n        let mut rng = thread_rng();\n        let replica_id: PedersenDomain = rng.gen();\n        let commitment: PedersenDomain = rng.gen();\n        let partitions = 5;\n        let total_challenges = partitions * n;\n        let layers = 100;\n\n        let mut layers_with_duplicates = 0;\n\n        for layer in 0..layers {\n            let mut histogram = HashMap::new();\n            for k in 0..partitions {\n                let challenges =\n                    derive_challenges(n, layer, leaves, &replica_id, &commitment, k as u8);\n\n                for challenge in challenges {\n                    let counter = histogram.entry(challenge).or_insert(0);\n                    *counter += 1;\n                }\n            }\n            let unique_challenges = histogram.len();\n            if unique_challenges < total_challenges {\n                layers_with_duplicates += 1;\n            }\n        }\n\n        // If we generate 100 layers with 1,000 challenges in each, at most two layers can contain\n        // any duplicates for this assertion to succeed.\n        assert!(layers_with_duplicates < 3);\n    }\n\n    #[test]\n    // This test shows that partitioning (k = 0..partitions) generates the same challenges as\n    // generating the same number of challenges with only one partition (k = 0).\n    fn challenge_partition_equivalence() {\n        let n = 40;\n        let leaves = 1 << 30;\n        let mut rng = thread_rng();\n        let replica_id: PedersenDomain = rng.gen();\n        let commitment: PedersenDomain = rng.gen();\n        let partitions = 5;\n        let layers = 100;\n        let total_challenges = n * partitions;\n        for layer in 0..layers {\n            let one_partition_challenges =\n                derive_challenges(total_challenges, layer, leaves, &replica_id, &commitment, 0);\n            let many_partition_challenges = (0..partitions)\n                .flat_map(|k| {\n                    derive_challenges(n, layer, leaves, &replica_id, &commitment, k as u8)\n                })\n                .collect::<Vec<_>>();\n\n            assert_eq!(one_partition_challenges, many_partition_challenges);\n        }\n    }\n\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/beacon_post.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse pairing::bls12_381::{Bls12, Fr};\nuse sapling_crypto::jubjub::JubjubEngine;\n\nuse crate::beacon_post::BeaconPoSt;\nuse crate::circuit::vdf_post;\nuse crate::compound_proof::{CircuitComponent, CompoundProof};\nuse crate::hasher::Hasher;\nuse crate::parameter_cache::{CacheableParameters, ParameterSetIdentifier};\nuse crate::proof::ProofScheme;\nuse crate::vdf::Vdf;\n\n/// This is the `Beacon-PoSt` circuit.\npub struct BeaconPoStCircuit<'a, E: JubjubEngine, H: Hasher, V: Vdf<H::Domain>> {\n    /// Parameters for the engine.\n    pub params: &'a E::Params,\n\n    // VDF-PoSt\n    pub challenge_seed: Option<E::Fr>,\n    pub vdf_key: Option<E::Fr>,\n    pub vdf_ys_vec: Vec<Vec<Option<E::Fr>>>,\n    pub vdf_xs_vec: Vec<Vec<Option<E::Fr>>>,\n    pub vdf_sloth_rounds: usize,\n    pub challenges_vec_vec: Vec<Vec<Vec<usize>>>,\n    pub challenged_sectors_vec_vec: Vec<Vec<Vec<usize>>>,\n    pub challenged_leafs_vec_vec: Vec<Vec<Vec<Option<E::Fr>>>>,\n    pub root_commitment: Option<E::Fr>,\n    pub commitments_vec_vec: Vec<Vec<Vec<Option<E::Fr>>>>,\n    pub paths_vec_vec: Vec<Vec<Vec<Vec<Option<(E::Fr, bool)>>>>>,\n    _h: PhantomData<H>,\n    _v: PhantomData<V>,\n}\n\npub struct BeaconPoStCompound {}\n\n#[derive(Clone, Default)]\npub struct ComponentPrivateInputs {}\n\nimpl<'a, E: JubjubEngine, H: Hasher, V: Vdf<H::Domain>> CircuitComponent\n    for BeaconPoStCircuit<'a, E, H, V>\n{\n    type ComponentPrivateInputs = ComponentPrivateInputs;\n}\n\nimpl<'a, H: Hasher, V: Vdf<H::Domain>>\n    CompoundProof<'a, Bls12, BeaconPoSt<H, V>, BeaconPoStCircuit<'a, Bls12, H, V>>\n    for BeaconPoStCompound\nwhere\n    <V as Vdf<H::Domain>>::PublicParams: Send + Sync,\n    <V as Vdf<H::Domain>>::Proof: Send + Sync,\n    V: Sync + Send,\n    H: 'a,\n{\n    fn generate_public_inputs(\n        _pub_in: &<BeaconPoSt<H, V> as ProofScheme<'a>>::PublicInputs,\n        _pub_params: &<BeaconPoSt<H, V> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Vec<Fr> {\n        unimplemented!();\n    }\n    fn circuit(\n        _pub_in: &<BeaconPoSt<H, V> as ProofScheme<'a>>::PublicInputs,\n        _component_private_inputs:<BeaconPoStCircuit<'a, Bls12,H,V> as CircuitComponent>::ComponentPrivateInputs,\n        _vanilla_proof: &<BeaconPoSt<H, V> as ProofScheme<'a>>::Proof,\n        _pub_params: &<BeaconPoSt<H, V> as ProofScheme<'a>>::PublicParams,\n        _engine_params: &'a <Bls12 as JubjubEngine>::Params,\n    ) -> BeaconPoStCircuit<'a, Bls12, H, V> {\n        unimplemented!()\n    }\n}\n\nimpl<E: JubjubEngine, C: Circuit<E>, P: ParameterSetIdentifier> CacheableParameters<E, C, P>\n    for BeaconPoStCompound\n{\n    fn cache_prefix() -> String {\n        String::from(\"beacon-post\")\n    }\n}\n\nimpl<'a, E: JubjubEngine, H: Hasher, V: Vdf<H::Domain>> Circuit<E>\n    for BeaconPoStCircuit<'a, E, H, V>\n{\n    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let post_periods_count = self.vdf_ys_vec.len();\n\n        assert_eq!(self.vdf_xs_vec.len(), post_periods_count);\n        assert_eq!(self.challenged_leafs_vec_vec.len(), post_periods_count);\n        assert_eq!(self.commitments_vec_vec.len(), post_periods_count);\n        assert_eq!(self.paths_vec_vec.len(), post_periods_count);\n\n        for t in 0..post_periods_count {\n            let mut cs = cs.namespace(|| format!(\"t_{}\", t));\n            
vdf_post::VDFPoStCircuit::synthesize(\n                &mut cs.namespace(|| \"vdf_post\"),\n                self.params,\n                self.challenge_seed,\n                self.vdf_key,\n                self.vdf_ys_vec[t].clone(),\n                self.vdf_xs_vec[t].clone(),\n                self.vdf_sloth_rounds,\n                self.challenges_vec_vec[t].clone(),\n                self.challenged_sectors_vec_vec[t].clone(),\n                self.challenged_leafs_vec_vec[t].clone(),\n                self.root_commitment,\n                self.commitments_vec_vec[t].clone(),\n                self.paths_vec_vec[t].clone(),\n            )?;\n        }\n\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use pairing::Field;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use sapling_crypto::jubjub::JubjubBls12;\n\n    use crate::beacon_post::{self, Beacon};\n    use crate::circuit::test::*;\n    use crate::drgraph::{new_seed, BucketGraph, Graph};\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::pedersen::*;\n    use crate::vdf_post::{self, compute_root_commitment};\n    use crate::vdf_sloth;\n\n    #[test]\n    fn test_beacon_post_circuit_with_bls12_381() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let lambda = 32;\n\n        let sp = beacon_post::SetupParams::<PedersenDomain, vdf_sloth::Sloth> {\n            vdf_post_setup_params: vdf_post::SetupParams::<PedersenDomain, vdf_sloth::Sloth> {\n                challenge_count: 4,\n                sector_size: 256 * lambda,\n                post_epochs: 3,\n                setup_params_vdf: vdf_sloth::SetupParams {\n                    key: rng.gen(),\n                    rounds: 1,\n                },\n                sectors_count: 2,\n            },\n            post_periods_count: 3,\n        };\n\n        let pub_params =\n            beacon_post::BeaconPoSt::<PedersenHasher, vdf_sloth::Sloth>::setup(&sp).unwrap();\n\n        let data0: Vec<u8> = (0..256)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        let data1: Vec<u8> = (0..256)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph0 = BucketGraph::<PedersenHasher>::new(256, 5, 0, new_seed());\n        let tree0 = graph0.merkle_tree(data0.as_slice()).unwrap();\n        let graph1 = BucketGraph::<PedersenHasher>::new(256, 5, 0, new_seed());\n        let tree1 = graph1.merkle_tree(data1.as_slice()).unwrap();\n\n        let pub_inputs = beacon_post::PublicInputs {\n            commitments: vec![tree0.root(), tree1.root()],\n        };\n        let replicas = [&data0[..], &data1[..]];\n        let trees = [&tree0, &tree1];\n        let priv_inputs = beacon_post::PrivateInputs::new(&replicas[..], &trees[..]);\n\n        let proof = BeaconPoSt::prove(&pub_params, &pub_inputs, &priv_inputs).unwrap();\n\n        assert!(BeaconPoSt::verify(&pub_params, &pub_inputs, &proof).unwrap());\n\n        // actual circuit test\n\n        let vdf_ys_vec = proof\n            .proofs()\n            .iter()\n            .map(|proof| {\n                proof\n                    .ys\n                    .iter()\n                    .map(|y: &PedersenDomain| Some(y.clone().into()))\n                    .collect::<Vec<_>>()\n            })\n            .collect::<Vec<_>>();\n        let vdf_xs_vec = proof\n            .proofs()\n            .iter()\n      
      .map(|proof| {\n                proof\n                    .porep_proofs\n                    .iter()\n                    .take(vdf_ys_vec[0].len())\n                    .map(|p| Some(vdf_post::extract_vdf_input::<PedersenHasher>(p).into()))\n                    .collect()\n            })\n            .collect::<Vec<_>>();\n\n        let mut paths_vec_vec = Vec::new();\n        let mut challenged_leafs_vec_vec = Vec::new();\n        let mut commitments_vec_vec = Vec::new();\n        let mut challenges_vec_vec = Vec::new();\n        let mut challenged_sectors_vec_vec = Vec::new();\n        for p in proof.proofs() {\n            let mut paths_vec = Vec::new();\n            let mut challenged_leafs_vec = Vec::new();\n            let mut commitments_vec = Vec::new();\n\n            for porep_proof in &p.porep_proofs {\n                // -- paths\n                paths_vec.push(\n                    porep_proof\n                        .paths()\n                        .iter()\n                        .map(|p| {\n                            p.iter()\n                                .map(|v| Some((v.0.into(), v.1)))\n                                .collect::<Vec<_>>()\n                        })\n                        .collect::<Vec<_>>(),\n                );\n\n                // -- challenged leafs\n                challenged_leafs_vec.push(\n                    porep_proof\n                        .leafs()\n                        .iter()\n                        .map(|l| Some((**l).into()))\n                        .collect::<Vec<_>>(),\n                );\n\n                // -- commitments\n                commitments_vec.push(\n                    porep_proof\n                        .commitments()\n                        .iter()\n                        .map(|c| Some((**c).into()))\n                        .collect::<Vec<_>>(),\n                );\n            }\n\n            paths_vec_vec.push(paths_vec);\n            challenged_leafs_vec_vec.push(challenged_leafs_vec);\n            commitments_vec_vec.push(commitments_vec);\n            challenges_vec_vec.push(p.challenges.clone());\n            challenged_sectors_vec_vec.push(p.challenged_sectors.clone());\n        }\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        let mut beacon = Beacon::default();\n\n        let instance = BeaconPoStCircuit::<Bls12, PedersenHasher, vdf_sloth::Sloth> {\n            params,\n            // beacon_randomness_vec,\n            // challenges_vec,\n            challenges_vec_vec,\n            challenged_sectors_vec_vec,\n            challenge_seed: Some(beacon.get::<PedersenDomain>(0).into()),\n            vdf_key: Some(pub_params.vdf_post_pub_params.pub_params_vdf.key.into()),\n            vdf_xs_vec,\n            vdf_ys_vec,\n            vdf_sloth_rounds: pub_params.vdf_post_pub_params.pub_params_vdf.rounds,\n            challenged_leafs_vec_vec,\n            paths_vec_vec,\n            root_commitment: Some(compute_root_commitment(&pub_inputs.commitments).into()),\n            commitments_vec_vec,\n            _h: PhantomData,\n            _v: PhantomData,\n        };\n\n        instance\n            .synthesize(&mut cs)\n            .expect(\"failed to synthesize circuit\");\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n        assert_eq!(cs.num_inputs(), 7, \"wrong number of inputs\");\n        assert_eq!(cs.num_constraints(), 132711, \"wrong number of constraints\");\n        assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n    
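    // NOTE: the input/constraint counts asserted above pin down the exact circuit shape;\n        // they act as a regression check and must be updated if the circuit layout changes.\n    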
}\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/bench/mod.rs",
    "content": "use bellman::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};\nuse pairing::{Engine, Field};\nuse std::cmp::Ordering;\n\n#[derive(Clone, Copy)]\nstruct OrderedVariable(Variable);\n\nimpl Eq for OrderedVariable {}\nimpl PartialEq for OrderedVariable {\n    fn eq(&self, other: &OrderedVariable) -> bool {\n        match (self.0.get_unchecked(), other.0.get_unchecked()) {\n            (Index::Input(ref a), Index::Input(ref b)) => a == b,\n            (Index::Aux(ref a), Index::Aux(ref b)) => a == b,\n            _ => false,\n        }\n    }\n}\nimpl PartialOrd for OrderedVariable {\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\nimpl Ord for OrderedVariable {\n    fn cmp(&self, other: &Self) -> Ordering {\n        match (self.0.get_unchecked(), other.0.get_unchecked()) {\n            (Index::Input(ref a), Index::Input(ref b)) => a.cmp(b),\n            (Index::Aux(ref a), Index::Aux(ref b)) => a.cmp(b),\n            (Index::Input(_), Index::Aux(_)) => Ordering::Less,\n            (Index::Aux(_), Index::Input(_)) => Ordering::Greater,\n        }\n    }\n}\n\nfn eval_lc<E: Engine>(terms: &[(Variable, E::Fr)], inputs: &[E::Fr], aux: &[E::Fr]) -> E::Fr {\n    let mut acc = E::Fr::zero();\n\n    for &(var, ref coeff) in terms {\n        let mut tmp = match var.get_unchecked() {\n            Index::Input(index) => inputs[index],\n            Index::Aux(index) => aux[index],\n        };\n\n        tmp.mul_assign(&coeff);\n        acc.add_assign(&tmp);\n    }\n\n    acc\n}\n\n#[derive(Debug)]\npub struct BenchCS<E: Engine> {\n    inputs: Vec<E::Fr>,\n    aux: Vec<E::Fr>,\n    a: Vec<E::Fr>,\n    b: Vec<E::Fr>,\n    c: Vec<E::Fr>,\n}\n\nimpl<E: Engine> BenchCS<E> {\n    pub fn new() -> Self {\n        BenchCS::default()\n    }\n\n    pub fn num_constraints(&self) -> usize {\n        self.a.len()\n    }\n}\n\nimpl<E: Engine> Default for BenchCS<E> {\n    fn default() -> Self {\n        BenchCS {\n            inputs: vec![E::Fr::one()],\n            aux: vec![],\n            a: vec![],\n            b: vec![],\n            c: vec![],\n        }\n    }\n}\n\nimpl<E: Engine> ConstraintSystem<E> for BenchCS<E> {\n    type Root = Self;\n\n    fn alloc<F, A, AR>(&mut self, _: A, f: F) -> Result<Variable, SynthesisError>\n    where\n        F: FnOnce() -> Result<E::Fr, SynthesisError>,\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n    {\n        self.aux.push(f()?);\n\n        Ok(Variable::new_unchecked(Index::Aux(self.aux.len() - 1)))\n    }\n\n    fn alloc_input<F, A, AR>(&mut self, _: A, f: F) -> Result<Variable, SynthesisError>\n    where\n        F: FnOnce() -> Result<E::Fr, SynthesisError>,\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n    {\n        self.inputs.push(f()?);\n\n        Ok(Variable::new_unchecked(Index::Input(self.inputs.len() - 1)))\n    }\n\n    fn enforce<A, AR, LA, LB, LC>(&mut self, _: A, a: LA, b: LB, c: LC)\n    where\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n        LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n        LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n        LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n    {\n        self.a.push(eval_lc::<E>(\n            a(LinearCombination::zero()).as_ref(),\n            &self.inputs,\n            &self.aux,\n        ));\n        self.b.push(eval_lc::<E>(\n            b(LinearCombination::zero()).as_ref(),\n            &self.inputs,\n            &self.aux,\n        
));\n        self.c.push(eval_lc::<E>(\n            c(LinearCombination::zero()).as_ref(),\n            &self.inputs,\n            &self.aux,\n        ));\n    }\n\n    fn push_namespace<NR, N>(&mut self, _: N)\n    where\n        NR: Into<String>,\n        N: FnOnce() -> NR,\n    {\n    }\n\n    fn pop_namespace(&mut self) {}\n\n    fn get_root(&mut self) -> &mut Self::Root {\n        self\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/constraint.rs",
    "content": "use bellman::ConstraintSystem;\nuse pairing::Engine;\nuse sapling_crypto::circuit::num;\n\n/// Adds a constraint to CS, enforcing an equality relationship between the allocated numbers a and b.\n///\n/// a == b\npub fn equal<E: Engine, A, AR, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    annotation: A,\n    a: &num::AllocatedNum<E>,\n    b: &num::AllocatedNum<E>,\n) where\n    A: FnOnce() -> AR,\n    AR: Into<String>,\n{\n    // a * 1 = b\n    cs.enforce(\n        annotation,\n        |lc| lc + a.get_variable(),\n        |lc| lc + CS::one(),\n        |lc| lc + b.get_variable(),\n    );\n}\n\n/// Adds a constraint to CS, enforcing a difference relationship between the allocated numbers a, b, and difference.\n///\n/// a - b = difference\npub fn difference<E: Engine, A, AR, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    annotation: A,\n    a: &num::AllocatedNum<E>,\n    b: &num::AllocatedNum<E>,\n    difference: &num::AllocatedNum<E>,\n) where\n    A: FnOnce() -> AR,\n    AR: Into<String>,\n{\n    //    difference = a-b\n    // => difference + b = a\n    // => (difference + b) * 1 = a\n    cs.enforce(\n        annotation,\n        |lc| lc + difference.get_variable() + b.get_variable(),\n        |lc| lc + CS::one(),\n        |lc| lc + a.get_variable(),\n    );\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/drgporep.rs",
    "content": "use bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse pairing::bls12_381::{Bls12, Fr};\nuse pairing::PrimeField;\nuse sapling_crypto::circuit::boolean::{self, Boolean};\nuse sapling_crypto::circuit::{multipack, num};\nuse sapling_crypto::jubjub::JubjubEngine;\n\nuse crate::circuit::constraint;\nuse crate::circuit::kdf::kdf;\nuse crate::circuit::sloth;\nuse crate::circuit::variables::Root;\nuse crate::compound_proof::{CircuitComponent, CompoundProof};\nuse crate::drgporep::DrgPoRep;\nuse crate::drgraph::Graph;\nuse crate::fr32::fr_into_bytes;\nuse crate::merklepor;\nuse crate::parameter_cache::{CacheableParameters, ParameterSetIdentifier};\nuse crate::proof::ProofScheme;\nuse crate::util::{bytes_into_bits, bytes_into_boolean_vec};\nuse std::marker::PhantomData;\n\n/// DRG based Proof of Replication.\n///\n/// # Fields\n///\n/// * `params` - parameters for the curve\n/// * `sloth_iter` - How many rounds sloth should run for.\n///\n/// ----> Private `replica_node` - The replica node being proven.\n///\n/// * `replica_node` - The replica node being proven.\n/// * `replica_node_path` - The path of the replica node being proven.\n/// * `replica_root` - The merkle root of the replica.\n///\n/// * `replica_parents` - A list of all parents in the replica, with their value.\n/// * `replica_parents_paths` - A list of all parents paths in the replica.\n///\n/// ----> Private `data_node` - The data node being proven.\n///\n/// * `data_node_path` - The path of the data node being proven.\n/// * `data_root` - The merkle root of the data.\n/// * `replica_id` - The id of the replica.\n/// * `degree` - The degree of the graph.\n///\n//implement_drgporep!(\n//    DrgPoRepCircuit,\n//    DrgPoRepCompound,\n//    \"drg-proof-of-replication\",\n//    false\n//);\nuse crate::circuit::por::{PoRCircuit, PoRCompound};\nuse crate::hasher::{Domain, Hasher};\n\npub struct DrgPoRepCircuit<'a, E: JubjubEngine> {\n    params: &'a E::Params,\n    sloth_iter: usize,\n    replica_nodes: Vec<Option<E::Fr>>,\n    replica_nodes_paths: Vec<Vec<Option<(E::Fr, bool)>>>,\n    replica_root: Root<E>,\n    replica_parents: Vec<Vec<Option<E::Fr>>>,\n    replica_parents_paths: Vec<Vec<Vec<Option<(E::Fr, bool)>>>>,\n    data_nodes: Vec<Option<E::Fr>>,\n    data_nodes_paths: Vec<Vec<Option<(E::Fr, bool)>>>,\n    data_root: Root<E>,\n    replica_id: Option<E::Fr>,\n    degree: usize,\n    private: bool,\n}\n\nimpl<'a, E: JubjubEngine> DrgPoRepCircuit<'a, E> {\n    pub fn synthesize<CS>(\n        mut cs: CS,\n        params: &E::Params,\n        sloth_iter: usize,\n        replica_nodes: Vec<Option<E::Fr>>,\n        replica_nodes_paths: Vec<Vec<Option<(E::Fr, bool)>>>,\n        replica_root: Root<E>,\n        replica_parents: Vec<Vec<Option<E::Fr>>>,\n        replica_parents_paths: Vec<Vec<Vec<Option<(E::Fr, bool)>>>>,\n        data_nodes: Vec<Option<E::Fr>>,\n        data_nodes_paths: Vec<Vec<Option<(E::Fr, bool)>>>,\n        data_root: Root<E>,\n        replica_id: Option<E::Fr>,\n        degree: usize,\n        private: bool,\n    ) -> Result<(), SynthesisError>\n    where\n        E: JubjubEngine,\n        CS: ConstraintSystem<E>,\n    {\n        DrgPoRepCircuit {\n            params,\n            sloth_iter,\n            replica_nodes,\n            replica_nodes_paths,\n            replica_root,\n            replica_parents,\n            replica_parents_paths,\n            data_nodes,\n            data_nodes_paths,\n            data_root,\n            replica_id,\n            degree,\n            private,\n    
    }\n        .synthesize(&mut cs)\n    }\n}\n\n#[derive(Clone)]\npub struct ComponentPrivateInputs<E: JubjubEngine> {\n    pub comm_r: Option<Root<E>>,\n    pub comm_d: Option<Root<E>>,\n}\n\nimpl<E: JubjubEngine> Default for ComponentPrivateInputs<E> {\n    fn default() -> ComponentPrivateInputs<E> {\n        ComponentPrivateInputs {\n            comm_r: None,\n            comm_d: None,\n        }\n    }\n}\n\nimpl<'a, E: JubjubEngine> CircuitComponent for DrgPoRepCircuit<'a, E> {\n    type ComponentPrivateInputs = ComponentPrivateInputs<E>;\n}\n\npub struct DrgPoRepCompound<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H>,\n{\n    // Sad phantom is sad\n    _h: PhantomData<H>,\n    _g: PhantomData<G>,\n}\n\nimpl<E: JubjubEngine, C: Circuit<E>, H: Hasher, G: Graph<H>, P: ParameterSetIdentifier>\n    CacheableParameters<E, C, P> for DrgPoRepCompound<H, G>\n{\n    fn cache_prefix() -> String {\n        String::from(\"drg-proof-of-replication\")\n    }\n}\n\nimpl<'a, H, G> CompoundProof<'a, Bls12, DrgPoRep<'a, H, G>, DrgPoRepCircuit<'a, Bls12>>\n    for DrgPoRepCompound<H, G>\nwhere\n    H: 'a + Hasher,\n    G: 'a + Graph<H> + ParameterSetIdentifier + Sync + Send,\n{\n    fn generate_public_inputs(\n        pub_in: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicParams,\n        // We can ignore k because challenges are generated by caller and included\n        // in PublicInputs.\n        _k: Option<usize>,\n    ) -> Vec<Fr> {\n        let replica_id = pub_in.replica_id;\n        let challenges = &pub_in.challenges;\n        let (comm_r, comm_d) = match pub_in.tau {\n            None => (None, None),\n            Some(tau) => (Some(tau.comm_r), Some(tau.comm_d)),\n        };\n\n        let leaves = pub_params.graph.size();\n\n        let replica_id_bits = bytes_into_bits(&replica_id.into_bytes());\n\n        let packed_replica_id =\n            multipack::compute_multipacking::<Bls12>(&replica_id_bits[0..Fr::CAPACITY as usize]);\n\n        let por_pub_params = merklepor::PublicParams {\n            leaves,\n            private: comm_d.is_none(),\n        };\n\n        let mut input = Vec::new();\n        input.extend(packed_replica_id.clone());\n\n        for challenge in challenges {\n            let mut por_nodes = vec![*challenge];\n            let parents = pub_params.graph.parents(*challenge);\n            por_nodes.extend(parents);\n\n            for node in por_nodes {\n                let por_pub_inputs = merklepor::PublicInputs {\n                    commitment: comm_r,\n                    challenge: node,\n                };\n                let por_inputs = PoRCompound::<H>::generate_public_inputs(\n                    &por_pub_inputs,\n                    &por_pub_params,\n                    None,\n                );\n\n                input.extend(por_inputs);\n            }\n\n            let por_pub_inputs = merklepor::PublicInputs {\n                commitment: comm_d,\n                challenge: *challenge,\n            };\n\n            let por_inputs =\n                PoRCompound::<H>::generate_public_inputs(&por_pub_inputs, &por_pub_params, None);\n            input.extend(por_inputs);\n        }\n        input\n    }\n\n    fn circuit<'b>(\n        public_inputs: &'b <DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicInputs,\n        component_private_inputs: <DrgPoRepCircuit<'a, Bls12> as CircuitComponent>::ComponentPrivateInputs,\n        proof: &'b <DrgPoRep<'a, H, G> as 
ProofScheme<'a>>::Proof,\n        public_params: &'b <DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicParams,\n        engine_params: &'a <Bls12 as JubjubEngine>::Params,\n    ) -> DrgPoRepCircuit<'a, Bls12> {\n        let replica_nodes = proof\n            .replica_nodes\n            .iter()\n            .map(|node| Some(node.data.into()))\n            .collect();\n\n        let replica_nodes_paths = proof\n            .replica_nodes\n            .iter()\n            .map(|node| node.proof.as_options())\n            .collect();\n\n        let private_data_root = component_private_inputs.comm_d;\n        let private_replica_root = component_private_inputs.comm_r;\n        let replica_root =\n            private_replica_root.unwrap_or_else(|| Root::Val(Some(proof.replica_root.into())));\n        let data_root =\n            private_data_root.unwrap_or_else(|| Root::Val(Some((proof.data_root).into())));\n        let replica_id = Some(public_inputs.replica_id);\n\n        let replica_parents = proof\n            .replica_parents\n            .iter()\n            .map(|parents| {\n                parents\n                    .iter()\n                    .map(|(_, parent)| Some(parent.data.into()))\n                    .collect()\n            })\n            .collect();\n\n        let replica_parents_paths: Vec<Vec<_>> = proof\n            .replica_parents\n            .iter()\n            .map(|parents| {\n                let p: Vec<_> = parents\n                    .iter()\n                    .map(|(_, parent)| parent.proof.as_options())\n                    .collect();\n                p\n            })\n            .collect();\n\n        let data_nodes = proof\n            .nodes\n            .iter()\n            .map(|node| Some(node.data.into()))\n            .collect();\n\n        let data_nodes_paths = proof\n            .nodes\n            .iter()\n            .map(|node| node.proof.as_options())\n            .collect();\n\n        DrgPoRepCircuit {\n            params: engine_params,\n            sloth_iter: public_params.sloth_iter,\n            replica_nodes,\n            replica_nodes_paths,\n            replica_root,\n            replica_parents,\n            replica_parents_paths,\n            data_nodes,\n            data_nodes_paths,\n            data_root,\n            replica_id: replica_id.map(|f| f.into()),\n            degree: public_params.graph.degree(),\n            private: public_inputs.tau.is_none(),\n        }\n    }\n}\n\n///\n/// # Public Inputs\n///\n/// * [0] replica_id/0\n/// * [1] replica_id/1\n/// * [2] replica auth_path_bits\n/// * [3] replica commitment (root hash)\n/// * for i in 0..replica_parents.len()\n///   * [ ] replica parent auth_path_bits\n///   * [ ] replica parent commitment (root hash) // Same for all.\n/// * [r + 1] data auth_path_bits\n/// * [r + 2] data commitment (root hash)\n///\n///  Total = 6 + (2 * replica_parents.len())\n/// # Private Inputs\n///\n/// * [ ] replica value/0\n/// * for i in 0..replica_parents.len()\n///  * [ ] replica parent value/0\n/// * [ ] data value/\n///\n/// Total = 2 + replica_parents.len()\n///\nimpl<'a, E: JubjubEngine> Circuit<E> for DrgPoRepCircuit<'a, E> {\n    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError>\n    where\n        E: JubjubEngine,\n    {\n        let params = self.params;\n\n        let replica_id = self.replica_id;\n        let replica_root = self.replica_root;\n        let data_root = self.data_root;\n\n        let degree = self.degree;\n\n        let 
raw_bytes; // Need let here so borrow in match lives long enough.\n        let replica_id_bytes = match replica_id {\n            Some(replica_id) => {\n                raw_bytes = fr_into_bytes::<E>(&replica_id);\n                Some(raw_bytes.as_slice())\n            }\n            // Used in parameter generation or when circuit is created only for\n            // structure and input count.\n            None => None,\n        };\n\n        // get the replica_id in bits\n        let replica_id_bits =\n            bytes_into_boolean_vec(cs.namespace(|| \"replica_id_bits\"), replica_id_bytes, 32)?;\n\n        multipack::pack_into_inputs(\n            cs.namespace(|| \"replica_id\"),\n            &replica_id_bits[0..Fr::CAPACITY as usize],\n        )?;\n\n        let replica_root_num = replica_root.allocated(cs.namespace(|| \"replica_root\"))?;\n        let replica_root_var = Root::Var(replica_root_num);\n\n        let data_root_num = data_root.allocated(cs.namespace(|| \"data_root\"))?;\n        let data_root_var = Root::Var(data_root_num);\n\n        for i in 0..self.data_nodes.len() {\n            let mut cs = cs.namespace(|| format!(\"challenge_{}\", i));\n            // ensure that all inputs are well formed\n            let replica_node_path = &self.replica_nodes_paths[i];\n            let replica_parents_paths = &self.replica_parents_paths[i];\n            let data_node_path = &self.data_nodes_paths[i];\n\n            let replica_node = &self.replica_nodes[i];\n            let replica_parents = &self.replica_parents[i];\n            let data_node = &self.data_nodes[i];\n\n            assert_eq!(data_node_path.len(), replica_node_path.len());\n\n            // Inclusion checks\n            {\n                let mut cs = cs.namespace(|| \"inclusion_checks\");\n\n                PoRCircuit::synthesize(\n                    cs.namespace(|| \"replica_inclusion\"),\n                    &params,\n                    *replica_node,\n                    replica_node_path.clone(),\n                    replica_root_var.clone(),\n                    self.private,\n                )?;\n\n                // validate each replica_parents merkle proof\n                for i in 0..replica_parents.len() {\n                    PoRCircuit::synthesize(\n                        cs.namespace(|| format!(\"parents_inclusion_{}\", i)),\n                        &params,\n                        replica_parents[i],\n                        replica_parents_paths[i].clone(),\n                        replica_root_var.clone(),\n                        self.private,\n                    )?;\n                }\n\n                // validate data node commitment\n                PoRCircuit::synthesize(\n                    cs.namespace(|| \"data_inclusion\"),\n                    &params,\n                    *data_node,\n                    data_node_path.clone(),\n                    data_root_var.clone(),\n                    self.private,\n                )?;\n            }\n\n            // Encoding checks\n            {\n                let mut cs = cs.namespace(|| \"encoding_checks\");\n                // get the parents into bits\n                let parents_bits: Vec<Vec<Boolean>> = {\n                    replica_parents\n                        .iter()\n                        .enumerate()\n                        .map(|(i, val)| -> Result<Vec<Boolean>, SynthesisError> {\n                            let mut v = boolean::field_into_boolean_vec_le(\n                                cs.namespace(|| 
format!(\"parents_{}_bits\", i)),\n                                *val,\n                            )?;\n                            // sad padding is sad\n                            while v.len() < 256 {\n                                v.push(boolean::Boolean::Constant(false));\n                            }\n                            Ok(v)\n                        })\n                        .collect::<Result<Vec<Vec<Boolean>>, SynthesisError>>()?\n                };\n\n                // generate the encryption key\n                let key = kdf(\n                    cs.namespace(|| \"kdf\"),\n                    replica_id_bits.clone(),\n                    parents_bits,\n                    degree,\n                )?;\n\n                let decoded = sloth::decode(\n                    cs.namespace(|| \"sloth_decode\"),\n                    &key,\n                    *replica_node,\n                    self.sloth_iter,\n                )?;\n\n                // TODO this should not be here, instead, this should be the leaf Fr in the data_auth_path\n                // TODO also note that we need to change/makesurethat the leaves are the data, instead of hashes of the data\n                let expected = num::AllocatedNum::alloc(cs.namespace(|| \"data node\"), || {\n                    data_node.ok_or_else(|| SynthesisError::AssignmentMissing)\n                })?;\n\n                // ensure the encrypted data and data_node match\n                constraint::equal(&mut cs, || \"equality\", &expected, &decoded);\n            }\n        }\n        // profit!\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::circuit::test::*;\n    use crate::compound_proof;\n    use crate::drgporep;\n    use crate::drgraph::{graph_height, new_seed, BucketGraph};\n    use crate::fr32::{bytes_into_fr, fr_into_bytes};\n    use crate::hasher::pedersen::*;\n    use crate::porep::PoRep;\n    use crate::proof::ProofScheme;\n    use crate::util::data_at_node;\n    use pairing::Field;\n    use rand::Rand;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use sapling_crypto::jubjub::JubjubBls12;\n\n    #[test]\n    fn drgporep_input_circuit_with_bls12_381() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let nodes = 12;\n        let degree = 6;\n        let challenge = 2;\n        let sloth_iter = 1;\n\n        let replica_id: Fr = rng.gen();\n\n        let mut data: Vec<u8> = (0..nodes)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&Fr::rand(rng)))\n            .collect();\n\n        // TODO: don't clone everything\n        let original_data = data.clone();\n        let data_node: Option<Fr> = Some(\n            bytes_into_fr::<Bls12>(\n                data_at_node(&original_data, challenge).expect(\"failed to read original data\"),\n            )\n            .unwrap(),\n        );\n\n        let sp = drgporep::SetupParams {\n            drg: drgporep::DrgParams {\n                nodes,\n                degree,\n                expansion_degree: 0,\n                seed: new_seed(),\n            },\n            sloth_iter,\n        };\n\n        let pp = drgporep::DrgPoRep::<PedersenHasher, BucketGraph<_>>::setup(&sp)\n            .expect(\"failed to create drgporep setup\");\n        let (tau, aux) = drgporep::DrgPoRep::<PedersenHasher, _>::replicate(\n            &pp,\n            &replica_id.into(),\n            data.as_mut_slice(),\n            
None,\n        )\n        .expect(\"failed to replicate\");\n\n        let pub_inputs = drgporep::PublicInputs {\n            replica_id: replica_id.into(),\n            challenges: vec![challenge],\n            tau: Some(tau.into()),\n        };\n        let priv_inputs = drgporep::PrivateInputs::<PedersenHasher> { aux: &aux };\n\n        let proof_nc =\n            drgporep::DrgPoRep::<PedersenHasher, _>::prove(&pp, &pub_inputs, &priv_inputs)\n                .expect(\"failed to prove\");\n\n        assert!(\n            drgporep::DrgPoRep::<PedersenHasher, _>::verify(&pp, &pub_inputs, &proof_nc)\n                .expect(\"failed to verify\"),\n            \"failed to verify (non circuit)\"\n        );\n\n        let replica_node: Option<Fr> = Some(proof_nc.replica_nodes[0].data.into());\n\n        let replica_node_path = proof_nc.replica_nodes[0].proof.as_options();\n        let replica_root = Root::Val(Some((proof_nc.replica_root).into()));\n        let replica_parents = proof_nc.replica_parents[0]\n            .iter()\n            .map(|(_, parent)| Some(parent.data.into()))\n            .collect();\n        let replica_parents_paths: Vec<_> = proof_nc.replica_parents[0]\n            .iter()\n            .map(|(_, parent)| parent.proof.as_options())\n            .collect();\n\n        let data_node_path = proof_nc.nodes[0].proof.as_options();\n        let data_root = Root::Val(Some((proof_nc.data_root).into()));\n        let replica_id = Some(replica_id);\n\n        assert!(\n            proof_nc.nodes[0].proof.validate(challenge),\n            \"failed to verify data commitment\"\n        );\n        assert!(\n            proof_nc.nodes[0]\n                .proof\n                .validate_data(&fr_into_bytes::<Bls12>(&data_node.unwrap())),\n            \"failed to verify data commitment with data\"\n        );\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        DrgPoRepCircuit::synthesize(\n            cs.namespace(|| \"drgporep\"),\n            params,\n            sloth_iter,\n            vec![replica_node],\n            vec![replica_node_path],\n            replica_root,\n            vec![replica_parents],\n            vec![replica_parents_paths],\n            vec![data_node],\n            vec![data_node_path],\n            data_root,\n            replica_id,\n            degree,\n            false,\n        )\n        .expect(\"failed to synthesize circuit\");\n\n        if !cs.is_satisfied() {\n            println!(\n                \"failed to satisfy: {:?}\",\n                cs.which_is_unsatisfied().unwrap()\n            );\n        }\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n        assert_eq!(cs.num_inputs(), 18, \"wrong number of inputs\");\n        assert_eq!(cs.num_constraints(), 131216, \"wrong number of constraints\");\n\n        assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n        assert_eq!(\n            cs.get_input(1, \"drgporep/replica_id/input 0\"),\n            replica_id.unwrap()\n        );\n    }\n\n    #[test]\n    fn drgporep_input_circuit_num_constraints() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        // 1 GB\n        let n = (1 << 30) / 32;\n        let m = 6;\n        let tree_depth = graph_height(n);\n        let sloth_iter = 1;\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        DrgPoRepCircuit::synthesize(\n            cs.namespace(|| \"drgporep\"),\n            
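// witness shapes below: a single challenge, `m` parents per node, and auth paths of\n            // length `tree_depth`, matching the 1 GB graph parameters above\n            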
params,\n            sloth_iter,\n            vec![Some(Fr::rand(rng)); 1],\n            vec![vec![Some((Fr::rand(rng), false)); tree_depth]; 1],\n            Root::Val(Some(Fr::rand(rng))),\n            vec![vec![Some(Fr::rand(rng)); m]; 1],\n            vec![vec![vec![Some((Fr::rand(rng), false)); tree_depth]; m]; 1],\n            vec![Some(Fr::rand(rng)); 1],\n            vec![vec![Some((Fr::rand(rng), false)); tree_depth]; 1],\n            Root::Val(Some(Fr::rand(rng))),\n            Some(Fr::rand(rng)),\n            m,\n            false,\n        )\n        .expect(\"failed to synthesize circuit\");\n\n        assert_eq!(cs.num_inputs(), 18, \"wrong number of inputs\");\n        assert_eq!(cs.num_constraints(), 363392, \"wrong number of constraints\");\n    }\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn drgporep_test_compound() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let nodes = 5;\n        let degree = 2;\n        let challenges = vec![1, 3];\n        let sloth_iter = 1;\n\n        let replica_id: Fr = rng.gen();\n        let mut data: Vec<u8> = (0..nodes)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&Fr::rand(rng)))\n            .collect();\n\n        // Only generate seed once. It would be bad if we used different seeds in the same test.\n        let seed = new_seed();\n\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: &drgporep::SetupParams {\n                drg: drgporep::DrgParams {\n                    nodes,\n                    degree,\n                    expansion_degree: 0,\n                    seed,\n                },\n                sloth_iter,\n            },\n            engine_params: params,\n            partitions: None,\n        };\n\n        let public_params =\n            DrgPoRepCompound::<PedersenHasher, BucketGraph<_>>::setup(&setup_params)\n                .expect(\"setup failed\");\n\n        let (tau, aux) = drgporep::DrgPoRep::<PedersenHasher, _>::replicate(\n            &public_params.vanilla_params,\n            &replica_id.into(),\n            data.as_mut_slice(),\n            None,\n        )\n        .expect(\"failed to replicate\");\n\n        let public_inputs = drgporep::PublicInputs::<PedersenDomain> {\n            replica_id: replica_id.into(),\n            challenges,\n            tau: Some(tau),\n        };\n        let private_inputs = drgporep::PrivateInputs { aux: &aux };\n\n        // This duplication is necessary so public_params don't outlive public_inputs and private_inputs.\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: &drgporep::SetupParams {\n                drg: drgporep::DrgParams {\n                    nodes,\n                    degree,\n                    expansion_degree: 0,\n                    seed,\n                },\n                sloth_iter,\n            },\n            engine_params: params,\n            partitions: None,\n        };\n\n        let public_params =\n            DrgPoRepCompound::<PedersenHasher, BucketGraph<_>>::setup(&setup_params)\n                .expect(\"setup failed\");\n\n        let proof = DrgPoRepCompound::<PedersenHasher, _>::prove(\n            &public_params,\n            &public_inputs,\n            &private_inputs,\n            None,\n        )\n        .expect(\"failed while proving\");\n\n        let (circuit, inputs) = 
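\n            // circuit_for_test returns the circuit together with the public inputs\n            // expected for it, so both can be checked on the test constraint system\n            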
DrgPoRepCompound::<PedersenHasher, _>::circuit_for_test(\n            &public_params,\n            &public_inputs,\n            &private_inputs,\n        );\n\n        let mut cs = TestConstraintSystem::new();\n\n        let _ = circuit.synthesize(&mut cs);\n        assert!(cs.is_satisfied());\n        assert!(cs.verify(&inputs));\n\n        let verified =\n            DrgPoRepCompound::<PedersenHasher, _>::verify(&public_params, &public_inputs, &proof)\n                .expect(\"failed while verifying\");\n\n        assert!(verified);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/kdf.rs",
    "content": "use bellman::{ConstraintSystem, SynthesisError};\nuse sapling_crypto::circuit::blake2s::blake2s as blake2s_circuit;\nuse sapling_crypto::circuit::boolean::Boolean;\nuse sapling_crypto::circuit::{multipack, num};\nuse sapling_crypto::jubjub::JubjubEngine;\n\n/// Key derivation function, using pedersen hashes as the underlying primitive.\npub fn kdf<E, CS>(\n    mut cs: CS,\n    id: Vec<Boolean>,\n    parents: Vec<Vec<Boolean>>,\n    m: usize,\n) -> Result<num::AllocatedNum<E>, SynthesisError>\nwhere\n    E: JubjubEngine,\n    CS: ConstraintSystem<E>,\n{\n    // ciphertexts will become a buffer of the layout\n    // id | encodedParentNode1 | encodedParentNode1 | ...\n    let ciphertexts = parents.into_iter().fold(id, |mut acc, parent| {\n        acc.extend(parent);\n        acc\n    });\n\n    assert_eq!(ciphertexts.len(), 8 * 32 * (1 + m), \"invalid input length\");\n\n    let personalization = vec![0u8; 8];\n    let alloc_bits = blake2s_circuit(cs.namespace(|| \"hash\"), &ciphertexts[..], &personalization)?;\n    let fr = match alloc_bits[0].get_value() {\n        Some(_) => {\n            let bits = alloc_bits\n                .iter()\n                .map(|v| v.get_value().unwrap())\n                .collect::<Vec<bool>>();\n            // TODO: figure out if we can avoid this\n            let frs = multipack::compute_multipacking::<E>(&bits);\n            Ok(frs[0])\n        }\n        None => Err(SynthesisError::AssignmentMissing),\n    };\n\n    num::AllocatedNum::<E>::alloc(cs.namespace(|| \"num\"), || fr)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::kdf;\n    use crate::circuit::test::TestConstraintSystem;\n    use crate::crypto;\n    use crate::fr32::fr_into_bytes;\n    use crate::util::bytes_into_boolean_vec;\n    use bellman::ConstraintSystem;\n    use pairing::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use sapling_crypto::circuit::boolean::Boolean;\n\n    #[test]\n    fn kdf_circuit() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let m = 20;\n\n        let id: Vec<u8> = fr_into_bytes::<Bls12>(&rng.gen());\n        let parents: Vec<Vec<u8>> = (0..m).map(|_| fr_into_bytes::<Bls12>(&rng.gen())).collect();\n\n        let id_bits: Vec<Boolean> = {\n            let mut cs = cs.namespace(|| \"id\");\n            bytes_into_boolean_vec(&mut cs, Some(id.as_slice()), id.len()).unwrap()\n        };\n        let parents_bits: Vec<Vec<Boolean>> = parents\n            .clone()\n            .iter()\n            .enumerate()\n            .map(|(i, p)| {\n                let mut cs = cs.namespace(|| format!(\"parents {}\", i));\n                bytes_into_boolean_vec(&mut cs, Some(p.as_slice()), p.len()).unwrap()\n            })\n            .collect();\n        let out = kdf(\n            cs.namespace(|| \"kdf\"),\n            id_bits.clone(),\n            parents_bits.clone(),\n            m,\n        )\n        .unwrap();\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n        assert_eq!(cs.num_constraints(), 240282);\n\n        let input_bytes = parents.iter().fold(id, |mut acc, parent| {\n            acc.extend(parent);\n            acc\n        });\n\n        let expected = crypto::kdf::kdf::<Bls12>(input_bytes.as_slice(), m);\n\n        assert_eq!(\n            expected,\n            out.get_value().unwrap(),\n            \"circuit and non circuit do not match\"\n        );\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/mod.rs",
    "content": "mod constraint;\n\npub mod por;\n\npub mod beacon_post;\npub mod drgporep;\npub mod kdf;\npub mod multi_proof;\npub mod pedersen;\npub mod porc;\npub mod ppor;\npub mod sloth;\npub mod variables;\npub mod vdf_post;\npub mod xor;\npub mod zigzag;\n\n// FIXME: Can we make a config like for test?\npub mod bench;\n\npub mod test;\n"
  },
  {
    "path": "storage-proofs/src/circuit/multi_proof.rs",
    "content": "use bellman::groth16;\n\nuse crate::error::Result;\nuse pairing::Engine;\nuse std::io::{self, Read, Write};\n\npub struct MultiProof<E: Engine> {\n    pub circuit_proofs: Vec<groth16::Proof<E>>,\n    pub groth_params: groth16::Parameters<E>,\n}\n\nimpl<E: Engine> MultiProof<E> {\n    pub fn new(\n        groth_proofs: Vec<groth16::Proof<E>>,\n        groth_params: groth16::Parameters<E>,\n    ) -> MultiProof<E> {\n        MultiProof {\n            circuit_proofs: groth_proofs,\n            groth_params,\n        }\n    }\n\n    pub fn new_from_reader<R: Read>(\n        partitions: Option<usize>,\n        mut reader: R,\n        groth_params: groth16::Parameters<E>,\n    ) -> Result<MultiProof<E>> {\n        let num_proofs = match partitions {\n            Some(n) => n,\n            None => 1,\n        };\n        let proofs = (0..num_proofs)\n            .map(|_| groth16::Proof::read(&mut reader))\n            .collect::<io::Result<Vec<_>>>()?;\n\n        Ok(Self::new(proofs, groth_params))\n    }\n\n    pub fn write<W: Write>(&self, mut writer: W) -> Result<()> {\n        for proof in &self.circuit_proofs {\n            proof.write(&mut writer)?\n        }\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/pedersen.rs",
    "content": "use bellman::{ConstraintSystem, SynthesisError};\nuse sapling_crypto::circuit::boolean::Boolean;\nuse sapling_crypto::circuit::{num, pedersen_hash};\nuse sapling_crypto::jubjub::JubjubEngine;\n\nuse crate::crypto::pedersen::PEDERSEN_BLOCK_SIZE;\n\n/// Pedersen hashing for inputs with length multiple of the block size. Based on a Merkle-Damgard construction.\npub fn pedersen_md_no_padding<E, CS>(\n    mut cs: CS,\n    params: &E::Params,\n    data: &[Boolean],\n) -> Result<num::AllocatedNum<E>, SynthesisError>\nwhere\n    E: JubjubEngine,\n    CS: ConstraintSystem<E>,\n{\n    assert!(\n        data.len() >= 2 * PEDERSEN_BLOCK_SIZE,\n        \"must be at least 2 block sizes long\"\n    );\n\n    assert_eq!(\n        data.len() % PEDERSEN_BLOCK_SIZE,\n        0,\n        \"data must be a multiple of the block size\"\n    );\n\n    let mut chunks = data.chunks(PEDERSEN_BLOCK_SIZE);\n    let mut cur: Vec<Boolean> = chunks.nth(0).unwrap().to_vec();\n    let chunks_len = chunks.len();\n\n    for (i, block) in chunks.enumerate() {\n        let mut cs = cs.namespace(|| format!(\"block {}\", i));\n        for b in block {\n            // TODO: no cloning\n            cur.push(b.clone());\n        }\n        if i == chunks_len - 1 {\n            // last round, skip\n        } else {\n            cur = pedersen_compression(cs.namespace(|| \"hash\"), params, &cur)?;\n        }\n    }\n\n    // hash and return a num at the end\n    pedersen_compression_num(cs.namespace(|| \"last hash\"), params, &cur)\n}\n\npub fn pedersen_compression_num<E: JubjubEngine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    params: &E::Params,\n    bits: &[Boolean],\n) -> Result<num::AllocatedNum<E>, SynthesisError> {\n    Ok(pedersen_hash::pedersen_hash(\n        cs.namespace(|| \"inner hash\"),\n        pedersen_hash::Personalization::NoteCommitment,\n        &bits,\n        params,\n    )?\n    .get_x()\n    .clone())\n}\n\npub fn pedersen_compression<E: JubjubEngine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    params: &E::Params,\n    bits: &[Boolean],\n) -> Result<Vec<Boolean>, SynthesisError> {\n    let h = pedersen_compression_num(cs.namespace(|| \"compression\"), params, bits)?;\n    let mut out = h.into_bits_le(cs.namespace(|| \"h into bits\"))?;\n\n    // needs padding, because x does not always translate to exactly 256 bits\n    while out.len() < PEDERSEN_BLOCK_SIZE {\n        out.push(Boolean::Constant(false));\n    }\n\n    Ok(out)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::pedersen_md_no_padding;\n    use crate::circuit::test::TestConstraintSystem;\n    use crate::crypto;\n    use crate::util::bytes_into_boolean_vec;\n    use bellman::ConstraintSystem;\n    use pairing::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use sapling_crypto::circuit::boolean::Boolean;\n    use sapling_crypto::jubjub::JubjubBls12;\n\n    #[test]\n    fn test_pedersen_input_circut() {\n        let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for i in 2..6 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n            let data: Vec<u8> = (0..i * 32).map(|_| rng.gen()).collect();\n            let params = &JubjubBls12::new();\n\n            let data_bits: Vec<Boolean> = {\n                let mut cs = cs.namespace(|| \"data\");\n                bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), data.len()).unwrap()\n            };\n            let out =\n                pedersen_md_no_padding(cs.namespace(|| 
\"pedersen\"), params, &data_bits).unwrap();\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n            let expected = crypto::pedersen::pedersen_md_no_padding(data.as_slice());\n\n            assert_eq!(\n                expected,\n                out.get_value().unwrap(),\n                \"circuit and non circuit do not match\"\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/por.rs",
    "content": "use bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse pairing::bls12_381::{Bls12, Fr};\nuse sapling_crypto::circuit::{boolean, multipack, num, pedersen_hash};\nuse sapling_crypto::jubjub::{JubjubBls12, JubjubEngine};\n\nuse crate::circuit::constraint;\nuse crate::circuit::variables::Root;\nuse crate::compound_proof::{CircuitComponent, CompoundProof};\nuse crate::drgraph::graph_height;\nuse crate::merklepor::MerklePoR;\nuse crate::parameter_cache::{CacheableParameters, ParameterSetIdentifier};\nuse crate::proof::ProofScheme;\n\n/// Proof of retrievability.\n///\n/// # Fields\n///\n/// * `params` - The params for the bls curve.\n/// * `value` - The value of the leaf.\n/// * `auth_path` - The authentication path of the leaf in the tree.\n/// * `root` - The merkle root of the tree.\n///\nuse crate::hasher::Hasher;\nuse std::marker::PhantomData;\n\npub struct PoRCircuit<'a, E: JubjubEngine> {\n    params: &'a E::Params,\n    value: Option<E::Fr>,\n    auth_path: Vec<Option<(E::Fr, bool)>>,\n    root: Root<E>,\n    private: bool,\n}\n\nimpl<'a, E: JubjubEngine> CircuitComponent for PoRCircuit<'a, E> {\n    type ComponentPrivateInputs = Option<Root<E>>;\n}\n\npub struct PoRCompound<H: Hasher> {\n    _h: PhantomData<H>,\n}\n\npub fn challenge_into_auth_path_bits(challenge: usize, leaves: usize) -> Vec<bool> {\n    let height = graph_height(leaves);\n    let mut bits = Vec::new();\n    let mut n = challenge;\n    for _ in 0..height {\n        bits.push(n & 1 == 1);\n        n >>= 1;\n    }\n    bits\n}\n\nimpl<E: JubjubEngine, C: Circuit<E>, P: ParameterSetIdentifier, H: Hasher>\n    CacheableParameters<E, C, P> for PoRCompound<H>\n{\n    fn cache_prefix() -> String {\n        String::from(\"proof-of-retrievability\")\n    }\n}\n\n// can only implment for Bls12 because merklepor is not generic over the engine.\nimpl<'a, H> CompoundProof<'a, Bls12, MerklePoR<H>, PoRCircuit<'a, Bls12>> for PoRCompound<H>\nwhere\n    H: 'a + Hasher,\n{\n    fn circuit<'b>(\n        public_inputs: &<MerklePoR<H> as ProofScheme<'a>>::PublicInputs,\n        _component_private_inputs: <PoRCircuit<'a, Bls12> as CircuitComponent>::ComponentPrivateInputs,\n        proof: &'b <MerklePoR<H> as ProofScheme<'a>>::Proof,\n        public_params: &'b <MerklePoR<H> as ProofScheme<'a>>::PublicParams,\n        engine_params: &'a JubjubBls12,\n    ) -> PoRCircuit<'a, Bls12> {\n        let (root, private) = match (*public_inputs).commitment {\n            None => (Root::Val(Some(proof.proof.root.into())), true),\n            Some(commitment) => (Root::Val(Some(commitment.into())), false),\n        };\n\n        // Ensure inputs are consistent with public params.\n        assert_eq!(private, public_params.private);\n\n        PoRCircuit::<Bls12> {\n            params: engine_params,\n            value: Some(proof.data.into()),\n            auth_path: proof.proof.as_options(),\n            root,\n            private,\n        }\n    }\n\n    fn generate_public_inputs(\n        pub_inputs: &<MerklePoR<H> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<MerklePoR<H> as ProofScheme<'a>>::PublicParams,\n        _k: Option<usize>,\n    ) -> Vec<Fr> {\n        let auth_path_bits = challenge_into_auth_path_bits(pub_inputs.challenge, pub_params.leaves);\n        let packed_auth_path = multipack::compute_multipacking::<Bls12>(&auth_path_bits);\n\n        let mut inputs = Vec::new();\n        inputs.extend(packed_auth_path);\n\n        if let Some(commitment) = pub_inputs.commitment {\n            
assert!(!pub_params.private);\n            inputs.push(commitment.into());\n        } else {\n            assert!(pub_params.private);\n        }\n\n        inputs\n    }\n}\n\nimpl<'a, E: JubjubEngine> Circuit<E> for PoRCircuit<'a, E> {\n    /// # Public Inputs\n    ///\n    /// This circuit expects the following public inputs.\n    ///\n    /// * [0] - packed version of the `is_right` components of the auth_path.\n    /// * [1] - the merkle root of the tree.\n    ///\n    /// This circuit derives the following private inputs from its fields:\n    /// * value_num - packed version of `value` as bits. (might be more than one Fr)\n    ///\n    /// Note: All public inputs must be provided as `E::Fr`.\n    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError>\n    where\n        E: JubjubEngine,\n    {\n        let params = self.params;\n        let value = self.value;\n        let auth_path = self.auth_path;\n        let root = self.root;\n\n        {\n            let value_num = num::AllocatedNum::alloc(cs.namespace(|| \"value\"), || {\n                Ok(value.ok_or_else(|| SynthesisError::AssignmentMissing)?)\n            })?;\n\n            let mut cur = value_num;\n\n            let mut auth_path_bits = Vec::with_capacity(auth_path.len());\n\n            // Ascend the merkle tree authentication path\n            for (i, e) in auth_path.into_iter().enumerate() {\n                let cs = &mut cs.namespace(|| format!(\"merkle tree hash {}\", i));\n\n                // Determines if the current subtree is the \"right\" leaf at this\n                // depth of the tree.\n                let cur_is_right = boolean::Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| \"position bit\"),\n                    e.map(|e| e.1),\n                )?);\n\n                // Witness the authentication path element adjacent\n                // at this depth.\n                let path_element =\n                    num::AllocatedNum::alloc(cs.namespace(|| \"path element\"), || {\n                        Ok(e.ok_or(SynthesisError::AssignmentMissing)?.0)\n                    })?;\n\n                // Swap the two if the current subtree is on the right\n                let (xl, xr) = num::AllocatedNum::conditionally_reverse(\n                    cs.namespace(|| \"conditional reversal of preimage\"),\n                    &cur,\n                    &path_element,\n                    &cur_is_right,\n                )?;\n\n                // We don't need to be strict, because the function is\n                // collision-resistant. 
If the prover witnesses a congruency,\n                // they will be unable to find an authentication path in the\n                // tree with high probability.\n                let mut preimage = vec![];\n                preimage.extend(xl.into_bits_le(cs.namespace(|| \"xl into bits\"))?);\n                preimage.extend(xr.into_bits_le(cs.namespace(|| \"xr into bits\"))?);\n\n                // Compute the new subtree value\n                cur = pedersen_hash::pedersen_hash(\n                    cs.namespace(|| \"computation of pedersen hash\"),\n                    pedersen_hash::Personalization::MerkleTree(i),\n                    &preimage,\n                    params,\n                )?\n                .get_x()\n                .clone(); // Injective encoding\n\n                auth_path_bits.push(cur_is_right);\n            }\n\n            // allocate input for is_right auth_path\n            multipack::pack_into_inputs(cs.namespace(|| \"path\"), &auth_path_bits)?;\n\n            {\n                // Validate that the root of the merkle tree that we calculated is the same as the input.\n\n                let rt = Root::allocated(&root, cs.namespace(|| \"root value\"))?;\n                constraint::equal(cs, || \"enforce root is correct\", &cur, &rt);\n\n                if !self.private {\n                    // Expose the root\n                    rt.inputize(cs.namespace(|| \"root\"))?;\n                }\n            }\n\n            Ok(())\n        }\n    }\n}\n\nimpl<'a, E: JubjubEngine> PoRCircuit<'a, E> {\n    pub fn synthesize<CS>(\n        mut cs: CS,\n        params: &E::Params,\n        value: Option<E::Fr>,\n        auth_path: Vec<Option<(E::Fr, bool)>>,\n        root: Root<E>,\n        private: bool,\n    ) -> Result<(), SynthesisError>\n    where\n        E: JubjubEngine,\n        CS: ConstraintSystem<E>,\n    {\n        let por = PoRCircuit::<E> {\n            params,\n            value,\n            auth_path,\n            root,\n            private,\n        };\n\n        por.synthesize(&mut cs)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use pairing::Field;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use sapling_crypto::circuit::multipack;\n    use sapling_crypto::jubjub::JubjubBls12;\n\n    use crate::circuit::test::*;\n    use crate::compound_proof;\n    use crate::drgraph::{new_seed, BucketGraph, Graph};\n    use crate::fr32::{bytes_into_fr, fr_into_bytes};\n    use crate::hasher::pedersen::*;\n    use crate::merklepor;\n    use crate::proof::ProofScheme;\n    use crate::util::data_at_node;\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn por_test_compound() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n        let leaves = 6;\n        let data: Vec<u8> = (0..leaves)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        let graph = BucketGraph::<PedersenHasher>::new(leaves, 16, 0, new_seed());\n        let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n        for i in 0..3 {\n            let public_inputs = merklepor::PublicInputs {\n                challenge: i,\n                commitment: Some(tree.root()),\n            };\n\n            let setup_params = compound_proof::SetupParams {\n                vanilla_params: &merklepor::SetupParams {\n                    leaves,\n                    private: false,\n                },\n                engine_params: 
&JubjubBls12::new(),\n                partitions: None,\n            };\n            let public_params =\n                PoRCompound::<PedersenHasher>::setup(&setup_params).expect(\"setup failed\");\n\n            let private_inputs = merklepor::PrivateInputs::<PedersenHasher>::new(\n                bytes_into_fr::<Bls12>(\n                    data_at_node(data.as_slice(), public_inputs.challenge).unwrap(),\n                )\n                .expect(\"failed to create Fr from node data\")\n                .into(),\n                &tree,\n            );\n\n            let proof = PoRCompound::<PedersenHasher>::prove(\n                &public_params,\n                &public_inputs,\n                &private_inputs,\n                None,\n            )\n            .expect(\"failed while proving\");\n\n            let verified =\n                PoRCompound::<PedersenHasher>::verify(&public_params, &public_inputs, &proof)\n                    .expect(\"failed while verifying\");\n            assert!(verified);\n\n            let (circuit, inputs) = PoRCompound::<PedersenHasher>::circuit_for_test(\n                &public_params,\n                &public_inputs,\n                &private_inputs,\n            );\n\n            let mut cs = TestConstraintSystem::new();\n\n            let _ = circuit.synthesize(&mut cs);\n            assert!(cs.is_satisfied());\n            assert!(cs.verify(&inputs));\n        }\n    }\n\n    #[test]\n    fn test_por_input_circuit_with_bls12_381() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let leaves = 6;\n\n        for i in 0..6 {\n            // -- Basic Setup\n\n            let data: Vec<u8> = (0..leaves)\n                .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n                .collect();\n\n            let graph = BucketGraph::<PedersenHasher>::new(leaves, 16, 0, new_seed());\n            let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n            // -- MerklePoR\n\n            let pub_params = merklepor::PublicParams {\n                leaves,\n                private: true,\n            };\n            let pub_inputs = merklepor::PublicInputs {\n                challenge: i,\n                commitment: Some(tree.root().into()),\n            };\n\n            let priv_inputs = merklepor::PrivateInputs::<PedersenHasher>::new(\n                bytes_into_fr::<Bls12>(\n                    data_at_node(data.as_slice(), pub_inputs.challenge).unwrap(),\n                )\n                .unwrap()\n                .into(),\n                &tree,\n            );\n\n            // create a non circuit proof\n            let proof = merklepor::MerklePoR::<PedersenHasher>::prove(\n                &pub_params,\n                &pub_inputs,\n                &priv_inputs,\n            )\n            .unwrap();\n\n            // make sure it verifies\n            assert!(\n                merklepor::MerklePoR::<PedersenHasher>::verify(&pub_params, &pub_inputs, &proof)\n                    .unwrap(),\n                \"failed to verify merklepor proof\"\n            );\n\n            // -- Circuit\n\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let por = PoRCircuit::<Bls12> {\n                params,\n                value: Some(proof.data.into()),\n                auth_path: proof.proof.as_options(),\n                root: Root::Val(Some(pub_inputs.commitment.unwrap().into())),\n                
private: false,\n            };\n\n            por.synthesize(&mut cs).unwrap();\n\n            assert_eq!(cs.num_inputs(), 3, \"wrong number of inputs\");\n            assert_eq!(cs.num_constraints(), 4149, \"wrong number of constraints\");\n\n            let auth_path_bits: Vec<bool> = proof\n                .proof\n                .path()\n                .iter()\n                .map(|(_, is_right)| *is_right)\n                .collect();\n            let packed_auth_path = multipack::compute_multipacking::<Bls12>(&auth_path_bits);\n\n            let mut expected_inputs = Vec::new();\n            expected_inputs.extend(packed_auth_path);\n            expected_inputs.push(pub_inputs.commitment.unwrap().into());\n\n            assert_eq!(cs.get_input(0, \"ONE\"), Fr::one(), \"wrong input 0\");\n\n            assert_eq!(\n                cs.get_input(1, \"path/input 0\"),\n                expected_inputs[0],\n                \"wrong packed_auth_path\"\n            );\n\n            assert_eq!(\n                cs.get_input(2, \"root/input variable\"),\n                expected_inputs[1],\n                \"wrong root input\"\n            );\n\n            assert!(cs.is_satisfied(), \"constraints are not all satisfied\");\n            assert!(cs.verify(&expected_inputs), \"failed to verify inputs\");\n        }\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn private_por_test_compound() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n        let leaves = 6;\n        let data: Vec<u8> = (0..leaves)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        let graph = BucketGraph::<PedersenHasher>::new(leaves, 16, 0, new_seed());\n        let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n        for i in 0..3 {\n            let public_inputs = merklepor::PublicInputs {\n                challenge: i,\n                commitment: None,\n            };\n\n            let setup_params = compound_proof::SetupParams {\n                vanilla_params: &merklepor::SetupParams {\n                    leaves,\n                    private: true,\n                },\n                engine_params: &JubjubBls12::new(),\n                partitions: None,\n            };\n            let public_params =\n                PoRCompound::<PedersenHasher>::setup(&setup_params).expect(\"setup failed\");\n\n            let private_inputs = merklepor::PrivateInputs::<PedersenHasher>::new(\n                bytes_into_fr::<Bls12>(\n                    data_at_node(data.as_slice(), public_inputs.challenge).unwrap(),\n                )\n                .expect(\"failed to create Fr from node data\")\n                .into(),\n                &tree,\n            );\n\n            let proof = PoRCompound::<PedersenHasher>::prove(\n                &public_params,\n                &public_inputs,\n                &private_inputs,\n                None,\n            )\n            .expect(\"failed while proving\");\n\n            {\n                let (circuit, inputs) = PoRCompound::<PedersenHasher>::circuit_for_test(\n                    &public_params,\n                    &public_inputs,\n                    &private_inputs,\n                );\n\n                let mut cs = TestConstraintSystem::new();\n\n                let _ = circuit.synthesize(&mut cs);\n\n                assert!(cs.is_satisfied());\n                assert!(cs.verify(&inputs));\n            }\n\n       
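     // the wrapped Groth16 proofs must also verify against the compound public inputs\n       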
     let verified =\n                PoRCompound::<PedersenHasher>::verify(&public_params, &public_inputs, &proof)\n                    .expect(\"failed while verifying\");\n            assert!(verified);\n        }\n    }\n\n    #[test]\n    fn test_private_por_input_circuit_with_bls12_381() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let leaves = 6;\n\n        for i in 0..6 {\n            // -- Basic Setup\n\n            let data: Vec<u8> = (0..leaves)\n                .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n                .collect();\n\n            let graph = BucketGraph::<PedersenHasher>::new(leaves, 16, 0, new_seed());\n            let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n            // -- MerklePoR\n\n            let pub_params = merklepor::PublicParams {\n                leaves,\n                private: true,\n            };\n            let pub_inputs = merklepor::PublicInputs {\n                challenge: i,\n                commitment: None,\n            };\n\n            let priv_inputs = merklepor::PrivateInputs::<PedersenHasher>::new(\n                bytes_into_fr::<Bls12>(\n                    data_at_node(data.as_slice(), pub_inputs.challenge).unwrap(),\n                )\n                .unwrap()\n                .into(),\n                &tree,\n            );\n\n            // create a non circuit proof\n            let proof = merklepor::MerklePoR::<PedersenHasher>::prove(\n                &pub_params,\n                &pub_inputs,\n                &priv_inputs,\n            )\n            .unwrap();\n\n            // make sure it verifies\n            assert!(\n                merklepor::MerklePoR::<PedersenHasher>::verify(&pub_params, &pub_inputs, &proof)\n                    .unwrap(),\n                \"failed to verify merklepor proof\"\n            );\n\n            // -- Circuit\n\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let por = PoRCircuit::<Bls12> {\n                params,\n                value: Some(proof.data.into()),\n                auth_path: proof.proof.as_options(),\n                root: Root::Val(Some(tree.root().into())),\n                private: true,\n            };\n\n            por.synthesize(&mut cs).unwrap();\n\n            assert_eq!(cs.num_inputs(), 2, \"wrong number of inputs\");\n            assert_eq!(cs.num_constraints(), 4148, \"wrong number of constraints\");\n\n            let auth_path_bits: Vec<bool> = proof\n                .proof\n                .path()\n                .iter()\n                .map(|(_, is_right)| *is_right)\n                .collect();\n            let packed_auth_path = multipack::compute_multipacking::<Bls12>(&auth_path_bits);\n\n            let mut expected_inputs = Vec::new();\n            expected_inputs.extend(packed_auth_path);\n\n            assert_eq!(cs.get_input(0, \"ONE\"), Fr::one(), \"wrong input 0\");\n\n            assert_eq!(\n                cs.get_input(1, \"path/input 0\"),\n                expected_inputs[0],\n                \"wrong packed_auth_path\"\n            );\n\n            assert!(cs.is_satisfied(), \"constraints are not all satisfied\");\n            assert!(cs.verify(&expected_inputs), \"failed to verify inputs\");\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/porc.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse pairing::bls12_381::{Bls12, Fr};\nuse pairing::{Engine, Field};\nuse sapling_crypto::circuit::boolean::Boolean;\nuse sapling_crypto::circuit::num::{AllocatedNum, Num};\nuse sapling_crypto::circuit::{boolean, num, pedersen_hash};\nuse sapling_crypto::jubjub::JubjubEngine;\n\nuse crate::circuit::constraint;\nuse crate::compound_proof::{CircuitComponent, CompoundProof};\nuse crate::fr32::u32_into_fr;\nuse crate::hasher::Hasher;\nuse crate::parameter_cache::{CacheableParameters, ParameterSetIdentifier};\nuse crate::porc::PoRC;\nuse crate::proof::ProofScheme;\n\n/// Takes a sequence of booleans and returns a single `E::Fr` as a packed representation.\n/// NOTE: bit length must be less than `Fr::capacity()` for the field, or this will overflow.\npub fn pack_into_allocated_num<E, CS>(\n    mut cs: CS,\n    bits: &[Boolean],\n) -> Result<AllocatedNum<E>, SynthesisError>\nwhere\n    E: Engine,\n    CS: ConstraintSystem<E>,\n{\n    let mut num = Num::<E>::zero();\n    let mut coeff = E::Fr::one();\n\n    //    for (i, bits) in bits.chunks(E::Fr::CAPACITY as usize).enumerate() {\n    for bit in bits {\n        num = num.add_bool_with_coeff(CS::one(), bit, coeff);\n\n        coeff.double();\n    }\n    let val = num::AllocatedNum::alloc(cs.namespace(|| \"val\"), || Ok(num.get_value().unwrap()))?;\n\n    Ok(val)\n}\n\n/// This is the `PoRC` circuit.\npub struct PoRCCircuit<'a, E: JubjubEngine> {\n    /// Paramters for the engine.\n    pub params: &'a E::Params,\n    pub challenges: Vec<Option<E::Fr>>,\n    pub challenged_leafs: Vec<Option<E::Fr>>,\n    pub challenged_sectors: Vec<usize>,\n    pub commitments: Vec<Option<E::Fr>>,\n    pub paths: Vec<Vec<Option<(E::Fr, bool)>>>,\n}\n\npub struct PoRCCompound<H>\nwhere\n    H: Hasher,\n{\n    _h: PhantomData<H>,\n}\n\nimpl<E: JubjubEngine, C: Circuit<E>, P: ParameterSetIdentifier, H: Hasher>\n    CacheableParameters<E, C, P> for PoRCCompound<H>\n{\n    fn cache_prefix() -> String {\n        String::from(\"proof-of-retrievable-commitments\")\n    }\n}\n\n#[derive(Clone, Default)]\npub struct ComponentPrivateInputs {}\n\nimpl<'a, E: JubjubEngine> CircuitComponent for PoRCCircuit<'a, E> {\n    type ComponentPrivateInputs = ComponentPrivateInputs;\n}\n\nimpl<'a, H> CompoundProof<'a, Bls12, PoRC<'a, H>, PoRCCircuit<'a, Bls12>> for PoRCCompound<H>\nwhere\n    H: 'a + Hasher,\n{\n    fn generate_public_inputs(\n        _pub_in: &<PoRC<'a, H> as ProofScheme<'a>>::PublicInputs,\n        _pub_params: &<PoRC<'a, H> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Vec<Fr> {\n        Vec::new()\n    }\n\n    fn circuit(\n        pub_in: &<PoRC<'a, H> as ProofScheme<'a>>::PublicInputs,\n        _component_private_inputs: <PoRCCircuit<'a, Bls12> as CircuitComponent>::ComponentPrivateInputs,\n        vanilla_proof: &<PoRC<'a, H> as ProofScheme<'a>>::Proof,\n        _pub_params: &<PoRC<'a, H> as ProofScheme<'a>>::PublicParams,\n        engine_params: &'a <Bls12 as JubjubEngine>::Params,\n    ) -> PoRCCircuit<'a, Bls12> {\n        let challenged_leafs = vanilla_proof\n            .leafs()\n            .iter()\n            .map(|l| Some((**l).into()))\n            .collect();\n\n        let commitments: Vec<_> = pub_in\n            .commitments\n            .iter()\n            .map(|c| Some((*c).into()))\n            .collect();\n\n        let paths: Vec<Vec<_>> = vanilla_proof\n            .paths()\n            .iter()\n         
   .map(|v| v.iter().map(|p| Some(((*p).0.into(), p.1))).collect())\n            .collect();\n\n        let challenges: Vec<_> = pub_in\n            .challenges\n            .iter()\n            .map(|c| Some(u32_into_fr::<Bls12>(*c as u32)))\n            .collect();\n\n        let challenged_sectors = pub_in.challenged_sectors.to_vec();\n\n        PoRCCircuit {\n            params: engine_params,\n            challenges,\n            challenged_leafs,\n            commitments,\n            challenged_sectors,\n            paths,\n        }\n    }\n}\n\nimpl<'a, E: JubjubEngine> Circuit<E> for PoRCCircuit<'a, E> {\n    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let params = self.params;\n        let challenges = self.challenges;\n        let challenged_sectors = self.challenged_sectors;\n        let challenged_leafs = self.challenged_leafs;\n        let commitments = self.commitments;\n        let paths = self.paths;\n\n        assert_eq!(challenged_leafs.len(), paths.len());\n        assert_eq!(paths.len(), commitments.len());\n\n        for (i, (challenged_leaf, path)) in challenged_leafs.iter().zip(paths).enumerate() {\n            let mut cs = cs.namespace(|| format!(\"challenge_{}\", i));\n\n            let commitment = commitments[challenged_sectors[i]];\n\n            // Allocate the commitment\n            let rt = num::AllocatedNum::alloc(cs.namespace(|| \"commitment_num\"), || {\n                commitment.ok_or(SynthesisError::AssignmentMissing)\n            })?;\n\n            let params = params;\n\n            let leaf_num = num::AllocatedNum::alloc(cs.namespace(|| \"leaf_num\"), || {\n                challenged_leaf.ok_or_else(|| SynthesisError::AssignmentMissing)\n            })?;\n\n            // This is an injective encoding, as cur is a\n            // point in the prime order subgroup.\n            let mut cur = leaf_num;\n\n            let mut path_bits = Vec::with_capacity(path.len());\n\n            // Ascend the merkle tree authentication path\n            for (i, e) in path.iter().enumerate() {\n                let cs = &mut cs.namespace(|| format!(\"merkle tree hash {}\", i));\n\n                // Determines if the current subtree is the \"right\" leaf at this\n                // depth of the tree.\n                let cur_is_right = boolean::Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| \"position bit\"),\n                    e.map(|e| e.1),\n                )?);\n\n                // Witness the authentication path element adjacent\n                // at this depth.\n                let path_element =\n                    num::AllocatedNum::alloc(cs.namespace(|| \"path element\"), || {\n                        Ok(e.ok_or(SynthesisError::AssignmentMissing)?.0)\n                    })?;\n\n                // Swap the two if the current subtree is on the right\n                let (xl, xr) = num::AllocatedNum::conditionally_reverse(\n                    cs.namespace(|| \"conditional reversal of preimage\"),\n                    &cur,\n                    &path_element,\n                    &cur_is_right,\n                )?;\n\n                let mut preimage = vec![];\n                preimage.extend(xl.into_bits_le(cs.namespace(|| \"xl into bits\"))?);\n                preimage.extend(xr.into_bits_le(cs.namespace(|| \"xr into bits\"))?);\n\n                // Compute the new subtree value\n                cur = pedersen_hash::pedersen_hash(\n                    
cs.namespace(|| \"computation of pedersen hash\"),\n                    pedersen_hash::Personalization::MerkleTree(i),\n                    &preimage,\n                    params,\n                )?\n                .get_x()\n                .clone(); // Injective encoding\n\n                path_bits.push(cur_is_right);\n            }\n\n            let challenge_num =\n                num::AllocatedNum::alloc(cs.namespace(|| format!(\"challenge {}\", i)), || {\n                    Ok(challenges[i].ok_or(SynthesisError::AssignmentMissing)?)\n                })?;\n\n            // allocate value for is_right path\n            let packed = pack_into_allocated_num(cs.namespace(|| \"packed path\"), &path_bits)?;\n            constraint::equal(\n                &mut cs,\n                || \"enforce path equals challenge\",\n                &packed,\n                &challenge_num,\n            );\n            {\n                // Validate that the root of the merkle tree that we calculated is the same as the input.\n                constraint::equal(&mut cs, || \"enforce commitment correct\", &cur, &rt);\n            }\n        }\n\n        Ok(())\n    }\n}\n\nimpl<'a, E: JubjubEngine> PoRCCircuit<'a, E> {\n    pub fn synthesize<CS: ConstraintSystem<E>>(\n        cs: &mut CS,\n        params: &'a E::Params,\n        challenges: Vec<Option<E::Fr>>,\n        challenged_sectors: Vec<usize>,\n        challenged_leafs: Vec<Option<E::Fr>>,\n        commitments: Vec<Option<E::Fr>>,\n        paths: Vec<Vec<Option<(E::Fr, bool)>>>,\n    ) -> Result<(), SynthesisError> {\n        PoRCCircuit {\n            params,\n            challenges,\n            challenged_leafs,\n            challenged_sectors,\n            commitments,\n            paths,\n        }\n        .synthesize(cs)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use pairing::Field;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use sapling_crypto::jubjub::JubjubBls12;\n\n    use crate::circuit::test::*;\n    use crate::compound_proof;\n    use crate::drgraph::{new_seed, BucketGraph, Graph};\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::pedersen::*;\n    use crate::porc::{self, PoRC};\n    use crate::proof::ProofScheme;\n\n    #[test]\n    fn test_porc_circuit_with_bls12_381() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let leaves = 32;\n\n        let pub_params = porc::PublicParams {\n            leaves,\n            sectors_count: 2,\n        };\n\n        let data1: Vec<u8> = (0..32)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        let data2: Vec<u8> = (0..32)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph1 = BucketGraph::<PedersenHasher>::new(32, 5, 0, new_seed());\n        let tree1 = graph1.merkle_tree(data1.as_slice()).unwrap();\n\n        let graph2 = BucketGraph::<PedersenHasher>::new(32, 5, 0, new_seed());\n        let tree2 = graph2.merkle_tree(data2.as_slice()).unwrap();\n\n        let challenges = vec![rng.gen_range(0, leaves), rng.gen_range(0, leaves)];\n        let challenged_sectors = &[0, 0];\n\n        let pub_inputs = porc::PublicInputs {\n            challenges: &challenges,\n            challenged_sectors,\n            commitments: &[tree1.root(), tree2.root()],\n        };\n\n        let priv_inputs = porc::PrivateInputs::<PedersenHasher> {\n            trees: 
&[&tree1, &tree2],\n        };\n\n        let proof = PoRC::<PedersenHasher>::prove(&pub_params, &pub_inputs, &priv_inputs).unwrap();\n\n        assert!(PoRC::<PedersenHasher>::verify(&pub_params, &pub_inputs, &proof).unwrap());\n\n        // actual circuit test\n\n        let paths: Vec<_> = proof\n            .paths()\n            .iter()\n            .map(|p| {\n                p.iter()\n                    .map(|v| Some((v.0.into(), v.1)))\n                    .collect::<Vec<_>>()\n            })\n            .collect();\n        let challenged_leafs: Vec<_> = proof.leafs().iter().map(|l| Some((**l).into())).collect();\n        let commitments: Vec<_> = pub_inputs\n            .commitments\n            .iter()\n            .map(|c| Some((*c).into()))\n            .collect();\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        let instance = PoRCCircuit {\n            params,\n            challenges: challenges\n                .iter()\n                .map(|c| Some(u32_into_fr::<Bls12>(*c as u32)))\n                .collect(),\n            challenged_sectors: challenged_sectors.to_vec(),\n            challenged_leafs,\n            paths,\n            commitments,\n        };\n\n        instance\n            .synthesize(&mut cs)\n            .expect(\"failed to synthesize circuit\");\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n        assert_eq!(cs.num_inputs(), 1, \"wrong number of inputs\");\n        assert_eq!(cs.num_constraints(), 13824, \"wrong number of constraints\");\n        assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn porc_test_compound() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let leaves = 32;\n\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: &porc::SetupParams {\n                leaves,\n                sectors_count: 2,\n            },\n            engine_params: params,\n            partitions: None,\n        };\n\n        let pub_params =\n            PoRCCompound::<PedersenHasher>::setup(&setup_params).expect(\"setup failed\");\n\n        let data1: Vec<u8> = (0..32)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        let data2: Vec<u8> = (0..32)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph1 = BucketGraph::<PedersenHasher>::new(32, 5, 0, new_seed());\n        let tree1 = graph1.merkle_tree(data1.as_slice()).unwrap();\n\n        let graph2 = BucketGraph::<PedersenHasher>::new(32, 5, 0, new_seed());\n        let tree2 = graph2.merkle_tree(data2.as_slice()).unwrap();\n\n        let pub_inputs = porc::PublicInputs {\n            challenges: &vec![rng.gen_range(0, leaves), rng.gen_range(0, leaves)],\n            challenged_sectors: &[0, 1],\n            commitments: &[tree1.root(), tree2.root()],\n        };\n\n        let priv_inputs = porc::PrivateInputs::<PedersenHasher> {\n            trees: &[&tree1, &tree2],\n        };\n\n        let proof =\n            PoRCCompound::<PedersenHasher>::prove(&pub_params, &pub_inputs, &priv_inputs, None)\n                .expect(\"failed while proving\");\n\n        let (circuit, inputs) = PoRCCompound::<PedersenHasher>::circuit_for_test(\n            &pub_params,\n            &pub_inputs,\n            &priv_inputs,\n        );\n\n  
      let mut cs = TestConstraintSystem::new();\n\n        let _ = circuit.synthesize(&mut cs);\n        assert!(cs.is_satisfied());\n        assert!(cs.verify(&inputs));\n\n        let verified = PoRCCompound::<PedersenHasher>::verify(&pub_params, &pub_inputs, &proof)\n            .expect(\"failed while verifying\");\n\n        assert!(verified);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/ppor/mod.rs",
    "content": "use bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse sapling_crypto::circuit::{boolean, multipack, num, pedersen_hash};\nuse sapling_crypto::jubjub::JubjubEngine;\n\nuse crate::circuit::constraint;\n/// This is an instance of the `ParallelProofOfRetrievability` circuit.\n///\n/// # Public Inputs\n///\n/// This circuit expects the following public inputs.\n///\n/// * for i in 0..values.len()\n///   * [0] - packed version of `value` as bits. (might be more than one Fr)\n///   * [1] - packed version of the `is_right` components of the auth_path.\n///   * [2] - the merkle root of the tree.\npub struct ParallelProofOfRetrievability<'a, E: JubjubEngine> {\n    /// Paramters for the engine.\n    pub params: &'a E::Params,\n\n    /// Pedersen commitment to the value.\n    pub values: Vec<Option<E::Fr>>,\n\n    /// The authentication path of the commitment in the tree.\n    pub auth_paths: Vec<Vec<Option<(E::Fr, bool)>>>,\n\n    /// The root of the underyling merkle tree.\n    pub root: Option<E::Fr>,\n}\n\nimpl<'a, E: JubjubEngine> Circuit<E> for ParallelProofOfRetrievability<'a, E> {\n    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        assert_eq!(self.values.len(), self.auth_paths.len());\n\n        let real_root_value = self.root;\n\n        // Allocate the \"real\" root that will be exposed.\n        let rt = num::AllocatedNum::alloc(cs.namespace(|| \"root value\"), || {\n            real_root_value.ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        for i in 0..self.values.len() {\n            let mut cs = cs.namespace(|| format!(\"round {}\", i));\n            let params = self.params;\n            let value = self.values[i];\n            let auth_path = self.auth_paths[i].clone();\n\n            let value_num = num::AllocatedNum::alloc(cs.namespace(|| \"value\"), || {\n                value.ok_or_else(|| SynthesisError::AssignmentMissing)\n            })?;\n\n            value_num.inputize(cs.namespace(|| \"value num\"))?;\n\n            // This is an injective encoding, as cur is a\n            // point in the prime order subgroup.\n            let mut cur = value_num;\n\n            let mut auth_path_bits = Vec::with_capacity(auth_path.len());\n\n            // Ascend the merkle tree authentication path\n            for (i, e) in auth_path.into_iter().enumerate() {\n                let cs = &mut cs.namespace(|| format!(\"merkle tree hash {}\", i));\n\n                // Determines if the current subtree is the \"right\" leaf at this\n                // depth of the tree.\n                let cur_is_right = boolean::Boolean::from(boolean::AllocatedBit::alloc(\n                    cs.namespace(|| \"position bit\"),\n                    e.map(|e| e.1),\n                )?);\n\n                // Witness the authentication path element adjacent\n                // at this depth.\n                let path_element =\n                    num::AllocatedNum::alloc(cs.namespace(|| \"path element\"), || {\n                        Ok(e.ok_or(SynthesisError::AssignmentMissing)?.0)\n                    })?;\n\n                // Swap the two if the current subtree is on the right\n                let (xl, xr) = num::AllocatedNum::conditionally_reverse(\n                    cs.namespace(|| \"conditional reversal of preimage\"),\n                    &cur,\n                    &path_element,\n                    &cur_is_right,\n                )?;\n\n                // We don't need to be strict, because the function 
is\n                // collision-resistant. If the prover witnesses a congruency,\n                // they will be unable to find an authentication path in the\n                // tree with high probability.\n                let mut preimage = vec![];\n                preimage.extend(xl.into_bits_le(cs.namespace(|| \"xl into bits\"))?);\n                preimage.extend(xr.into_bits_le(cs.namespace(|| \"xr into bits\"))?);\n\n                // Compute the new subtree value\n                cur = pedersen_hash::pedersen_hash(\n                    cs.namespace(|| \"computation of pedersen hash\"),\n                    pedersen_hash::Personalization::MerkleTree(i),\n                    &preimage,\n                    params,\n                )?\n                .get_x()\n                .clone(); // Injective encoding\n\n                auth_path_bits.push(cur_is_right);\n            }\n\n            // allocate input for is_right auth_path\n            multipack::pack_into_inputs(cs.namespace(|| \"packed auth_path\"), &auth_path_bits)?;\n\n            {\n                // Validate that the root of the merkle tree that we calculated is the same as the input.\n                constraint::equal(&mut cs, || \"enforce root is correct\", &cur, &rt);\n            }\n        }\n\n        // Expose the root\n        rt.inputize(cs.namespace(|| \"root\"))?;\n\n        Ok(())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::circuit::test::*;\n    use crate::drgraph::{new_seed, BucketGraph, Graph};\n    use crate::fr32::{bytes_into_fr, fr_into_bytes};\n    use crate::hasher::pedersen::*;\n    use crate::merklepor;\n    use crate::proof::ProofScheme;\n    use crate::util::data_at_node;\n    use pairing::bls12_381::*;\n    use pairing::Field;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use sapling_crypto::jubjub::JubjubBls12;\n\n    #[test]\n    fn test_parallel_por_input_circuit_with_bls12_381() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let leaves = 16;\n        let pub_params = merklepor::PublicParams {\n            leaves,\n            private: false,\n        };\n\n        for _ in 0..5 {\n            let data: Vec<u8> = (0..leaves)\n                .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n                .collect();\n\n            let graph = BucketGraph::<PedersenHasher>::new(leaves, 6, 0, new_seed());\n            let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n            let pub_inputs: Vec<_> = (0..leaves)\n                .map(|i| merklepor::PublicInputs {\n                    challenge: i,\n                    commitment: Some(tree.root()),\n                })\n                .collect();\n            let priv_inputs: Vec<_> = (0..leaves)\n                .map(|i| {\n                    merklepor::PrivateInputs::<PedersenHasher>::new(\n                        bytes_into_fr::<Bls12>(\n                            data_at_node(data.as_slice(), pub_inputs[i].challenge).unwrap(),\n                        )\n                        .unwrap()\n                        .into(),\n                        &tree,\n                    )\n                })\n                .collect();\n\n            let proofs: Vec<_> = (0..leaves)\n                .map(|i| {\n                    merklepor::MerklePoR::<PedersenHasher>::prove(\n                        &pub_params,\n                        &pub_inputs[i],\n                        
&priv_inputs[i],\n                    )\n                    .unwrap()\n                })\n                .collect();\n\n            for i in 0..leaves {\n                // make sure it verifies\n                assert!(\n                    merklepor::MerklePoR::<PedersenHasher>::verify(\n                        &pub_params,\n                        &pub_inputs[i],\n                        &proofs[i]\n                    )\n                    .unwrap(),\n                    \"failed to verify merklepor proof\"\n                );\n            }\n\n            let auth_paths: Vec<_> = proofs.iter().map(|p| p.proof.as_options()).collect();\n            let values: Vec<_> = proofs.iter().map(|p| Some(p.data.into())).collect();\n\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let instance = ParallelProofOfRetrievability {\n                params,\n                values,\n                auth_paths,\n                root: Some(tree.root().into()),\n            };\n\n            instance\n                .synthesize(&mut cs)\n                .expect(\"failed to synthesize circuit\");\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n            assert_eq!(cs.num_inputs(), 34, \"wrong number of inputs\");\n            assert_eq!(cs.num_constraints(), 88497, \"wrong number of constraints\");\n            assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/sloth.rs",
    "content": "use bellman::{ConstraintSystem, SynthesisError};\nuse pairing::{Engine, Field};\nuse sapling_crypto::circuit::num;\n\nuse crate::circuit::constraint;\n\n/// Circuit version of sloth decoding.\npub fn decode<E, CS>(\n    mut cs: CS,\n    key: &num::AllocatedNum<E>,\n    ciphertext: Option<E::Fr>,\n    rounds: usize,\n) -> Result<num::AllocatedNum<E>, SynthesisError>\nwhere\n    E: Engine,\n    CS: ConstraintSystem<E>,\n{\n    let mut plaintext = num::AllocatedNum::alloc(cs.namespace(|| \"decoded\"), || {\n        Ok(ciphertext.ok_or_else(|| SynthesisError::AssignmentMissing)?)\n    })?;\n\n    for i in 0..rounds {\n        let cs = &mut cs.namespace(|| format!(\"round {}\", i));\n\n        let c = plaintext;\n        let c2 = c.square(cs.namespace(|| \"c^2\"))?;\n        let c4 = c2.square(cs.namespace(|| \"c^4\"))?;\n        let c5 = c4.mul(cs.namespace(|| \"c^5\"), &c)?;\n\n        plaintext = sub(cs.namespace(|| \"c^5 - k\"), &c5, key)?;\n    }\n\n    if rounds == 0 {\n        plaintext = sub(cs.namespace(|| \"plaintext - k\"), &plaintext, key)?;\n    }\n\n    Ok(plaintext)\n}\n\nfn sub<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    a: &num::AllocatedNum<E>,\n    b: &num::AllocatedNum<E>,\n) -> Result<num::AllocatedNum<E>, SynthesisError> {\n    let res = num::AllocatedNum::alloc(cs.namespace(|| \"sub num\"), || {\n        let mut tmp = a\n            .get_value()\n            .ok_or_else(|| SynthesisError::AssignmentMissing)?;\n        tmp.sub_assign(\n            &b.get_value()\n                .ok_or_else(|| SynthesisError::AssignmentMissing)?,\n        );\n\n        Ok(tmp)\n    })?;\n\n    // a - b = res\n    constraint::difference(&mut cs, || \"subtraction constraint\", &a, &b, &res);\n\n    Ok(res)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::circuit::test::TestConstraintSystem;\n    use crate::crypto::sloth;\n    use pairing::bls12_381::{Bls12, Fr};\n    use rand::{Rng, SeedableRng, XorShiftRng};\n\n    #[test]\n    fn sloth_snark_decode() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for _ in 0..10 {\n            let key: Fr = rng.gen();\n            let plaintext: Fr = rng.gen();\n            let ciphertext = sloth::encode::<Bls12>(&key, &plaintext, 10);\n\n            // Vanilla\n            let decrypted = sloth::decode::<Bls12>(&key, &ciphertext, 10);\n\n            assert_eq!(plaintext, decrypted, \"vanilla failed\");\n\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let key_num = num::AllocatedNum::alloc(cs.namespace(|| \"key\"), || Ok(key)).unwrap();\n            let out = decode(cs.namespace(|| \"sloth\"), &key_num, Some(ciphertext), 10).unwrap();\n\n            assert!(cs.is_satisfied());\n            assert_eq!(out.get_value().unwrap(), decrypted, \"no interop\");\n        }\n    }\n\n    #[test]\n    fn sloth_snark_decode_bad() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for _ in 0..10 {\n            let key: Fr = rng.gen();\n            let key_bad: Fr = rng.gen();\n            let plaintext: Fr = rng.gen();\n\n            let ciphertext = sloth::encode::<Bls12>(&key, &plaintext, 10);\n\n            let decrypted = sloth::decode::<Bls12>(&key, &ciphertext, 10);\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n            let key_bad_num =\n                num::AllocatedNum::alloc(cs.namespace(|| \"key bad\"), || Ok(key_bad)).unwrap();\n\n     
       let out = decode(cs.namespace(|| \"sloth\"), &key_bad_num, Some(ciphertext), 10).unwrap();\n\n            assert!(cs.is_satisfied());\n            assert_ne!(out.get_value().unwrap(), decrypted);\n        }\n    }\n\n    #[test]\n    fn sloth_snark_decode_different_iterations() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for _ in 0..10 {\n            let key: Fr = rng.gen();\n            let plaintext: Fr = rng.gen();\n\n            let ciphertext = sloth::encode::<Bls12>(&key, &plaintext, 10);\n            let decrypted = sloth::decode::<Bls12>(&key, &ciphertext, 10);\n\n            {\n                let mut cs = TestConstraintSystem::<Bls12>::new();\n                let key_num = num::AllocatedNum::alloc(cs.namespace(|| \"key\"), || Ok(key)).unwrap();\n\n                let out9 =\n                    decode(cs.namespace(|| \"sloth 9\"), &key_num, Some(ciphertext), 9).unwrap();\n\n                assert!(cs.is_satisfied());\n                assert_ne!(out9.get_value().unwrap(), decrypted);\n            }\n\n            {\n                let mut cs = TestConstraintSystem::<Bls12>::new();\n                let key_num = num::AllocatedNum::alloc(cs.namespace(|| \"key\"), || Ok(key)).unwrap();\n                let out10 =\n                    decode(cs.namespace(|| \"sloth 10\"), &key_num, Some(ciphertext), 10).unwrap();\n\n                assert!(cs.is_satisfied());\n                assert_eq!(out10.get_value().unwrap(), decrypted);\n            }\n\n            {\n                let mut cs = TestConstraintSystem::<Bls12>::new();\n                let key_num = num::AllocatedNum::alloc(cs.namespace(|| \"key\"), || Ok(key)).unwrap();\n                let out11 =\n                    decode(cs.namespace(|| \"sloth 11\"), &key_num, Some(ciphertext), 11).unwrap();\n\n                assert!(cs.is_satisfied());\n                assert_ne!(out11.get_value().unwrap(), decrypted);\n            }\n        }\n    }\n\n    #[test]\n    fn sub_constraint() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for _ in 0..100 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let a = num::AllocatedNum::alloc(cs.namespace(|| \"a\"), || Ok(rng.gen())).unwrap();\n            let b = num::AllocatedNum::alloc(cs.namespace(|| \"b\"), || Ok(rng.gen())).unwrap();\n\n            let res = sub(cs.namespace(|| \"a-b\"), &a, &b).unwrap();\n\n            let mut tmp = a.get_value().unwrap().clone();\n            tmp.sub_assign(&b.get_value().unwrap());\n\n            assert_eq!(res.get_value().unwrap(), tmp);\n            assert!(cs.is_satisfied());\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/test/mod.rs",
    "content": "use pairing::{Engine, Field, PrimeField, PrimeFieldRepr};\n\nuse bellman::{ConstraintSystem, Index, LinearCombination, SynthesisError, Variable};\n\nuse std::collections::HashMap;\nuse std::fmt::Write;\n\nuse byteorder::{BigEndian, ByteOrder};\nuse std::cmp::Ordering;\nuse std::collections::BTreeMap;\n\nuse blake2::{Blake2s, Digest};\n\n#[derive(Debug)]\nenum NamedObject {\n    Constraint(usize),\n    Var(Variable),\n    Namespace,\n}\n\n/// Constraint system for testing purposes.\npub struct TestConstraintSystem<E: Engine> {\n    named_objects: HashMap<String, NamedObject>,\n    current_namespace: Vec<String>,\n    constraints: Vec<(\n        LinearCombination<E>,\n        LinearCombination<E>,\n        LinearCombination<E>,\n        String,\n    )>,\n    inputs: Vec<(E::Fr, String)>,\n    aux: Vec<(E::Fr, String)>,\n}\n\n#[derive(Clone, Copy)]\nstruct OrderedVariable(Variable);\n\nimpl Eq for OrderedVariable {}\nimpl PartialEq for OrderedVariable {\n    fn eq(&self, other: &OrderedVariable) -> bool {\n        match (self.0.get_unchecked(), other.0.get_unchecked()) {\n            (Index::Input(ref a), Index::Input(ref b)) => a == b,\n            (Index::Aux(ref a), Index::Aux(ref b)) => a == b,\n            _ => false,\n        }\n    }\n}\nimpl PartialOrd for OrderedVariable {\n    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {\n        Some(self.cmp(other))\n    }\n}\nimpl Ord for OrderedVariable {\n    fn cmp(&self, other: &Self) -> Ordering {\n        match (self.0.get_unchecked(), other.0.get_unchecked()) {\n            (Index::Input(ref a), Index::Input(ref b)) => a.cmp(b),\n            (Index::Aux(ref a), Index::Aux(ref b)) => a.cmp(b),\n            (Index::Input(_), Index::Aux(_)) => Ordering::Less,\n            (Index::Aux(_), Index::Input(_)) => Ordering::Greater,\n        }\n    }\n}\n\nfn proc_lc<E: Engine>(terms: &[(Variable, E::Fr)]) -> BTreeMap<OrderedVariable, E::Fr> {\n    let mut map = BTreeMap::new();\n    for &(var, coeff) in terms {\n        map.entry(OrderedVariable(var))\n            .or_insert_with(E::Fr::zero)\n            .add_assign(&coeff);\n    }\n\n    // Remove terms that have a zero coefficient to normalize\n    let mut to_remove = vec![];\n    for (var, coeff) in map.iter() {\n        if coeff.is_zero() {\n            to_remove.push(var.clone())\n        }\n    }\n\n    for var in to_remove {\n        map.remove(&var);\n    }\n\n    map\n}\n\nfn hash_lc<E: Engine>(terms: &[(Variable, E::Fr)], h: &mut Blake2s) {\n    let map = proc_lc::<E>(terms);\n\n    let mut buf = [0u8; 9 + 32];\n    BigEndian::write_u64(&mut buf[0..8], map.len() as u64);\n    h.input(&buf[0..8]);\n\n    for (var, coeff) in map {\n        match var.0.get_unchecked() {\n            Index::Input(i) => {\n                buf[0] = b'I';\n                BigEndian::write_u64(&mut buf[1..9], i as u64);\n            }\n            Index::Aux(i) => {\n                buf[0] = b'A';\n                BigEndian::write_u64(&mut buf[1..9], i as u64);\n            }\n        }\n\n        coeff.into_repr().write_be(&mut buf[9..]).unwrap();\n\n        h.input(&buf[..]);\n    }\n}\n\nfn _eval_lc2<E: Engine>(terms: &[(Variable, E::Fr)], inputs: &[E::Fr], aux: &[E::Fr]) -> E::Fr {\n    let mut acc = E::Fr::zero();\n\n    for &(var, ref coeff) in terms {\n        let mut tmp = match var.get_unchecked() {\n            Index::Input(index) => inputs[index],\n            Index::Aux(index) => aux[index],\n        };\n\n        tmp.mul_assign(&coeff);\n        acc.add_assign(&tmp);\n   
 }\n\n    acc\n}\n\nfn eval_lc<E: Engine>(\n    terms: &[(Variable, E::Fr)],\n    inputs: &[(E::Fr, String)],\n    aux: &[(E::Fr, String)],\n) -> E::Fr {\n    let mut acc = E::Fr::zero();\n\n    for &(var, ref coeff) in terms {\n        let mut tmp = match var.get_unchecked() {\n            Index::Input(index) => inputs[index].0,\n            Index::Aux(index) => aux[index].0,\n        };\n\n        tmp.mul_assign(&coeff);\n        acc.add_assign(&tmp);\n    }\n\n    acc\n}\n\nimpl<E: Engine> Default for TestConstraintSystem<E> {\n    fn default() -> Self {\n        let mut map = HashMap::new();\n        map.insert(\n            \"ONE\".into(),\n            NamedObject::Var(TestConstraintSystem::<E>::one()),\n        );\n\n        TestConstraintSystem {\n            named_objects: map,\n            current_namespace: vec![],\n            constraints: vec![],\n            inputs: vec![(E::Fr::one(), \"ONE\".into())],\n            aux: vec![],\n        }\n    }\n}\n\nimpl<E: Engine> TestConstraintSystem<E> {\n    pub fn new() -> Self {\n        Default::default()\n    }\n\n    pub fn pretty_print(&self) -> String {\n        let mut s = String::new();\n\n        for input in &self.inputs {\n            writeln!(s, \"INPUT {}\", input.1).unwrap();\n        }\n        write!(s, \"\\n\\n\").unwrap();\n        for aux in &self.aux {\n            writeln!(s, \"AUX {}\", aux.1).unwrap();\n        }\n        write!(s, \"\\n\\n\").unwrap();\n\n        let negone = {\n            let mut tmp = E::Fr::one();\n            tmp.negate();\n            tmp\n        };\n\n        let powers_of_two = (0..E::Fr::NUM_BITS)\n            .map(|i| E::Fr::from_str(\"2\").unwrap().pow(&[u64::from(i)]))\n            .collect::<Vec<_>>();\n\n        let _pp = |s: &mut String, lc: &LinearCombination<E>| {\n            write!(s, \"(\").unwrap();\n            let mut is_first = true;\n            for (var, coeff) in proc_lc::<E>(lc.as_ref()) {\n                if coeff == negone {\n                    write!(s, \" - \").unwrap();\n                } else if !is_first {\n                    write!(s, \" + \").unwrap();\n                }\n                is_first = false;\n\n                if coeff != E::Fr::one() && coeff != negone {\n                    for (i, x) in powers_of_two.iter().enumerate() {\n                        if x == &coeff {\n                            write!(s, \"2^{} . \", i).unwrap();\n                            break;\n                        }\n                    }\n\n                    write!(s, \"{} . 
\", coeff).unwrap();\n                }\n\n                match var.0.get_unchecked() {\n                    Index::Input(i) => {\n                        write!(s, \"`{}`\", &self.inputs[i].1).unwrap();\n                    }\n                    Index::Aux(i) => {\n                        write!(s, \"`{}`\", &self.aux[i].1).unwrap();\n                    }\n                }\n            }\n            if is_first {\n                // Nothing was visited, print 0.\n                write!(s, \"0\").unwrap();\n            }\n            write!(s, \")\").unwrap();\n        };\n\n        for &(ref _a, ref _b, ref _c, ref name) in &self.constraints {\n            writeln!(&mut s).unwrap();\n\n            write!(&mut s, \"{}\", name).unwrap();\n            // TODO: we are removing this for now\n            // pp(&mut s, a);\n            // write!(&mut s, \" * \").unwrap();\n            // pp(&mut s, b);\n            // write!(&mut s, \" = \").unwrap();\n            // pp(&mut s, c);\n        }\n\n        writeln!(&mut s).unwrap();\n\n        s\n    }\n\n    pub fn hash(&self) -> String {\n        let mut h = Blake2s::new();\n        {\n            let mut buf = [0u8; 24];\n\n            BigEndian::write_u64(&mut buf[0..8], self.inputs.len() as u64);\n            BigEndian::write_u64(&mut buf[8..16], self.aux.len() as u64);\n            BigEndian::write_u64(&mut buf[16..24], self.constraints.len() as u64);\n            h.input(&buf);\n        }\n\n        for constraint in &self.constraints {\n            hash_lc::<E>(constraint.0.as_ref(), &mut h);\n            hash_lc::<E>(constraint.1.as_ref(), &mut h);\n            hash_lc::<E>(constraint.2.as_ref(), &mut h);\n        }\n\n        let mut s = String::new();\n        for b in h.result().as_ref() {\n            s += &format!(\"{:02x}\", b);\n        }\n\n        s\n    }\n\n    pub fn which_is_unsatisfied(&self) -> Option<&str> {\n        for &(ref a, ref b, ref c, ref path) in &self.constraints {\n            let mut a = eval_lc::<E>(a.as_ref(), &self.inputs, &self.aux);\n            let b = eval_lc::<E>(b.as_ref(), &self.inputs, &self.aux);\n            let c = eval_lc::<E>(c.as_ref(), &self.inputs, &self.aux);\n\n            a.mul_assign(&b);\n\n            if a != c {\n                return Some(&*path);\n            }\n        }\n\n        None\n    }\n\n    pub fn is_satisfied(&self) -> bool {\n        self.which_is_unsatisfied().is_none()\n    }\n\n    pub fn num_constraints(&self) -> usize {\n        self.constraints.len()\n    }\n\n    pub fn set(&mut self, path: &str, to: E::Fr) {\n        match self.named_objects.get(path) {\n            Some(&NamedObject::Var(ref v)) => match v.get_unchecked() {\n                Index::Input(index) => self.inputs[index].0 = to,\n                Index::Aux(index) => self.aux[index].0 = to,\n            },\n            Some(e) => panic!(\n                \"tried to set path `{}` to value, but `{:?}` already exists there.\",\n                path, e\n            ),\n            _ => panic!(\"no variable exists at path: {}\", path),\n        }\n    }\n\n    pub fn verify(&self, expected: &[E::Fr]) -> bool {\n        assert_eq!(expected.len() + 1, self.inputs.len());\n\n        for (a, b) in self.inputs.iter().skip(1).zip(expected.iter()) {\n            if &a.0 != b {\n                return false;\n            }\n        }\n\n        true\n    }\n\n    pub fn num_inputs(&self) -> usize {\n        self.inputs.len()\n    }\n\n    pub fn get_input(&mut self, index: usize, path: &str) -> E::Fr {\n      
  let (assignment, name) = self.inputs[index].clone();\n\n        assert_eq!(path, name);\n\n        assignment\n    }\n\n    pub fn get(&mut self, path: &str) -> E::Fr {\n        match self.named_objects.get(path) {\n            Some(&NamedObject::Var(ref v)) => match v.get_unchecked() {\n                Index::Input(index) => self.inputs[index].0,\n                Index::Aux(index) => self.aux[index].0,\n            },\n            Some(e) => panic!(\n                \"tried to get value of path `{}`, but `{:?}` exists there (not a variable)\",\n                path, e\n            ),\n            _ => panic!(\"no variable exists at path: {}\", path),\n        }\n    }\n\n    fn set_named_obj(&mut self, path: String, to: NamedObject) {\n        if self.named_objects.contains_key(&path) {\n            panic!(\"tried to create object at existing path: {}\", path);\n        }\n\n        self.named_objects.insert(path, to);\n    }\n}\n\nfn compute_path(ns: &[String], this: &str) -> String {\n    if this.chars().any(|a| a == '/') {\n        panic!(\"'/' is not allowed in names\");\n    }\n\n    let mut name = String::new();\n\n    let mut needs_separation = false;\n    for ns in ns.iter().chain(Some(this.to_string()).iter()) {\n        if needs_separation {\n            name += \"/\";\n        }\n\n        name += ns;\n        needs_separation = true;\n    }\n\n    name\n}\n\nimpl<E: Engine> ConstraintSystem<E> for TestConstraintSystem<E> {\n    type Root = Self;\n\n    fn alloc<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>\n    where\n        F: FnOnce() -> Result<E::Fr, SynthesisError>,\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n    {\n        let index = self.aux.len();\n        let path = compute_path(&self.current_namespace, &annotation().into());\n        self.aux.push((f()?, path.clone()));\n        let var = Variable::new_unchecked(Index::Aux(index));\n        self.set_named_obj(path, NamedObject::Var(var));\n\n        Ok(var)\n    }\n\n    fn alloc_input<F, A, AR>(&mut self, annotation: A, f: F) -> Result<Variable, SynthesisError>\n    where\n        F: FnOnce() -> Result<E::Fr, SynthesisError>,\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n    {\n        let index = self.inputs.len();\n        let path = compute_path(&self.current_namespace, &annotation().into());\n        self.inputs.push((f()?, path.clone()));\n        let var = Variable::new_unchecked(Index::Input(index));\n        self.set_named_obj(path, NamedObject::Var(var));\n\n        Ok(var)\n    }\n\n    fn enforce<A, AR, LA, LB, LC>(&mut self, annotation: A, a: LA, b: LB, c: LC)\n    where\n        A: FnOnce() -> AR,\n        AR: Into<String>,\n        LA: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n        LB: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n        LC: FnOnce(LinearCombination<E>) -> LinearCombination<E>,\n    {\n        let path = compute_path(&self.current_namespace, &annotation().into());\n        let index = self.constraints.len();\n        self.set_named_obj(path.clone(), NamedObject::Constraint(index));\n\n        let a = a(LinearCombination::zero());\n        let b = b(LinearCombination::zero());\n        let c = c(LinearCombination::zero());\n\n        self.constraints.push((a, b, c, path));\n    }\n\n    fn push_namespace<NR, N>(&mut self, name_fn: N)\n    where\n        NR: Into<String>,\n        N: FnOnce() -> NR,\n    {\n        let name = name_fn().into();\n        let path = compute_path(&self.current_namespace, 
&name);\n        self.set_named_obj(path.clone(), NamedObject::Namespace);\n        self.current_namespace.push(name);\n    }\n\n    fn pop_namespace(&mut self) {\n        assert!(self.current_namespace.pop().is_some());\n    }\n\n    fn get_root(&mut self) -> &mut Self::Root {\n        self\n    }\n}\n\n#[test]\nfn test_cs() {\n    use pairing::bls12_381::{Bls12, Fr};\n    use pairing::PrimeField;\n\n    let mut cs = TestConstraintSystem::<Bls12>::new();\n    assert!(cs.is_satisfied());\n    assert_eq!(cs.num_constraints(), 0);\n    let a = cs\n        .namespace(|| \"a\")\n        .alloc(|| \"var\", || Ok(Fr::from_str(\"10\").unwrap()))\n        .unwrap();\n    let b = cs\n        .namespace(|| \"b\")\n        .alloc(|| \"var\", || Ok(Fr::from_str(\"4\").unwrap()))\n        .unwrap();\n    let c = cs\n        .alloc(|| \"product\", || Ok(Fr::from_str(\"40\").unwrap()))\n        .unwrap();\n\n    cs.enforce(|| \"mult\", |lc| lc + a, |lc| lc + b, |lc| lc + c);\n    assert!(cs.is_satisfied());\n    assert_eq!(cs.num_constraints(), 1);\n\n    cs.set(\"a/var\", Fr::from_str(\"4\").unwrap());\n\n    let one = TestConstraintSystem::<Bls12>::one();\n    cs.enforce(|| \"eq\", |lc| lc + a, |lc| lc + one, |lc| lc + b);\n\n    assert!(!cs.is_satisfied());\n    assert!(cs.which_is_unsatisfied() == Some(\"mult\"));\n\n    assert!(cs.get(\"product\") == Fr::from_str(\"40\").unwrap());\n\n    cs.set(\"product\", Fr::from_str(\"16\").unwrap());\n    assert!(cs.is_satisfied());\n\n    {\n        let mut cs = cs.namespace(|| \"test1\");\n        let mut cs = cs.namespace(|| \"test2\");\n        cs.alloc(|| \"hehe\", || Ok(Fr::one())).unwrap();\n    }\n\n    assert!(cs.get(\"test1/test2/hehe\") == Fr::one());\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/variables.rs",
    "content": "use bellman::{ConstraintSystem, SynthesisError};\n\nuse pairing::Engine;\nuse sapling_crypto::circuit::num::AllocatedNum;\n\n/// Root represents a root commitment which may be either a raw value or an already-allocated number.\n/// This allows subcomponents to depend on roots which may optionally be shared with their parent\n/// or sibling components.\n#[derive(Clone)]\npub enum Root<E: Engine> {\n    Var(AllocatedNum<E>),\n    Val(Option<E::Fr>),\n}\n\nimpl<E: Engine> Root<E> {\n    pub fn allocated<CS: ConstraintSystem<E>>(\n        &self,\n        cs: CS,\n    ) -> Result<AllocatedNum<E>, SynthesisError> {\n        match self {\n            Root::Var(allocated) => Ok(allocated.clone()),\n            Root::Val(Some(fr)) => AllocatedNum::alloc(cs, || Ok(*fr)),\n            Root::Val(None) => Err(SynthesisError::AssignmentMissing),\n        }\n    }\n\n    pub fn var<CS: ConstraintSystem<E>>(cs: CS, fr: E::Fr) -> Self {\n        Root::Var(AllocatedNum::alloc(cs, || Ok(fr)).unwrap())\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/vdf_post.rs",
    "content": "use bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse pairing::bls12_381::{Bls12, Fr};\nuse pairing::Engine;\nuse sapling_crypto::circuit::num;\nuse sapling_crypto::jubjub::JubjubEngine;\n\nuse crate::circuit::constraint;\nuse crate::circuit::porc;\nuse crate::circuit::sloth;\nuse crate::compound_proof::{CircuitComponent, CompoundProof};\nuse crate::fr32::u32_into_fr;\nuse crate::hasher::Hasher;\nuse crate::parameter_cache::{CacheableParameters, ParameterSetIdentifier};\nuse crate::proof::ProofScheme;\nuse crate::vdf::Vdf;\nuse crate::vdf_post;\nuse crate::vdf_post::{compute_root_commitment, VDFPoSt};\n\n/// This is the `VDF-PoSt` circuit.\npub struct VDFPoStCircuit<'a, E: JubjubEngine> {\n    /// Paramters for the engine.\n    pub params: &'a E::Params,\n\n    pub challenge_seed: Option<E::Fr>,\n\n    // VDF\n    pub vdf_key: Option<E::Fr>,\n    pub vdf_ys: Vec<Option<E::Fr>>,\n    pub vdf_xs: Vec<Option<E::Fr>>,\n    pub vdf_sloth_rounds: usize,\n\n    // PoRCs\n    pub challenges_vec: Vec<Vec<usize>>,\n    pub challenged_sectors_vec: Vec<Vec<usize>>,\n    pub challenged_leafs_vec: Vec<Vec<Option<E::Fr>>>,\n    pub commitments_vec: Vec<Vec<Option<E::Fr>>>,\n    pub root_commitment: Option<E::Fr>,\n    pub paths_vec: Vec<Vec<Vec<Option<(E::Fr, bool)>>>>,\n}\n\npub struct VDFPostCompound {}\n\nimpl<E: JubjubEngine, C: Circuit<E>, P: ParameterSetIdentifier> CacheableParameters<E, C, P>\n    for VDFPostCompound\n{\n    fn cache_prefix() -> String {\n        String::from(\"vdf-post\")\n    }\n}\n\n#[derive(Clone, Default)]\npub struct ComponentPrivateInputs {}\n\nimpl<'a, E: JubjubEngine> CircuitComponent for VDFPoStCircuit<'a, E> {\n    type ComponentPrivateInputs = ComponentPrivateInputs;\n}\n\nimpl<'a, H, V> CompoundProof<'a, Bls12, VDFPoSt<H, V>, VDFPoStCircuit<'a, Bls12>>\n    for VDFPostCompound\nwhere\n    H: 'a + Hasher,\n    V: Vdf<H::Domain>,\n    <V as Vdf<H::Domain>>::PublicParams: Send + Sync,\n    <V as Vdf<H::Domain>>::Proof: Send + Sync,\n    V: Sync + Send,\n{\n    fn generate_public_inputs(\n        pub_in: &<VDFPoSt<H, V> as ProofScheme<'a>>::PublicInputs,\n        _pub_params: &<VDFPoSt<H, V> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Vec<Fr> {\n        let mut inputs: Vec<Fr> = Vec::new();\n        inputs.push(pub_in.challenge_seed.into());\n        inputs.push(compute_root_commitment(&pub_in.commitments).into());\n        inputs\n    }\n    fn circuit(\n        pub_in: &<VDFPoSt<H, V> as ProofScheme<'a>>::PublicInputs,\n        _component_private_inputs:<VDFPoStCircuit<'a, Bls12> as CircuitComponent>::ComponentPrivateInputs,\n        vanilla_proof: &<VDFPoSt<H, V> as ProofScheme<'a>>::Proof,\n        pub_params: &<VDFPoSt<H, V> as ProofScheme<'a>>::PublicParams,\n        engine_params: &'a <Bls12 as JubjubEngine>::Params,\n    ) -> VDFPoStCircuit<'a, Bls12> {\n        let vdf_ys = vanilla_proof\n            .ys\n            .iter()\n            .map(|y| Some(y.clone().into()))\n            .collect::<Vec<_>>();\n\n        let vdf_xs = vanilla_proof\n            .porep_proofs\n            .iter()\n            .take(vdf_ys.len())\n            .map(|p| Some(vdf_post::extract_vdf_input(p).into()))\n            .collect();\n\n        let mut paths_vec = Vec::new();\n        let mut challenged_leafs_vec = Vec::new();\n        let mut commitments_vec = Vec::new();\n\n        for porep_proof in &vanilla_proof.porep_proofs {\n            // -- paths\n            paths_vec.push(\n                porep_proof\n         
           .paths()\n                    .iter()\n                    .map(|p| {\n                        p.iter()\n                            .map(|v| Some((v.0.into(), v.1)))\n                            .collect::<Vec<_>>()\n                    })\n                    .collect::<Vec<_>>(),\n            );\n\n            // -- challenged leafs\n            challenged_leafs_vec.push(\n                porep_proof\n                    .leafs()\n                    .iter()\n                    .map(|l| Some((**l).into()))\n                    .collect::<Vec<_>>(),\n            );\n\n            // -- commitments\n            commitments_vec.push(\n                porep_proof\n                    .commitments()\n                    .iter()\n                    .map(|c| Some((**c).into()))\n                    .collect::<Vec<_>>(),\n            );\n        }\n\n        VDFPoStCircuit {\n            params: engine_params,\n            challenges_vec: vanilla_proof.challenges.clone(),\n            challenged_sectors_vec: vanilla_proof.challenged_sectors.clone(),\n            challenge_seed: Some(pub_in.challenge_seed.into()),\n            vdf_key: Some(V::key(&pub_params.pub_params_vdf).into()),\n            vdf_ys,\n            vdf_xs,\n            vdf_sloth_rounds: V::rounds(&pub_params.pub_params_vdf),\n            challenged_leafs_vec,\n            root_commitment: Some(compute_root_commitment(&pub_in.commitments).into()),\n            commitments_vec,\n            paths_vec,\n        }\n    }\n}\n\nimpl<'a, E: JubjubEngine> Circuit<E> for VDFPoStCircuit<'a, E> {\n    fn synthesize<CS: ConstraintSystem<E>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let params = self.params;\n        let vdf_key = self.vdf_key;\n        let vdf_ys = self.vdf_ys.clone();\n        let vdf_xs = self.vdf_xs.clone();\n        let vdf_sloth_rounds = self.vdf_sloth_rounds;\n        let challenges_vec = self.challenges_vec.clone();\n        let challenged_sectors_vec = self.challenged_sectors_vec.clone();\n        let challenged_leafs_vec = self.challenged_leafs_vec.clone();\n        let commitments_vec = self.commitments_vec.clone();\n        let paths_vec = self.paths_vec.clone();\n\n        let challenge_seed = cs.alloc_input(\n            || \"challenge_seed\",\n            || {\n                self.challenge_seed\n                    .ok_or_else(|| SynthesisError::AssignmentMissing)\n            },\n        )?;\n        cs.alloc_input(\n            || \"root_commitment\",\n            || {\n                self.root_commitment\n                    .ok_or_else(|| SynthesisError::AssignmentMissing)\n            },\n        )?;\n\n        // VDF Output Verification\n        assert_eq!(vdf_xs.len(), vdf_ys.len());\n\n        let vdf_key = num::AllocatedNum::alloc(cs.namespace(|| \"vdf_key\"), || {\n            vdf_key.ok_or_else(|| SynthesisError::AssignmentMissing)\n        })?;\n\n        for (i, (y, x)) in vdf_ys.iter().zip(vdf_xs.iter()).enumerate() {\n            {\n                // VDF Verification\n                let mut cs = cs.namespace(|| format!(\"vdf_verification_round_{}\", i));\n\n                // FIXME: make this a generic call to Vdf proof circuit function.\n                let decoded = sloth::decode(\n                    cs.namespace(|| \"sloth_decode\"),\n                    &vdf_key,\n                    *y,\n                    vdf_sloth_rounds,\n                )?;\n\n                let x_alloc = num::AllocatedNum::alloc(cs.namespace(|| \"x\"), || {\n                    
x.ok_or_else(|| SynthesisError::AssignmentMissing)\n                })?;\n\n                constraint::equal(&mut cs, || \"equality\", &x_alloc, &decoded);\n\n                let partial_challenge = x;\n\n                // Challenge Verification\n                if i == 0 {\n                    verify_challenges(\n                        &mut cs,\n                        // Should be CHALLENGES, not CHALLENGED_LEAFS.\n                        challenged_leafs_vec[i]\n                            .iter()\n                            .map(|l| (*l).unwrap())\n                            .collect::<Vec<_>>(),\n                        partial_challenge,\n                        Some(challenge_seed), // First iteration uses supplied challenge seed.\n                        paths_vec[i][0].len(),\n                    );\n                } else {\n                    verify_challenges(\n                        &mut cs,\n                        challenged_leafs_vec[i]\n                            .iter()\n                            .map(|l| (*l).unwrap())\n                            .collect::<Vec<_>>(),\n                        partial_challenge,\n                        *y, // Subsequent iterations use computed Vdf result\n                        paths_vec[i][0].len(),\n                    );\n                }\n            }\n\n            // TODO: VDF Input Verification\n            // Verify that proof leaves hash to next vdf input.\n\n            // TODO: Root Commitment verification.\n            // Skip for now, but this is an absence that needs to be addressed once we have a vector commitment strategy.\n        }\n\n        // PoRC Verification\n        assert_eq!(challenged_leafs_vec.len(), commitments_vec.len());\n        assert_eq!(paths_vec.len(), commitments_vec.len());\n\n        for (i, (challenged_leafs, (commitments, paths))) in challenged_leafs_vec\n            .iter()\n            .zip(commitments_vec.iter().zip(paths_vec.iter()))\n            .enumerate()\n        {\n            let mut cs = cs.namespace(|| format!(\"porc_verification_round_{}\", i));\n            porc::PoRCCircuit::synthesize(\n                &mut cs,\n                params,\n                challenges_vec[i]\n                    .iter()\n                    .map(|c| Some(u32_into_fr::<E>(*c as u32)))\n                    .collect(),\n                challenged_sectors_vec[i].clone(),\n                challenged_leafs.to_vec(),\n                commitments.to_vec(),\n                paths.to_vec(),\n            )?;\n        }\n\n        Ok(())\n    }\n}\n\nfn verify_challenges<E: Engine, CS: ConstraintSystem<E>, T>(\n    _cs: &mut CS,\n    _challenges: Vec<E::Fr>,\n    _partial_challenge: &Option<E::Fr>,\n    // This is generic because it needs to work with a public input (challenge seed) on first iteration\n    // then an allocated number subsequently.\n    _mix: T,\n    _challenge_bits: usize,\n) -> bool {\n    // TODO: Actually verify that challenges are correctly derived.\n    // Verification algorithm is implemented and tested in vdf_post::verify_final_challenge_derivation.\n    // NOTE: verification as designed here requires that all challenges (N) extractable from one partial_challenge\n    // are used. 
If challenge_count is not a multiple of this N, the surplus challenges will still be needed for verification,\n    // even if unused.\n    true\n}\n\nimpl<'a, E: JubjubEngine> VDFPoStCircuit<'a, E> {\n    pub fn synthesize<CS: ConstraintSystem<E>>(\n        cs: &mut CS,\n        params: &E::Params,\n        challenge_seed: Option<E::Fr>,\n        vdf_key: Option<E::Fr>,\n        vdf_ys: Vec<Option<E::Fr>>,\n        vdf_xs: Vec<Option<E::Fr>>,\n        vdf_sloth_rounds: usize,\n        challenges_vec: Vec<Vec<usize>>,\n        challenged_sectors_vec: Vec<Vec<usize>>,\n        challenged_leafs_vec: Vec<Vec<Option<E::Fr>>>,\n        root_commitment: Option<E::Fr>,\n        commitments_vec: Vec<Vec<Option<E::Fr>>>,\n        paths_vec: Vec<Vec<Vec<Option<(E::Fr, bool)>>>>,\n    ) -> Result<(), SynthesisError> {\n        VDFPoStCircuit {\n            params,\n            challenges_vec,\n            challenged_sectors_vec,\n            challenge_seed,\n            vdf_key,\n            vdf_ys,\n            vdf_xs,\n            vdf_sloth_rounds,\n            challenged_leafs_vec,\n            root_commitment,\n            commitments_vec,\n            paths_vec,\n        }\n        .synthesize(cs)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use pairing::Field;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use sapling_crypto::jubjub::JubjubBls12;\n\n    use crate::circuit::test::*;\n    use crate::compound_proof;\n    use crate::drgraph::{new_seed, BucketGraph, Graph};\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::pedersen::*;\n    use crate::proof::ProofScheme;\n    use crate::vdf_post;\n    use crate::vdf_sloth;\n\n    #[test]\n    fn test_vdf_post_circuit_with_bls12_381() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let lambda = 32;\n\n        let sp = vdf_post::SetupParams::<PedersenDomain, vdf_sloth::Sloth> {\n            challenge_count: 10,\n            sector_size: 1024 * lambda,\n            post_epochs: 3,\n            setup_params_vdf: vdf_sloth::SetupParams {\n                key: rng.gen(),\n                rounds: 1,\n            },\n            sectors_count: 2,\n        };\n\n        let pub_params = vdf_post::VDFPoSt::<PedersenHasher, vdf_sloth::Sloth>::setup(&sp).unwrap();\n\n        let data0: Vec<u8> = (0..1024)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        let data1: Vec<u8> = (0..1024)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph0 = BucketGraph::<PedersenHasher>::new(1024, 5, 0, new_seed());\n        let tree0 = graph0.merkle_tree(data0.as_slice()).unwrap();\n        let graph1 = BucketGraph::<PedersenHasher>::new(1024, 5, 0, new_seed());\n        let tree1 = graph1.merkle_tree(data1.as_slice()).unwrap();\n\n        let pub_inputs = vdf_post::PublicInputs {\n            challenge_seed: rng.gen(),\n            commitments: vec![tree0.root(), tree1.root()],\n        };\n\n        let trees = [&tree0, &tree1];\n        let priv_inputs = vdf_post::PrivateInputs::new(&trees[..]);\n\n        let proof = vdf_post::VDFPoSt::<PedersenHasher, vdf_sloth::Sloth>::prove(\n            &pub_params,\n            &pub_inputs,\n            &priv_inputs,\n        )\n        .unwrap();\n\n        assert!(\n            vdf_post::VDFPoSt::<PedersenHasher, vdf_sloth::Sloth>::verify(\n                &pub_params,\n                
&pub_inputs,\n                &proof\n            )\n            .unwrap()\n        );\n\n        // actual circuit test\n\n        let vdf_ys = proof\n            .ys\n            .iter()\n            .map(|y| Some(y.clone().into()))\n            .collect::<Vec<_>>();\n        let vdf_xs = proof\n            .porep_proofs\n            .iter()\n            .take(vdf_ys.len())\n            .map(|p| Some(vdf_post::extract_vdf_input::<PedersenHasher>(p).into()))\n            .collect();\n\n        let mut paths_vec = Vec::new();\n        let mut challenged_leafs_vec = Vec::new();\n        let mut commitments_vec = Vec::new();\n\n        for porep_proof in &proof.porep_proofs {\n            // -- paths\n            paths_vec.push(\n                porep_proof\n                    .paths()\n                    .iter()\n                    .map(|p| {\n                        p.iter()\n                            .map(|v| Some((v.0.into(), v.1)))\n                            .collect::<Vec<_>>()\n                    })\n                    .collect::<Vec<_>>(),\n            );\n\n            // -- challenged leafs\n            challenged_leafs_vec.push(\n                porep_proof\n                    .leafs()\n                    .iter()\n                    .map(|l| Some((**l).into()))\n                    .collect::<Vec<_>>(),\n            );\n\n            // -- commitments\n            commitments_vec.push(\n                porep_proof\n                    .commitments()\n                    .iter()\n                    .map(|c| Some((**c).into()))\n                    .collect::<Vec<_>>(),\n            );\n        }\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        let instance = VDFPoStCircuit {\n            params,\n            challenges_vec: proof.challenges,\n            challenged_sectors_vec: proof.challenged_sectors,\n            challenge_seed: Some(pub_inputs.challenge_seed.into()),\n            vdf_key: Some(pub_params.pub_params_vdf.key.into()),\n            vdf_xs,\n            vdf_ys,\n            vdf_sloth_rounds: pub_params.pub_params_vdf.rounds,\n            challenged_leafs_vec,\n            paths_vec,\n            root_commitment: Some(compute_root_commitment(&pub_inputs.commitments).into()),\n            commitments_vec,\n        };\n\n        instance\n            .synthesize(&mut cs)\n            .expect(\"failed to synthesize circuit\");\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n        assert_eq!(cs.num_inputs(), 3, \"wrong number of inputs\");\n        assert_eq!(cs.num_constraints(), 276450, \"wrong number of constraints\");\n        assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_vdf_post_compound() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let lambda = 32;\n\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: &vdf_post::SetupParams::<PedersenDomain, vdf_sloth::Sloth> {\n                challenge_count: 3,\n                sector_size: 1024 * lambda,\n                post_epochs: 3,\n                setup_params_vdf: vdf_sloth::SetupParams {\n                    key: rng.gen(),\n                    rounds: 1,\n                },\n                sectors_count: 2,\n            },\n            engine_params: params,\n            partitions: None,\n        };\n\n       
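 // Set up compound public parameters (vanilla + circuit) for VDF-PoSt.\n       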
 let pub_params: compound_proof::PublicParams<\n            _,\n            vdf_post::VDFPoSt<PedersenHasher, vdf_sloth::Sloth>,\n        > = VDFPostCompound::setup(&setup_params).expect(\"setup failed\");\n\n        let data0: Vec<u8> = (0..1024)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        let data1: Vec<u8> = (0..1024)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph0 = BucketGraph::<PedersenHasher>::new(1024, 5, 0, new_seed());\n        let tree0 = graph0.merkle_tree(data0.as_slice()).unwrap();\n        let graph1 = BucketGraph::<PedersenHasher>::new(1024, 5, 0, new_seed());\n        let tree1 = graph1.merkle_tree(data1.as_slice()).unwrap();\n\n        let pub_inputs = vdf_post::PublicInputs {\n            challenge_seed: rng.gen(),\n            commitments: vec![tree0.root(), tree1.root()],\n        };\n\n        let trees = [&tree0, &tree1];\n        let priv_inputs = vdf_post::PrivateInputs::<PedersenHasher>::new(&trees[..]);\n\n        // Without the proving and verifying section below, this test doesn't do much.\n        // However, the test cannot pass until generate_public_inputs is fully implemented,\n        // which is currently blocked on a clearer sense of how the circuit should behave.\n\n        let proof = VDFPostCompound::prove(&pub_params, &pub_inputs, &priv_inputs, None)\n            .expect(\"failed while proving\");\n\n        let (circuit, inputs) =\n            VDFPostCompound::circuit_for_test(&pub_params, &pub_inputs, &priv_inputs);\n\n        let mut cs = TestConstraintSystem::new();\n\n        let _ = circuit.synthesize(&mut cs);\n        assert!(cs.is_satisfied());\n        assert!(cs.verify(&inputs));\n\n        let verified = VDFPostCompound::verify(&pub_params, &pub_inputs, &proof)\n            .expect(\"failed while verifying\");\n\n        assert!(verified);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/xor.rs",
    "content": "use bellman::{ConstraintSystem, SynthesisError};\nuse sapling_crypto::circuit::boolean::Boolean;\nuse sapling_crypto::jubjub::JubjubEngine;\n\npub fn xor<E, CS>(\n    cs: &mut CS,\n    key: &[Boolean],\n    input: &[Boolean],\n) -> Result<Vec<Boolean>, SynthesisError>\nwhere\n    E: JubjubEngine,\n    CS: ConstraintSystem<E>,\n{\n    let key_len = key.len();\n    assert_eq!(key_len, 32 * 8);\n\n    input\n        .iter()\n        .enumerate()\n        .map(|(i, byte)| {\n            Boolean::xor(\n                cs.namespace(|| format!(\"xor bit: {}\", i)),\n                byte,\n                &key[i % key_len],\n            )\n        })\n        .collect::<Result<Vec<_>, SynthesisError>>()\n}\n\n#[cfg(test)]\nmod tests {\n    use super::xor;\n    use crate::circuit::test::TestConstraintSystem;\n    use crate::crypto;\n    use crate::util::{bits_to_bytes, bytes_into_boolean_vec};\n    use bellman::ConstraintSystem;\n    use pairing::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use sapling_crypto::circuit::boolean::Boolean;\n\n    #[test]\n    fn test_xor_input_circut() {\n        let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for i in 0..10 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect();\n            let data: Vec<u8> = (0..(i + 1) * 32).map(|_| rng.gen()).collect();\n\n            let key_bits: Vec<Boolean> = {\n                let mut cs = cs.namespace(|| \"key\");\n                bytes_into_boolean_vec(&mut cs, Some(key.as_slice()), key.len()).unwrap()\n            };\n\n            let data_bits: Vec<Boolean> = {\n                let mut cs = cs.namespace(|| \"data bits\");\n                bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), data.len()).unwrap()\n            };\n\n            let out_bits = xor(&mut cs, key_bits.as_slice(), data_bits.as_slice()).unwrap();\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n            assert_eq!(out_bits.len(), data_bits.len(), \"invalid output length\");\n\n            // convert Vec<Boolean> to Vec<u8>\n            let actual = bits_to_bytes(\n                out_bits\n                    .iter()\n                    .map(|v| v.get_value().unwrap())\n                    .collect::<Vec<bool>>()\n                    .as_slice(),\n            );\n\n            let expected = crypto::xor::encode(key.as_slice(), data.as_slice()).unwrap();\n\n            assert_eq!(expected, actual, \"circuit and non circuit do not match\");\n\n            // -- roundtrip\n            let roundtrip_bits = {\n                let mut cs = cs.namespace(|| \"roundtrip\");\n                xor(&mut cs, key_bits.as_slice(), out_bits.as_slice()).unwrap()\n            };\n\n            let roundtrip = bits_to_bytes(\n                roundtrip_bits\n                    .iter()\n                    .map(|v| v.get_value().unwrap())\n                    .collect::<Vec<bool>>()\n                    .as_slice(),\n            );\n\n            assert_eq!(data, roundtrip, \"failed to roundtrip\");\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/circuit/zigzag.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellman::{Circuit, ConstraintSystem, SynthesisError};\nuse pairing::bls12_381::{Bls12, Fr};\nuse sapling_crypto::circuit::num;\nuse sapling_crypto::jubjub::JubjubEngine;\n\nuse crate::circuit::constraint;\nuse crate::circuit::drgporep::{ComponentPrivateInputs, DrgPoRepCompound};\nuse crate::circuit::pedersen::pedersen_md_no_padding;\nuse crate::circuit::variables::Root;\nuse crate::compound_proof::{CircuitComponent, CompoundProof};\nuse crate::drgporep::{self, DrgPoRep};\nuse crate::drgraph::{graph_height, Graph};\nuse crate::hasher::{Domain, Hasher};\nuse crate::layered_drgporep::{self, Layers as LayersTrait};\nuse crate::parameter_cache::{CacheableParameters, ParameterSetIdentifier};\nuse crate::porep;\nuse crate::proof::ProofScheme;\nuse crate::util::bytes_into_boolean_vec;\nuse crate::zigzag_drgporep::ZigZagDrgPoRep;\n\ntype Layers<'a, H, G> = Vec<(\n    <DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicInputs,\n    Option<<DrgPoRep<'a, H, G> as ProofScheme<'a>>::Proof>,\n)>;\n\n/// ZigZag DRG based Proof of Replication.\n///\n/// # Fields\n///\n/// * `params` - parameters for the curve\n/// * `public_params` - ZigZagDrgPoRep public parameters.\n/// * 'layers' - A vector of Layers – each representing a DrgPoRep proof (see Layers type definition).\n///\npub struct ZigZagCircuit<'a, E: JubjubEngine, H: 'static + Hasher> {\n    params: &'a E::Params,\n    public_params: <ZigZagDrgPoRep<'a, H> as ProofScheme<'a>>::PublicParams,\n    layers: Layers<\n        'a,\n        <ZigZagDrgPoRep<'a, H> as LayersTrait>::Hasher,\n        <ZigZagDrgPoRep<'a, H> as LayersTrait>::Graph,\n    >,\n    tau: porep::Tau<<<ZigZagDrgPoRep<'a, H> as LayersTrait>::Hasher as Hasher>::Domain>,\n    comm_r_star: H::Domain,\n    _e: PhantomData<E>,\n}\n\nimpl<'a, E: JubjubEngine, H: Hasher> CircuitComponent for ZigZagCircuit<'a, E, H> {\n    type ComponentPrivateInputs = ();\n}\n\nimpl<'a, H: Hasher> ZigZagCircuit<'a, Bls12, H> {\n    pub fn synthesize<CS>(\n        mut cs: CS,\n        params: &'a <Bls12 as JubjubEngine>::Params,\n        public_params: <ZigZagDrgPoRep<'a, H> as ProofScheme<'a>>::PublicParams,\n        layers: Layers<\n            'a,\n            <ZigZagDrgPoRep<H> as LayersTrait>::Hasher,\n            <ZigZagDrgPoRep<H> as LayersTrait>::Graph,\n        >,\n        tau: porep::Tau<<<ZigZagDrgPoRep<H> as LayersTrait>::Hasher as Hasher>::Domain>,\n        comm_r_star: H::Domain,\n    ) -> Result<(), SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        let circuit = ZigZagCircuit::<'a, Bls12, H> {\n            params,\n            public_params,\n            layers,\n            tau,\n            comm_r_star,\n            _e: PhantomData,\n        };\n\n        circuit.synthesize(&mut cs)\n    }\n}\n\nimpl<'a, H: Hasher> Circuit<Bls12> for ZigZagCircuit<'a, Bls12, H> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let graph = self.public_params.drg_porep_public_params.graph.clone();\n        let mut crs_input = vec![0u8; 32 * (self.layers.len() + 1)];\n\n        self.layers[0]\n            .0\n            .replica_id\n            .write_bytes(&mut crs_input[0..32])\n            .expect(\"failed to write vec\");\n\n        let public_comm_d_raw = self.tau.comm_d;\n\n        let public_comm_d =\n            num::AllocatedNum::alloc(cs.namespace(|| \"public comm_d value\"), || {\n                Ok(public_comm_d_raw.into())\n            })?;\n\n        
public_comm_d.inputize(cs.namespace(|| \"zigzag comm_d\"))?;\n\n        let public_comm_r =\n            num::AllocatedNum::alloc(cs.namespace(|| \"public comm_r value\"), || {\n                Ok(self.tau.comm_r.into())\n            })?;\n\n        public_comm_r.inputize(cs.namespace(|| \"zigzag comm_r\"))?;\n\n        // Yuck. This will never be used, but we need an initial value to satisfy the compiler.\n        let mut previous_comm_r_var = Root::Val(Some(public_comm_d_raw.into()));\n\n        for (l, (public_inputs, layer_proof)) in self.layers.iter().enumerate() {\n            let first_layer = l == 0;\n            let last_layer = l == self.layers.len() - 1;\n\n            let height = graph_height(graph.size());\n            let proof = match layer_proof {\n                Some(wrapped_proof) => {\n                    let typed_proof: drgporep::Proof<<ZigZagDrgPoRep<H> as LayersTrait>::Hasher> =\n                        wrapped_proof.into();\n                    typed_proof\n                }\n                // Synthesize a default drgporep if none is supplied – for use in tests, etc.\n                None => drgporep::Proof::new_empty(height, graph.degree()),\n            };\n\n            let comm_r = proof.replica_root;\n\n            let comm_d_var = if first_layer {\n                Root::Var(public_comm_d.clone())\n            } else {\n                previous_comm_r_var\n            };\n\n            let comm_r_var = if last_layer {\n                Root::Var(public_comm_r.clone())\n            } else {\n                Root::var(\n                    &mut cs.namespace(|| format!(\"layer {} comm_r\", l)),\n                    comm_r.into(),\n                )\n            };\n            previous_comm_r_var = comm_r_var.clone();\n\n            comm_r\n                .write_bytes(&mut crs_input[(l + 1) * 32..(l + 2) * 32])\n                .expect(\"failed to write vec\");\n\n            // TODO: As an optimization, we may be able to skip proving the original data\n            // on some (50%?) 
of challenges.\n            let circuit = DrgPoRepCompound::circuit(\n                public_inputs,\n                ComponentPrivateInputs {\n                    comm_d: Some(comm_d_var),\n                    comm_r: Some(comm_r_var),\n                },\n                &proof,\n                &self.public_params.drg_porep_public_params,\n                self.params,\n            );\n            circuit.synthesize(&mut cs.namespace(|| format!(\"zigzag layer {}\", l)))?;\n        }\n\n        let crs_boolean = bytes_into_boolean_vec(\n            cs.namespace(|| \"comm_r_star boolean\"),\n            Some(&crs_input),\n            8 * crs_input.len(),\n        )?;\n\n        let computed_comm_r_star =\n            pedersen_md_no_padding(cs.namespace(|| \"comm_r_star\"), self.params, &crs_boolean)?;\n\n        let public_comm_r_star =\n            num::AllocatedNum::alloc(cs.namespace(|| \"public comm_r_star value\"), || {\n                Ok(self.comm_r_star.into())\n            })?;\n\n        constraint::equal(\n            cs,\n            || \"enforce comm_r_star is correct\",\n            &computed_comm_r_star,\n            &public_comm_r_star,\n        );\n\n        public_comm_r_star.inputize(cs.namespace(|| \"zigzag comm_r_star\"))?;\n\n        Ok(())\n    }\n}\n\n#[allow(dead_code)]\npub struct ZigZagCompound {\n    partitions: Option<usize>,\n}\n\nimpl<E: JubjubEngine, C: Circuit<E>, P: ParameterSetIdentifier> CacheableParameters<E, C, P>\n    for ZigZagCompound\n{\n    fn cache_prefix() -> String {\n        String::from(\"zigzag-proof-of-replication\")\n    }\n}\n\nimpl<'a, H: 'static + Hasher>\n    CompoundProof<'a, Bls12, ZigZagDrgPoRep<'a, H>, ZigZagCircuit<'a, Bls12, H>>\n    for ZigZagCompound\n{\n    fn generate_public_inputs(\n        pub_in: &<ZigZagDrgPoRep<H> as ProofScheme>::PublicInputs,\n        pub_params: &<ZigZagDrgPoRep<H> as ProofScheme>::PublicParams,\n        k: Option<usize>,\n    ) -> Vec<Fr> {\n        let mut inputs = Vec::new();\n\n        let mut drgporep_pub_params = drgporep::PublicParams::new(\n            pub_params.drg_porep_public_params.graph.clone(),\n            pub_params.drg_porep_public_params.sloth_iter,\n        );\n\n        let comm_d = pub_in.tau.unwrap().comm_d.into();\n        inputs.push(comm_d);\n\n        let comm_r = pub_in.tau.unwrap().comm_r.into();\n        inputs.push(comm_r);\n\n        for i in 0..pub_params.layers {\n            let drgporep_pub_inputs = drgporep::PublicInputs {\n                replica_id: pub_in.replica_id,\n                challenges: pub_in.challenges(\n                    pub_params.drg_porep_public_params.graph.size(),\n                    i as u8,\n                    k,\n                ),\n                tau: None,\n            };\n            let drgporep_inputs = DrgPoRepCompound::generate_public_inputs(\n                &drgporep_pub_inputs,\n                &drgporep_pub_params,\n                None,\n            );\n            inputs.extend(drgporep_inputs);\n\n            drgporep_pub_params = <ZigZagDrgPoRep<H> as layered_drgporep::Layers>::transform(\n                &drgporep_pub_params,\n                i,\n                pub_params.layers,\n            );\n        }\n        inputs.push(pub_in.comm_r_star.into());\n        inputs\n    }\n\n    fn circuit<'b>(\n        public_inputs: &'b <ZigZagDrgPoRep<H> as ProofScheme>::PublicInputs,\n        _component_private_inputs: <ZigZagCircuit<'a, Bls12, H> as CircuitComponent>::ComponentPrivateInputs,\n        vanilla_proof: &'b 
<ZigZagDrgPoRep<H> as ProofScheme>::Proof,\n        public_params: &'b <ZigZagDrgPoRep<H> as ProofScheme>::PublicParams,\n        engine_params: &'a <Bls12 as JubjubEngine>::Params,\n    ) -> ZigZagCircuit<'a, Bls12, H> {\n        let layers = (0..(vanilla_proof.encoding_proofs.len()))\n            .map(|l| {\n                let layer_public_inputs = drgporep::PublicInputs {\n                    replica_id: public_inputs.replica_id,\n                    // Challenges are not used in circuit synthesis. Don't bother generating.\n                    challenges: vec![],\n                    tau: None,\n                };\n                let layer_proof = vanilla_proof.encoding_proofs[l].clone();\n                (layer_public_inputs, Some(layer_proof))\n            })\n            .collect();\n\n        let pp: <ZigZagDrgPoRep<H> as ProofScheme>::PublicParams = public_params.into();\n\n        ZigZagCircuit {\n            params: engine_params,\n            public_params: pp,\n            tau: public_inputs.tau.unwrap(),\n            comm_r_star: public_inputs.comm_r_star,\n            layers,\n            _e: PhantomData,\n        }\n    }\n\n    fn blank_circuit(\n        public_params: &<ZigZagDrgPoRep<H> as ProofScheme>::PublicParams,\n        engine_params: &'a <Bls12 as JubjubEngine>::Params,\n    ) -> ZigZagCircuit<'a, Bls12, H> {\n        use rand::{Rng, SeedableRng, XorShiftRng};\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n        let replica_id = rng.gen();\n\n        let layers = (0..public_params.layers)\n            .map(|_| {\n                let layer_public_inputs = drgporep::PublicInputs {\n                    replica_id,\n                    // Challenges are not used in circuit synthesis. 
Don't bother generating.\n                    challenges: vec![],\n                    tau: None,\n                };\n                (layer_public_inputs, None)\n            })\n            .collect();\n\n        let pp: <ZigZagDrgPoRep<H> as ProofScheme>::PublicParams = public_params.into();\n\n        ZigZagCircuit {\n            params: engine_params,\n            public_params: pp,\n            tau: porep::Tau {\n                comm_r: rng.gen(),\n                comm_d: rng.gen(),\n            },\n            comm_r_star: rng.gen(),\n            layers,\n            _e: PhantomData,\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::circuit::test::*;\n    use crate::compound_proof;\n    use crate::drgporep;\n    use crate::drgraph::new_seed;\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::pedersen::*;\n    use crate::layered_drgporep;\n    use crate::porep::PoRep;\n    use crate::proof::ProofScheme;\n    use crate::zigzag_graph::{ZigZag, ZigZagGraph};\n    use pairing::Field;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use sapling_crypto::jubjub::JubjubBls12;\n\n    #[test]\n    fn zigzag_drgporep_input_circuit_with_bls12_381() {\n        let params = &JubjubBls12::new();\n        let nodes = 5;\n        let degree = 1;\n        let expansion_degree = 2;\n        let challenge_count = 1;\n        let num_layers = 2;\n        let sloth_iter = 1;\n\n        let n = nodes; // FIXME: Consolidate variable names.\n\n        // TODO: The code in this section was copied directly from zigzag_drgporep::tests::prove_verify.\n        // We should refactor to share the code – ideally in such a way that we can just add\n        // methods and get the assembled tests for free.\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let replica_id: Fr = rng.gen();\n        let data: Vec<u8> = (0..n)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        // create a copy, so we can compare roundtrips\n        let mut data_copy = data.clone();\n        let sp = layered_drgporep::SetupParams {\n            drg_porep_setup_params: drgporep::SetupParams {\n                drg: drgporep::DrgParams {\n                    nodes: n,\n                    degree,\n                    expansion_degree,\n                    seed: new_seed(),\n                },\n                sloth_iter,\n            },\n            layers: num_layers,\n            challenge_count,\n        };\n\n        let pp = ZigZagDrgPoRep::setup(&sp).unwrap();\n        let (tau, aux) =\n            ZigZagDrgPoRep::replicate(&pp, &replica_id.into(), data_copy.as_mut_slice(), None)\n                .unwrap();\n        assert_ne!(data, data_copy);\n\n        let simplified_tau = tau.clone().simplify();\n\n        let pub_inputs = layered_drgporep::PublicInputs::<PedersenDomain> {\n            replica_id: replica_id.into(),\n            challenge_count,\n            tau: Some(tau.simplify().into()),\n            comm_r_star: tau.comm_r_star.into(),\n            k: None,\n        };\n\n        let priv_inputs = layered_drgporep::PrivateInputs::<PedersenHasher> {\n            replica: data.as_slice(),\n            aux: aux.into(),\n            tau: tau.layer_taus.into(),\n        };\n\n        let proofs =\n            ZigZagDrgPoRep::prove_all_partitions(&pp, &pub_inputs, &priv_inputs, 1).unwrap();\n        assert!(ZigZagDrgPoRep::verify_all_partitions(&pp, &pub_inputs, &proofs).unwrap());\n\n 
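       // The vanilla proofs above have been generated and verified; the circuit\n        // synthesized below must be satisfiable for the same statement.\n 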
       // End copied section.\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        ZigZagCompound::circuit(\n            &pub_inputs,\n            <ZigZagCircuit<Bls12, PedersenHasher> as CircuitComponent>::ComponentPrivateInputs::default(),\n            &proofs[0],\n            &pp,\n            params,\n        )\n        .synthesize(&mut cs.namespace(|| \"zigzag drgporep\"))\n        .expect(\"failed to synthesize circuit\");\n\n        if !cs.is_satisfied() {\n            println!(\n                \"failed to satisfy: {:?}\",\n                cs.which_is_unsatisfied().unwrap()\n            );\n        }\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n        assert_eq!(cs.num_inputs(), 16, \"wrong number of inputs\");\n        assert_eq!(cs.num_constraints(), 131097, \"wrong number of constraints\");\n\n        assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n        assert_eq!(\n            cs.get_input(1, \"zigzag drgporep/zigzag comm_d/input variable\"),\n            simplified_tau.comm_d.into(),\n        );\n\n        assert_eq!(\n            cs.get_input(2, \"zigzag drgporep/zigzag comm_r/input variable\"),\n            simplified_tau.comm_r.into(),\n        );\n\n        assert_eq!(\n            cs.get_input(3, \"zigzag drgporep/zigzag layer 0/replica_id/input 0\"),\n            replica_id.into(),\n        );\n\n        // This test was modeled on the equivalent test for the drgporep circuit.\n        // TODO: add assertions about other inputs.\n    }\n\n    #[test]\n    fn zigzag_input_circuit_num_constraints() {\n        let params = &JubjubBls12::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        // 1 GB\n        let n = (1 << 30) / 32;\n        let num_layers = 2;\n        let base_degree = 2;\n        let expansion_degree = 2;\n        let replica_id: Fr = rng.gen();\n        let challenge_count = 1;\n        let challenge = 1;\n        let sloth_iter = 2;\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let layers = (0..num_layers)\n            .map(|_l| {\n                // l is ignored because we assume uniform layers here.\n                let public_inputs = drgporep::PublicInputs {\n                    replica_id: replica_id.into(),\n                    challenges: vec![challenge],\n                    tau: None,\n                };\n                let proof = None;\n                (public_inputs, proof)\n            })\n            .collect();\n\n        let public_params = layered_drgporep::PublicParams {\n            drg_porep_public_params: drgporep::PublicParams::new(\n                ZigZagGraph::new_zigzag(n, base_degree, expansion_degree, new_seed()),\n                sloth_iter,\n            ),\n            layers: num_layers,\n            challenge_count,\n        };\n\n        ZigZagCircuit::<Bls12, PedersenHasher>::synthesize(\n            cs.namespace(|| \"zigzag_drgporep\"),\n            params,\n            public_params,\n            layers,\n            porep::Tau {\n                comm_r: rng.gen(),\n                comm_d: rng.gen(),\n            },\n            rng.gen(),\n        )\n        .expect(\"failed to synthesize circuit\");\n\n        assert_eq!(cs.num_inputs(), 18, \"wrong number of inputs\");\n        assert_eq!(cs.num_constraints(), 547539, \"wrong number of constraints\");\n    }\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn zigzag_test_compound() {\n        let 
params = &JubjubBls12::new();\n        let nodes = 5;\n        let degree = 2;\n        let expansion_degree = 1;\n        let challenge_count = 2;\n        let num_layers = 2;\n        let sloth_iter = 1;\n        let partition_count = 1;\n\n        let n = nodes; // FIXME: Consolidate variable names.\n\n        // TODO: The code in this section was copied directly from zigzag_drgporep::tests::prove_verify.\n        // We should refactor to share the code – ideally in such a way that we can just add\n        // methods and get the assembled tests for free.\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let replica_id: Fr = rng.gen();\n        let data: Vec<u8> = (0..n)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        // create a copy, so we can compare roundtrips\n        let mut data_copy = data.clone();\n\n        let setup_params = compound_proof::SetupParams {\n            engine_params: params,\n            vanilla_params: &layered_drgporep::SetupParams {\n                drg_porep_setup_params: drgporep::SetupParams {\n                    drg: drgporep::DrgParams {\n                        nodes: n,\n                        degree,\n                        expansion_degree,\n                        seed: new_seed(),\n                    },\n                    sloth_iter,\n                },\n                layers: num_layers,\n                challenge_count,\n            },\n            partitions: Some(partition_count),\n        };\n\n        let public_params = ZigZagCompound::setup(&setup_params).unwrap();\n        let (tau, aux) = ZigZagDrgPoRep::replicate(\n            &public_params.vanilla_params,\n            &replica_id.into(),\n            data_copy.as_mut_slice(),\n            None,\n        )\n        .unwrap();\n        assert_ne!(data, data_copy);\n\n        let public_inputs = layered_drgporep::PublicInputs::<PedersenDomain> {\n            replica_id: replica_id.into(),\n            challenge_count,\n            tau: Some(tau.simplify()),\n            comm_r_star: tau.comm_r_star,\n            k: None,\n        };\n        let private_inputs = layered_drgporep::PrivateInputs::<PedersenHasher> {\n            replica: data.as_slice(),\n            aux,\n            tau: tau.layer_taus,\n        };\n\n        // TODO: Move this to e.g. circuit::test::compound_helper and share between all compound proofs.\n        {\n            let (circuit, inputs) =\n                ZigZagCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs);\n\n            let mut cs = TestConstraintSystem::new();\n\n            let _ = circuit.synthesize(&mut cs);\n\n            assert!(cs.is_satisfied(), \"TestConstraintSystem was not satisfied\");\n            assert!(\n                cs.verify(&inputs),\n                \"verification failed with TestConstraintSystem and generated inputs\"\n            );\n        }\n\n        let proof = ZigZagCompound::prove(&public_params, &public_inputs, &private_inputs, None)\n            .expect(\"failed while proving\");\n\n        let verified = ZigZagCompound::verify(&public_params, &public_inputs, &proof)\n            .expect(\"failed while verifying\");\n\n        assert!(verified);\n    }\n}\n"
  },
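The zigzag tests above follow a recurring pattern: synthesize the circuit into a `TestConstraintSystem`, assert satisfiability, and pin the expected input/constraint counts. A minimal sketch of the same pattern on a hypothetical toy circuit (assumed to live in this crate's test code, so `crate::circuit::test::TestConstraintSystem` is in scope; the `Square` circuit is illustrative only):

```rust
use bellman::{Circuit, ConstraintSystem, SynthesisError};
use pairing::bls12_381::{Bls12, Fr};
use pairing::{Field, PrimeField};

use crate::circuit::test::TestConstraintSystem;

// Toy circuit: prove knowledge of a private x such that x * x = y, y public.
struct Square {
    x: Option<Fr>,
}

impl Circuit<Bls12> for Square {
    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {
        // Private witness x.
        let x = cs.alloc(|| "x", || self.x.ok_or(SynthesisError::AssignmentMissing))?;
        // Public input y = x^2.
        let y = cs.alloc_input(
            || "y",
            || {
                let mut y = self.x.ok_or(SynthesisError::AssignmentMissing)?;
                y.square();
                Ok(y)
            },
        )?;
        // Enforce the single multiplication gate x * x = y.
        cs.enforce(|| "x * x = y", |lc| lc + x, |lc| lc + x, |lc| lc + y);
        Ok(())
    }
}

#[test]
fn square_circuit_sanity() {
    let mut cs = TestConstraintSystem::<Bls12>::new();
    let circuit = Square {
        x: Fr::from_str("3"),
    };
    circuit.synthesize(&mut cs).expect("failed to synthesize");

    assert!(cs.is_satisfied(), "constraints not satisfied");
    assert_eq!(cs.num_inputs(), 2, "wrong number of inputs: ONE plus y");
    assert_eq!(cs.num_constraints(), 1, "wrong number of constraints");
    assert_eq!(cs.get_input(0, "ONE"), Fr::one());
}
```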
  {
    "path": "storage-proofs/src/compound_proof.rs",
    "content": "use rayon::prelude::*;\n\nuse crate::circuit::multi_proof::MultiProof;\nuse crate::error::Result;\nuse crate::parameter_cache::{CacheableParameters, ParameterSetIdentifier};\nuse crate::partitions;\nuse crate::proof::ProofScheme;\nuse bellman::{groth16, Circuit};\nuse rand::{SeedableRng, XorShiftRng};\nuse sapling_crypto::jubjub::JubjubEngine;\n\npub struct SetupParams<'a, 'b: 'a, E: JubjubEngine, S: ProofScheme<'a>>\nwhere\n    <S as ProofScheme<'a>>::SetupParams: 'b,\n    E::Params: Sync,\n{\n    pub vanilla_params: &'b <S as ProofScheme<'a>>::SetupParams,\n    pub engine_params: &'a E::Params,\n    pub partitions: Option<usize>,\n}\n\n#[derive(Clone)]\npub struct PublicParams<'a, E: JubjubEngine, S: ProofScheme<'a>> {\n    pub vanilla_params: S::PublicParams,\n    pub engine_params: &'a E::Params,\n    pub partitions: Option<usize>,\n}\n\n/// CircuitComponent exists so parent components can pass private inputs to their subcomponents\n/// when calling CompoundProof::circuit directly. In general, there are no internal private inputs,\n/// and a default value will be passed. CompoundProof::circuit implementations should exhibit\n/// default behavior when passed a default ComponentPrivateinputs.\npub trait CircuitComponent {\n    type ComponentPrivateInputs: Default + Clone;\n}\n\n/// The CompoundProof trait bundles a proof::ProofScheme and a bellman::Circuit together.\n/// It provides methods equivalent to those provided by proof::ProofScheme (setup, prove, verify).\n/// See documentation at proof::ProofScheme for details.\n/// Implementations should generally only need to supply circuit and generate_public_inputs.\n/// The remaining trait methods are used internally and implement the necessary plumbing.\npub trait CompoundProof<'a, E: JubjubEngine, S: ProofScheme<'a>, C: Circuit<E> + CircuitComponent>\nwhere\n    S::Proof: Sync + Send,\n    S::PublicParams: ParameterSetIdentifier + Sync + Send,\n    S::PublicInputs: Clone + Sync,\n    Self: CacheableParameters<E, C, S::PublicParams>,\n{\n    // setup is equivalent to ProofScheme::setup.\n    fn setup<'b>(sp: &SetupParams<'a, 'b, E, S>) -> Result<PublicParams<'a, E, S>>\n    where\n        E::Params: Sync,\n    {\n        Ok(PublicParams {\n            vanilla_params: S::setup(sp.vanilla_params)?,\n            engine_params: sp.engine_params,\n            partitions: sp.partitions,\n        })\n    }\n\n    fn partition_count(public_params: &PublicParams<'a, E, S>) -> usize {\n        match public_params.partitions {\n            None => 1,\n            Some(0) => panic!(\"cannot specify zero partitions\"),\n            Some(k) => k,\n        }\n    }\n\n    /// prove is equivalent to ProofScheme::prove.\n    fn prove<'b>(\n        pub_params: &'b PublicParams<'a, E, S>,\n        pub_in: &'b S::PublicInputs,\n        priv_in: &'b S::PrivateInputs,\n        groth_params: Option<groth16::Parameters<E>>,\n    ) -> Result<MultiProof<E>>\n    where\n        E::Params: Sync,\n    {\n        let partitions = Self::partition_count(pub_params);\n        let partition_count = Self::partition_count(pub_params);\n\n        let vanilla_proofs =\n            S::prove_all_partitions(&pub_params.vanilla_params, &pub_in, priv_in, partitions)?;\n\n        // This will always run at least once, since there cannot be zero partitions.\n        assert!(partition_count > 0);\n\n        // If groth_params is None, generate once and share with each thread.\n        let actual_groth_params = match groth_params {\n            None => {\n              
  // TODO: eventually, don't generate random params here at all.\n                let rng =\n                    &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n                let circuit = Self::circuit(\n                    &pub_in,\n                    C::ComponentPrivateInputs::default(),\n                    // Use first vanilla proof to get circuit exemplar for parameter generation.\n                    &vanilla_proofs[0],\n                    &pub_params.vanilla_params,\n                    &pub_params.engine_params,\n                );\n                Self::get_groth_params(circuit, &pub_params.vanilla_params, rng)?\n            }\n            Some(gp) => gp,\n        };\n\n        let groth_proofs: Result<Vec<_>> = vanilla_proofs\n            .par_iter()\n            .map(|vanilla_proof| {\n                Self::circuit_proof(\n                    pub_in,\n                    &vanilla_proof,\n                    &pub_params.vanilla_params,\n                    &pub_params.engine_params,\n                    &actual_groth_params,\n                )\n            })\n            .collect();\n\n        Ok(MultiProof::new(groth_proofs?, actual_groth_params.clone()))\n    }\n\n    // verify is equivalent to ProofScheme::verify.\n    fn verify(\n        public_params: &PublicParams<'a, E, S>,\n        public_inputs: &S::PublicInputs,\n        multi_proof: &MultiProof<E>,\n    ) -> Result<bool> {\n        let vanilla_public_params = &public_params.vanilla_params;\n        let pvk = groth16::prepare_verifying_key(&multi_proof.groth_params.vk);\n        if multi_proof.circuit_proofs.len() != Self::partition_count(public_params) {\n            return Ok(false);\n        }\n        for (k, circuit_proof) in multi_proof.circuit_proofs.iter().enumerate() {\n            let inputs =\n                Self::generate_public_inputs(public_inputs, vanilla_public_params, Some(k));\n\n            if !groth16::verify_proof(&pvk, &circuit_proof, inputs.as_slice())? {\n                return Ok(false);\n            }\n        }\n        Ok(true)\n    }\n\n    /// circuit_proof creates and synthesizes a circuit from concrete params/inputs, then generates a\n    /// groth proof from it. 
It returns a groth proof.\n    /// circuit_proof is used internally and should neither be called nor implemented outside of\n    /// default trait methods.\n    fn circuit_proof<'b>(\n        pub_in: &S::PublicInputs,\n        vanilla_proof: &S::Proof,\n        pub_params: &'b S::PublicParams,\n        params: &'a E::Params,\n        groth_params: &groth16::Parameters<E>,\n    ) -> Result<groth16::Proof<E>> {\n        // TODO: eventually, don't generate 'random proof' here at all.\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        // We need to make the circuit repeatedly because we can't clone it.\n        // Fortunately, doing so is cheap.\n        let make_circuit = || {\n            Self::circuit(\n                &pub_in,\n                C::ComponentPrivateInputs::default(),\n                &vanilla_proof,\n                &pub_params,\n                &params,\n            )\n        };\n\n        let groth_proof = groth16::create_random_proof(make_circuit(), groth_params, rng)?;\n\n        let mut proof_vec = vec![];\n        groth_proof.write(&mut proof_vec)?;\n        let gp = groth16::Proof::<E>::read(&proof_vec[..])?;\n\n        Ok(gp)\n    }\n\n    /// generate_public_inputs generates public inputs suitable for use as input during verification\n    /// of a proof generated from this CompoundProof's bellman::Circuit (C). These inputs correspond\n    /// to those allocated when C is synthesized.\n    fn generate_public_inputs(\n        pub_in: &S::PublicInputs,\n        pub_params: &S::PublicParams,\n        partition_k: Option<usize>,\n    ) -> Vec<E::Fr>;\n\n    /// circuit constructs an instance of this CompoundProof's bellman::Circuit.\n    /// circuit takes PublicInputs, PublicParams, and Proof from this CompoundProof's proof::ProofScheme (S)\n    /// and uses them to initialize Circuit fields which will be used to construct public and private\n    /// inputs during circuit synthesis.\n    fn circuit(\n        public_inputs: &S::PublicInputs,\n        component_private_inputs: C::ComponentPrivateInputs,\n        vanilla_proof: &S::Proof,\n        public_param: &S::PublicParams,\n        engine_params: &'a E::Params,\n    ) -> C;\n\n    fn blank_circuit(_public_param: &S::PublicParams, _engine_params: &'a E::Params) -> C {\n        unimplemented!();\n    }\n\n    fn circuit_for_test(\n        public_parameters: &PublicParams<'a, E, S>,\n        public_inputs: &S::PublicInputs,\n        private_inputs: &S::PrivateInputs,\n    ) -> (C, Vec<E::Fr>) {\n        let vanilla_params = &public_parameters.vanilla_params;\n        let partition_count = partitions::partition_count(public_parameters.partitions);\n        let vanilla_proofs = S::prove_all_partitions(\n            vanilla_params,\n            public_inputs,\n            private_inputs,\n            partition_count,\n        )\n        .unwrap();\n        assert_eq!(vanilla_proofs.len(), partition_count);\n\n        assert!(\n            S::verify_all_partitions(vanilla_params, &public_inputs, &vanilla_proofs).unwrap(),\n            \"vanilla proof didn't verify\"\n        );\n\n        // Some(0) because we only return a circuit and inputs for the first partition.\n        // It would be more thorough to return all, though just checking one is probably\n        // fine for verifying circuit construction.\n        let partition_pub_in = S::with_partition(public_inputs.clone(), Some(0));\n        let inputs = Self::generate_public_inputs(&partition_pub_in, 
vanilla_params, Some(0));\n\n        let circuit = Self::circuit(\n            &partition_pub_in,\n            C::ComponentPrivateInputs::default(),\n            &vanilla_proofs[0],\n            vanilla_params,\n            &public_parameters.engine_params,\n        );\n\n        (circuit, inputs)\n    }\n}\n"
  },
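A condensed sketch of the setup → prove → verify lifecycle the trait's default methods provide, modeled on the `zigzag_test_compound` test earlier in this snapshot. It is assumed to run inside the crate's test code with the same imports in scope as `circuit/zigzag.rs` (`ZigZagCompound`, `ZigZagDrgPoRep`, `compound_proof`, `layered_drgporep`, `drgporep`, `new_seed`, `fr_into_bytes`, `PedersenDomain`, `PedersenHasher`), so paths are indicative rather than exact:

```rust
use pairing::bls12_381::{Bls12, Fr};
use rand::{Rng, SeedableRng, XorShiftRng};
use sapling_crypto::jubjub::JubjubBls12;

fn compound_proof_lifecycle() {
    let params = &JubjubBls12::new();
    let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);

    let replica_id: Fr = rng.gen();
    let data: Vec<u8> = (0..5)
        .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))
        .collect();
    let mut data_copy = data.clone();

    // 1. setup wraps ProofScheme::setup, recording engine params and partitions.
    let setup_params = compound_proof::SetupParams {
        engine_params: params,
        vanilla_params: &layered_drgporep::SetupParams {
            drg_porep_setup_params: drgporep::SetupParams {
                drg: drgporep::DrgParams {
                    nodes: 5,
                    degree: 2,
                    expansion_degree: 1,
                    seed: new_seed(),
                },
                sloth_iter: 1,
            },
            layers: 2,
            challenge_count: 2,
        },
        partitions: Some(1),
    };
    let public_params = ZigZagCompound::setup(&setup_params).unwrap();

    // 2. Replicate, producing the commitments (tau) and merkle trees (aux)
    //    that back the vanilla proofs.
    let (tau, aux) = ZigZagDrgPoRep::replicate(
        &public_params.vanilla_params,
        &replica_id.into(),
        data_copy.as_mut_slice(),
        None,
    )
    .unwrap();

    let public_inputs = layered_drgporep::PublicInputs::<PedersenDomain> {
        replica_id: replica_id.into(),
        challenge_count: 2,
        tau: Some(tau.simplify()),
        comm_r_star: tau.comm_r_star,
        k: None,
    };
    let private_inputs = layered_drgporep::PrivateInputs::<PedersenHasher> {
        replica: data.as_slice(),
        aux,
        tau: tau.layer_taus,
    };

    // 3. prove makes one groth proof per partition (generating groth
    //    parameters on the fly when None is passed) and bundles them.
    let multi_proof =
        ZigZagCompound::prove(&public_params, &public_inputs, &private_inputs, None).unwrap();

    // 4. verify checks every partition proof against its generated inputs.
    assert!(ZigZagCompound::verify(&public_params, &public_inputs, &multi_proof).unwrap());
}
```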
  {
    "path": "storage-proofs/src/crypto/aes.rs",
    "content": "use crate::error::Result;\nuse aes::block_cipher_trait::generic_array::GenericArray;\nuse aes::Aes256;\nuse block_modes::block_padding::ZeroPadding;\nuse block_modes::{BlockMode, BlockModeIv, Cbc};\n\npub fn encode(key: &[u8], plaintext: &[u8]) -> Result<Vec<u8>> {\n    assert_eq!(key.len(), 32, \"invalid key length\");\n\n    let iv = GenericArray::from_slice(&[0u8; 16]);\n    let mut mode = Cbc::<Aes256, ZeroPadding>::new_varkey(key, iv).expect(\"invalid key\");\n\n    let mut ciphertext = plaintext.to_vec();\n    mode.encrypt_nopad(&mut ciphertext)\n        .expect(\"failed to encrypt\");\n\n    Ok(ciphertext)\n}\n\npub fn decode(key: &[u8], ciphertext: &[u8]) -> Result<Vec<u8>> {\n    assert_eq!(key.len(), 32, \"invalid key length\");\n    let iv = GenericArray::from_slice(&[0u8; 16]);\n\n    let mut mode = Cbc::<Aes256, ZeroPadding>::new_varkey(key, iv).expect(\"invalid key\");\n\n    let mut plaintext = ciphertext.to_vec();\n    mode.decrypt_nopad(&mut plaintext)\n        .expect(\"failed to decrypt\");\n\n    Ok(plaintext)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n\n    #[test]\n    fn test_aes() {\n        let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for i in 0..10 {\n            let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect();\n            let plaintext: Vec<u8> = (0..(i + 1) * 32).map(|_| rng.gen()).collect();\n\n            let ciphertext = encode(key.as_slice(), plaintext.as_slice()).unwrap();\n\n            assert_ne!(\n                plaintext, ciphertext,\n                \"plaintext and ciphertext are identical\"\n            );\n            assert_eq!(plaintext.len(), ciphertext.len());\n\n            let roundtrip = decode(key.as_slice(), ciphertext.as_slice()).unwrap();\n            assert_eq!(plaintext, roundtrip, \"failed to roundtrip\");\n        }\n    }\n}\n"
  },
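A minimal usage sketch for this module (hypothetical; assumes the crate is linked as `storage_proofs`). Because `encrypt_nopad`/`decrypt_nopad` apply no padding, the plaintext length must be a multiple of the 16-byte AES block:

```rust
use storage_proofs::crypto::aes;

fn main() {
    // AES-256-CBC with a zero IV: the key must be exactly 32 bytes, and the
    // plaintext length a multiple of the 16-byte block size.
    let key = [7u8; 32];
    let plaintext = [42u8; 64];

    let ciphertext = aes::encode(&key, &plaintext).expect("encode failed");
    // No padding means ciphertext and plaintext have the same length.
    assert_eq!(ciphertext.len(), plaintext.len());

    let roundtrip = aes::decode(&key, &ciphertext).expect("decode failed");
    assert_eq!(&plaintext[..], &roundtrip[..]);
}
```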
  {
    "path": "storage-proofs/src/crypto/blake2s.rs",
    "content": "use blake2::{Blake2s, Digest};\n\npub fn blake2s(data: &[u8]) -> Vec<u8> {\n    Blake2s::digest(data).to_vec()\n}\n"
  },
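Usage is a single call (sketch; assumes the crate is linked as `storage_proofs`):

```rust
use storage_proofs::crypto::blake2s::blake2s;

fn main() {
    // Blake2s yields a 32-byte digest, matching the 32-byte node size used
    // throughout this crate.
    let digest = blake2s(b"some data");
    assert_eq!(digest.len(), 32);
}
```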
  {
    "path": "storage-proofs/src/crypto/feistel.rs",
    "content": "use blake2::{Blake2s, Digest};\n\npub const FEISTEL_ROUNDS: usize = 3;\npub type FeistelPrecomputed = (u32, u32, u32);\n\npub fn precompute(num_elements: u32) -> FeistelPrecomputed {\n    let mut next_pow4 = 4;\n    let mut log4 = 1;\n\n    while next_pow4 < num_elements {\n        next_pow4 *= 4;\n        log4 += 1;\n    }\n\n    let left_mask = ((1 << log4) - 1) << log4;\n    let right_mask = (1 << log4) - 1;\n    let half_bits = log4;\n\n    (left_mask, right_mask, half_bits)\n}\n\npub fn permute(\n    num_elements: u32,\n    index: u32,\n    keys: &[u32],\n    precomputed: FeistelPrecomputed,\n) -> u32 {\n    let mut u = encode(index, keys, precomputed);\n\n    while u >= num_elements {\n        u = encode(u, keys, precomputed)\n    }\n    u\n}\n\npub fn invert_permute(\n    num_elements: u32,\n    index: u32,\n    keys: &[u32],\n    precomputed: FeistelPrecomputed,\n) -> u32 {\n    let mut u = decode(index, keys, precomputed);\n\n    while u >= num_elements {\n        u = decode(u, keys, precomputed);\n    }\n    u\n}\n\n/// common_setup performs common calculations on inputs shared by encode and decode.\nfn common_setup(index: u32, precomputed: FeistelPrecomputed) -> (u32, u32, u32, u32) {\n    let (left_mask, right_mask, half_bits) = precomputed;\n\n    let left = (index & left_mask) >> half_bits;\n    let right = index & right_mask;\n\n    (left, right, right_mask, half_bits)\n}\n\nfn encode(index: u32, keys: &[u32], precomputed: FeistelPrecomputed) -> u32 {\n    let (mut left, mut right, right_mask, half_bits) = common_setup(index, precomputed);\n\n    for key in keys.iter().take(FEISTEL_ROUNDS) {\n        let (l, r) = (right, left ^ feistel(right, *key, right_mask));\n        left = l;\n        right = r;\n    }\n\n    (left << half_bits) | right\n}\n\nfn decode(index: u32, keys: &[u32], precomputed: FeistelPrecomputed) -> u32 {\n    let (mut left, mut right, right_mask, half_bits) = common_setup(index, precomputed);\n\n    for i in (0..FEISTEL_ROUNDS).rev() {\n        let (l, r) = ((right ^ feistel(left, keys[i], right_mask)), left);\n        left = l;\n        right = r;\n    }\n\n    (left << half_bits) | right\n}\n\nfn feistel(right: u32, key: u32, right_mask: u32) -> u32 {\n    let mut data: [u8; 8] = [0; 8];\n    data[0] = (right >> 24) as u8;\n    data[1] = (right >> 16) as u8;\n    data[2] = (right >> 8) as u8;\n    data[3] = right as u8;\n\n    data[4] = (key >> 24) as u8;\n    data[5] = (key >> 16) as u8;\n    data[6] = (key >> 8) as u8;\n    data[7] = key as u8;\n\n    let hash = Blake2s::digest(&data);\n\n    let r = u32::from(hash[0]) << 24\n        | u32::from(hash[1]) << 16\n        | u32::from(hash[2]) << 8\n        | u32::from(hash[3]);\n\n    r & right_mask\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    // Some sample n-values which are not powers of four and also don't coincidentally happen to\n    // encode/decode correctly.\n    const BAD_NS: &[u32] = &[5, 6, 8, 12, 17];\n\n    fn encode_decode(n: u32, expect_success: bool) {\n        let mut failed = false;\n        let precomputed = precompute(n);\n        for i in 0..n {\n            let p = encode(i, &[1, 2, 3, 4], precomputed);\n            let v = decode(p, &[1, 2, 3, 4], precomputed);\n            let equal = i == v;\n            let in_range = p <= n;\n            if expect_success {\n                assert!(equal, \"failed to permute (n = {})\", n);\n                assert!(in_range, \"output number is too big (n = {})\", n);\n            } else {\n                if !equal 
|| !in_range {\n                    failed = true;\n                }\n            }\n        }\n        if !expect_success {\n            assert!(failed, \"expected failure (n = {})\", n);\n        }\n    }\n\n    #[test]\n    fn test_feistel_power_of_4() {\n        // Our implementation is guaranteed to produce a permutation when the input size (number of elements)\n        // is a power of four.\n        let mut n = 1;\n\n        // Powers of 4 always succeed.\n        for _ in 0..4 {\n            n *= 4;\n            encode_decode(n, true);\n        }\n\n        // Some non-power-of-4 values also succeed, but here is a selection of example values showing\n        // that this is not guaranteed.\n        for i in BAD_NS.iter() {\n            encode_decode(*i, false);\n        }\n    }\n\n    #[test]\n    fn test_feistel_on_arbitrary_set() {\n        for n in BAD_NS.iter() {\n            let precomputed = precompute(*n as u32);\n            for i in 0..*n {\n                let p = permute(*n, i, &[1, 2, 3, 4], precomputed);\n                let v = invert_permute(*n, p, &[1, 2, 3, 4], precomputed);\n                // Since every element in the set is reversibly mapped to another element also in the set,\n                // this is indeed a permutation.\n                assert_eq!(i, v, \"failed to permute\");\n                assert!(p <= *n, \"output number is too big\");\n            }\n        }\n    }\n}\n"
  },
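A sketch of the permutation property these tests rely on, on a power-of-four domain where it holds by construction (assumes the crate is linked as `storage_proofs`). For other domain sizes, `permute` cycle-walks (re-encodes) until the output lands back in range, so the bijection holds there too:

```rust
use storage_proofs::crypto::feistel::{invert_permute, permute, precompute};

fn main() {
    let n = 16; // 4^2: the Feistel domain exactly covers [0, n)
    let keys = [1, 2, 3, 4]; // round keys, as in the tests above
    let precomputed = precompute(n);

    // Applying permute to every index in [0, n) hits every index exactly once.
    let mut seen = vec![false; n as usize];
    for i in 0..n {
        let p = permute(n, i, &keys, precomputed);
        assert!(p < n, "output out of range");
        assert!(!seen[p as usize], "output repeated: not a permutation");
        seen[p as usize] = true;

        // invert_permute is the exact inverse mapping.
        assert_eq!(invert_permute(n, p, &keys, precomputed), i);
    }
}
```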
  {
    "path": "storage-proofs/src/crypto/kdf.rs",
    "content": "use pairing::bls12_381::Fr;\nuse pairing::Engine;\n\nuse crate::hasher::{Blake2sHasher, Hasher};\n\n/// Key derivation function, based on pedersen hashing.\npub fn kdf<E: Engine>(data: &[u8], m: usize) -> Fr {\n    Blake2sHasher::kdf(&data, m).into()\n}\n\n#[cfg(test)]\nmod tests {\n    use super::kdf;\n    use crate::fr32::bytes_into_fr;\n    use pairing::bls12_381::Bls12;\n\n    #[test]\n    fn kdf_valid_block_len() {\n        let m = 1;\n        let size = 32 * (1 + m);\n\n        let data = vec![1u8; size];\n        let expected = bytes_into_fr::<Bls12>(\n            &mut vec![\n                220, 60, 76, 126, 119, 247, 67, 162, 98, 94, 119, 28, 247, 18, 71, 208, 167, 72,\n                33, 85, 59, 56, 96, 13, 9, 67, 49, 109, 95, 246, 152, 63,\n            ]\n            .as_slice(),\n        )\n        .unwrap();\n\n        let res = kdf::<Bls12>(&data, m);\n        assert_eq!(res, expected);\n    }\n\n    #[test]\n    #[should_panic]\n    fn kdf_invalid_block_len() {\n        let data = vec![2u8; 1234];\n\n        kdf::<Bls12>(&data, 44);\n    }\n}\n"
  },
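A usage sketch (assumes the crate is linked as `storage_proofs`); per the tests above, the input must be exactly `32 * (1 + m)` bytes, which in drgporep corresponds to the replica id followed by the data of the `m` parent nodes:

```rust
use pairing::bls12_381::{Bls12, Fr};
use storage_proofs::crypto::kdf::kdf;

fn main() {
    // One 32-byte replica id block plus m = 2 parent blocks.
    let m = 2;
    let data = vec![1u8; 32 * (1 + m)];

    let key: Fr = kdf::<Bls12>(&data, m);
    // The derivation is deterministic: same input, same key.
    assert_eq!(key, kdf::<Bls12>(&data, m));
}
```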
  {
    "path": "storage-proofs/src/crypto/mod.rs",
    "content": "pub mod aes;\npub mod blake2s;\npub mod feistel;\npub mod kdf;\npub mod pedersen;\npub mod sloth;\npub mod xor;\n"
  },
  {
    "path": "storage-proofs/src/crypto/pedersen.rs",
    "content": "use pairing::bls12_381::{Bls12, Fr, FrRepr};\nuse pairing::PrimeFieldRepr;\nuse sapling_crypto::jubjub::JubjubBls12;\nuse sapling_crypto::pedersen_hash::{pedersen_hash, Personalization};\n\nuse crate::fr32::bytes_into_frs;\n\nuse bitvec::{self, BitVec};\n\nlazy_static! {\n    pub static ref JJ_PARAMS: JubjubBls12 = JubjubBls12::new();\n}\n\npub const PEDERSEN_BLOCK_SIZE: usize = 256;\npub const PEDERSEN_BLOCK_BYTES: usize = PEDERSEN_BLOCK_SIZE / 8;\n\npub fn pedersen(data: &[u8]) -> Fr {\n    pedersen_hash::<Bls12, _>(\n        Personalization::NoteCommitment,\n        BitVec::<bitvec::LittleEndian, u8>::from(data)\n            .iter()\n            .take(data.len() * 8),\n        &JJ_PARAMS,\n    )\n    .into_xy()\n    .0\n}\n\n/// Pedersen hashing for inputs that have length mulitple of the block size `256`. Based on pedersen hashes and a Merkle-Damgard construction.\npub fn pedersen_md_no_padding(data: &[u8]) -> Fr {\n    assert!(\n        data.len() >= 2 * PEDERSEN_BLOCK_BYTES,\n        \"must be at least 2 block sizes long, got {}bits\",\n        data.len()\n    );\n    assert_eq!(\n        data.len() % PEDERSEN_BLOCK_BYTES,\n        0,\n        \"input must be a multiple of the blocksize\"\n    );\n    let mut chunks = data.chunks(PEDERSEN_BLOCK_BYTES);\n    let mut cur = Vec::with_capacity(2 * PEDERSEN_BLOCK_BYTES);\n    cur.resize(PEDERSEN_BLOCK_BYTES, 0);\n    cur[0..PEDERSEN_BLOCK_BYTES].copy_from_slice(chunks.nth(0).unwrap());\n\n    for block in chunks {\n        cur.resize(2 * PEDERSEN_BLOCK_BYTES, 0);\n        cur[PEDERSEN_BLOCK_BYTES..].copy_from_slice(block);\n        pedersen_compression(&mut cur);\n    }\n\n    let frs = bytes_into_frs::<Bls12>(&cur[0..PEDERSEN_BLOCK_BYTES])\n        .expect(\"pedersen must generate valid fr elements\");\n    assert_eq!(frs.len(), 1);\n    frs[0]\n}\n\npub fn pedersen_compression(bytes: &mut Vec<u8>) {\n    let bits = BitVec::<bitvec::LittleEndian, u8>::from(&bytes[..]);\n    let (x, _) = pedersen_hash::<Bls12, _>(\n        Personalization::NoteCommitment,\n        bits.iter().take(bytes.len() * 8),\n        &JJ_PARAMS,\n    )\n    .into_xy();\n    let x: FrRepr = x.into();\n\n    bytes.truncate(0);\n    x.write_le(bytes).expect(\"failed to write result hash\");\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::util::bytes_into_bits;\n    use pairing::bls12_381::Fr;\n    use pairing::Field;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    #[test]\n    fn test_bit_vec_le() {\n        let bytes = b\"ABC\";\n        let bits = bytes_into_bits(bytes);\n\n        let mut bits2 = bitvec![LittleEndian, u8; 0; bits.len()];\n        bits2.as_mut()[0..bytes.len()].copy_from_slice(&bytes[..]);\n\n        assert_eq!(bits, bits2.iter().collect::<Vec<bool>>());\n    }\n\n    #[test]\n    fn test_pedersen_compression() {\n        let bytes = b\"some bytes\";\n        let mut data = vec![0; bytes.len()];\n        data.copy_from_slice(&bytes[..]);\n        pedersen_compression(&mut data);\n        let expected = vec![\n            213, 235, 66, 156, 7, 85, 177, 39, 249, 31, 160, 247, 29, 106, 36, 46, 225, 71, 116,\n            23, 1, 89, 82, 149, 45, 189, 27, 189, 144, 98, 23, 98,\n        ];\n        assert_eq!(expected, data);\n    }\n\n    #[test]\n    fn test_pedersen_md_no_padding() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for i in 2..5 {\n            let x: Vec<u8> = (0..i * 32).map(|_| rng.gen()).collect();\n            let hashed = 
pedersen_md_no_padding(x.as_slice());\n            assert_ne!(hashed, Fr::zero());\n        }\n    }\n}\n"
  },
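A usage sketch for the two hashing entry points (assumes the crate is linked as `storage_proofs`):

```rust
use pairing::bls12_381::Fr;
use storage_proofs::crypto::pedersen::{pedersen, pedersen_md_no_padding};

fn main() {
    // Single-block pedersen hashing of arbitrary bytes.
    let h: Fr = pedersen(b"some bytes");

    // The Merkle-Damgard variant requires at least two 32-byte blocks and an
    // input length that is a multiple of 32 bytes (here: three blocks).
    let data = vec![3u8; 96];
    let h_md: Fr = pedersen_md_no_padding(&data);

    println!("pedersen: {:?}, pedersen_md: {:?}", h, h_md);
}
```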
  {
    "path": "storage-proofs/src/crypto/sloth.rs",
    "content": "use pairing::{Engine, Field};\n\npub const DEFAULT_ROUNDS: usize = 1;\n\n/// The `v` constant for sloth.\n/// This is the same as in `Fr::from_str(\"20974350070050476191779096203274386335076221000211055129041463479975432473805\").unwrap().into_repr()`.\n///\n/// (v * 5) % (r - 1) = 1\n/// where r is the modulus of bls12_381::Fr.\n/// r = 52435875175126190479447740508185965837690552500527637822603658699938581184513\n///\n///  See ::tests::const_identities\nconst SLOTH_V: [u64; 4] = [\n    3_689_348_813_023_923_405,\n    2_413_663_763_415_232_921,\n    16_233_882_818_423_549_954,\n    3_341_406_743_785_779_740,\n];\n\n/// The number five, as an array so we can use it in `pow`.\nconst FIVE: [u64; 1] = [5];\n\n/// Sloth based encoding.\npub fn encode<E: Engine>(key: &E::Fr, plaintext: &E::Fr, rounds: usize) -> E::Fr {\n    let mut ciphertext = *plaintext;\n\n    if rounds == 0 {\n        ciphertext.add_assign(key); // c + k\n    };\n\n    for _ in 0..rounds {\n        ciphertext.add_assign(key); // c + k\n        ciphertext = ciphertext.pow(&SLOTH_V); // (c + k)^v\n    }\n\n    ciphertext\n}\n\n/// Sloth based decoding.\npub fn decode<E: Engine>(key: &E::Fr, ciphertext: &E::Fr, rounds: usize) -> E::Fr {\n    let mut plaintext = *ciphertext;\n\n    for _ in 0..rounds {\n        plaintext = plaintext.pow(&FIVE); // c^5\n        plaintext.sub_assign(key); // c^5 - k\n    }\n\n    if rounds == 0 {\n        plaintext.sub_assign(key); // c^5 - k\n    }\n\n    plaintext\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use num_bigint::BigUint;\n    use pairing::bls12_381::{Bls12, Fr, FrRepr};\n    use pairing::PrimeField;\n    use std::str::FromStr;\n\n    // the modulus from `bls12_381::Fr`\n    // The definition of MODULUS and comment defining r come from pairing/src/bls_12_381/fr.rs.\n    // r = 52435875175126190479447740508185965837690552500527637822603658699938581184513\n    const MODULUS: [u64; 4] = [\n        0xffffffff00000001,\n        0x53bda402fffe5bfe,\n        0x3339d80809a1d805,\n        0x73eda753299d7d48,\n    ];\n\n    const MODULUS_STR: &str =\n        &\"52435875175126190479447740508185965837690552500527637822603658699938581184513\";\n\n    const V_STR: &str =\n        &\"20974350070050476191779096203274386335076221000211055129041463479975432473805\";\n\n    #[test]\n    fn const_identities() {\n        let sloth_v = Fr::from_str(V_STR).unwrap();\n        assert_eq!(sloth_v, Fr::from_repr(FrRepr(SLOTH_V)).unwrap());\n\n        let v = BigUint::from_str(V_STR).unwrap();\n\n        let r = BigUint::from_str(MODULUS_STR).unwrap();\n\n        let one = BigUint::from(1u32);\n        let five = BigUint::from(5u32);\n\n        assert_eq!((v * five) % (r - &one), one);\n    }\n\n    #[test]\n    fn sloth_bls_12() {\n        sloth_bls_12_aux(0);\n        sloth_bls_12_aux(10);\n    }\n\n    fn sloth_bls_12_aux(rounds: usize) {\n        let key = Fr::from_str(\"11111111\").unwrap();\n        let plaintext = Fr::from_str(\"123456789\").unwrap();\n        let ciphertext = encode::<Bls12>(&key, &plaintext, rounds);\n        let decrypted = decode::<Bls12>(&key, &ciphertext, rounds);\n        assert_eq!(plaintext, decrypted);\n        assert_ne!(plaintext, ciphertext);\n    }\n\n    #[test]\n    fn sloth_bls_12_fake() {\n        sloth_bls_12_fake_aux(0);\n        sloth_bls_12_fake_aux(10);\n    }\n\n    fn sloth_bls_12_fake_aux(rounds: usize) {\n        let key = Fr::from_str(\"11111111\").unwrap();\n        let key_fake = Fr::from_str(\"11111112\").unwrap();\n        let 
plaintext = Fr::from_str(\"123456789\").unwrap();\n        let ciphertext = encode::<Bls12>(&key, &plaintext, rounds);\n        let decrypted = decode::<Bls12>(&key_fake, &ciphertext, rounds);\n        assert_ne!(plaintext, decrypted);\n    }\n\n    prop_compose! {\n        fn arb_fr()(a in 0..MODULUS[0], b in 0..MODULUS[1], c in 0..MODULUS[2], d in 0..MODULUS[3]) -> Fr {\n            Fr::from_repr(FrRepr([a, b, c, d])).unwrap()\n        }\n    }\n    proptest! {\n        #[test]\n        fn sloth_bls_roundtrip(key in arb_fr(), plaintext in arb_fr()) {\n            let ciphertext = encode::<Bls12>(&key, &plaintext, 10);\n            assert_eq!(decode::<Bls12>(&key, &ciphertext, 10), plaintext);\n        }\n    }\n}\n"
  },
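A usage sketch of the encode/decode pair (assumes the crate is linked as `storage_proofs`). Note the asymmetry the constants create: each encode round computes `(c + k)^v` with a ~255-bit exponent, while each decode round computes only the fifth power `c^5 - k`, so encoding is deliberately slow and decoding cheap:

```rust
use pairing::bls12_381::{Bls12, Fr};
use pairing::PrimeField;
use storage_proofs::crypto::sloth;

fn main() {
    let key = Fr::from_str("11111111").unwrap();
    let plaintext = Fr::from_str("123456789").unwrap();

    // encode: slow fifth-root iteration; decode: fast fifth-power iteration.
    let ciphertext = sloth::encode::<Bls12>(&key, &plaintext, sloth::DEFAULT_ROUNDS);
    let decoded = sloth::decode::<Bls12>(&key, &ciphertext, sloth::DEFAULT_ROUNDS);

    assert_eq!(plaintext, decoded);
    assert_ne!(plaintext, ciphertext);
}
```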
  {
    "path": "storage-proofs/src/crypto/xor.rs",
    "content": "use crate::error::Result;\n\n/// Encodes plaintext by elementwise xoring with the passed in key.\npub fn encode(key: &[u8], plaintext: &[u8]) -> Result<Vec<u8>> {\n    xor(key, plaintext)\n}\n\n/// Decodes ciphertext by elementwise xoring with the passed in key.\npub fn decode(key: &[u8], ciphertext: &[u8]) -> Result<Vec<u8>> {\n    xor(key, ciphertext)\n}\n\nfn xor(key: &[u8], input: &[u8]) -> Result<Vec<u8>> {\n    let key_len = key.len();\n    assert_eq!(key_len, 32);\n\n    Ok(input\n        .iter()\n        .enumerate()\n        .map(|(i, byte)| byte ^ key[i % key_len])\n        .collect())\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n\n    #[test]\n    fn test_xor() {\n        let mut rng = XorShiftRng::from_seed([0x5dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for i in 0..10 {\n            let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect();\n            let plaintext: Vec<u8> = (0..(i + 1) * 32).map(|_| rng.gen()).collect();\n\n            let ciphertext = encode(key.as_slice(), plaintext.as_slice()).unwrap();\n\n            assert_ne!(\n                plaintext, ciphertext,\n                \"plaintext and ciphertext are identical\"\n            );\n            assert_eq!(plaintext.len(), ciphertext.len());\n\n            let roundtrip = decode(key.as_slice(), ciphertext.as_slice()).unwrap();\n            assert_eq!(plaintext, roundtrip, \"failed to roundtrip\");\n        }\n    }\n}\n"
  },
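A usage sketch (assumes the crate is linked as `storage_proofs`). Since xor is an involution, `decode` is the same operation as `encode`:

```rust
use storage_proofs::crypto::xor;

fn main() {
    // The key must be exactly 32 bytes; it is cycled across the input, so any
    // input length is accepted and the output has the same length.
    let key = [9u8; 32];
    let plaintext = b"xor is an involution: applying it twice restores the input";

    let ciphertext = xor::encode(&key, plaintext).unwrap();
    assert_eq!(ciphertext.len(), plaintext.len());

    let roundtrip = xor::decode(&key, &ciphertext).unwrap();
    assert_eq!(plaintext.to_vec(), roundtrip);
}
```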
  {
    "path": "storage-proofs/src/drgporep.rs",
    "content": "use std::marker::PhantomData;\n\nuse byteorder::{LittleEndian, WriteBytesExt};\nuse serde::de::Deserialize;\nuse serde::ser::Serialize;\n\nuse crate::drgraph::Graph;\nuse crate::error::Result;\nuse crate::hasher::{Domain, Hasher};\nuse crate::merkle::{MerkleProof, MerkleTree};\nuse crate::parameter_cache::ParameterSetIdentifier;\nuse crate::porep::{self, PoRep};\nuse crate::proof::ProofScheme;\nuse crate::vde::{self, decode_block, decode_domain_block};\n\n#[derive(Debug, Clone)]\npub struct PublicInputs<T: Domain> {\n    pub replica_id: T,\n    pub challenges: Vec<usize>,\n    pub tau: Option<porep::Tau<T>>,\n}\n\n#[derive(Debug)]\npub struct PrivateInputs<'a, H: 'a + Hasher> {\n    pub aux: &'a porep::ProverAux<H>,\n}\n\n#[derive(Debug)]\npub struct SetupParams {\n    pub drg: DrgParams,\n    pub sloth_iter: usize,\n}\n\n#[derive(Debug, Clone)]\npub struct DrgParams {\n    // Number of nodes\n    pub nodes: usize,\n\n    // Base degree of DRG\n    pub degree: usize,\n\n    pub expansion_degree: usize,\n\n    // Random seed\n    pub seed: [u32; 7],\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetIdentifier,\n{\n    pub graph: G,\n    pub sloth_iter: usize,\n\n    _h: PhantomData<H>,\n}\n\nimpl<H, G> PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetIdentifier,\n{\n    pub fn new(graph: G, sloth_iter: usize) -> Self {\n        PublicParams {\n            graph,\n            sloth_iter,\n            _h: PhantomData,\n        }\n    }\n}\n\nimpl<H, G> ParameterSetIdentifier for PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetIdentifier,\n{\n    fn parameter_set_identifier(&self) -> String {\n        format!(\n            \"drgporep::PublicParams{{graph: {}; sloth_iter: {}}}\",\n            self.graph.parameter_set_identifier(),\n            self.sloth_iter\n        )\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct DataProof<H: Hasher> {\n    #[serde(bound(\n        serialize = \"MerkleProof<H>: Serialize\",\n        deserialize = \"MerkleProof<H>: Deserialize<'de>\"\n    ))]\n    pub proof: MerkleProof<H>,\n    pub data: H::Domain,\n}\n\nimpl<H: Hasher> DataProof<H> {\n    pub fn new(n: usize) -> Self {\n        DataProof {\n            proof: MerkleProof::new(n),\n            data: Default::default(),\n        }\n    }\n\n    pub fn serialize(&self) -> Vec<u8> {\n        let mut out = self.proof.serialize();\n        let len = out.len();\n        out.resize(len + 32, 0u8);\n        self.data.write_bytes(&mut out[len..]).unwrap();\n\n        out\n    }\n\n    /// proves_challenge returns true if this self.proof corresponds to challenge.\n    /// This is useful for verifying that a supplied proof is actually relevant to a given challenge.\n    pub fn proves_challenge(&self, challenge: usize) -> bool {\n        let mut c = challenge;\n        for (_, is_right) in self.proof.path().iter() {\n            if ((c & 1) == 1) ^ is_right {\n                return false;\n            };\n            c >>= 1;\n        }\n        true\n    }\n}\n\npub type ReplicaParents<H> = Vec<(usize, DataProof<H>)>;\n\n#[derive(Default, Debug, Clone, Serialize, Deserialize)]\npub struct Proof<H: Hasher> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub data_root: H::Domain,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = 
\"H::Domain: Deserialize<'de>\"\n    ))]\n    pub replica_root: H::Domain,\n    #[serde(bound(\n        serialize = \"DataProof<H>: Serialize\",\n        deserialize = \"DataProof<H>: Deserialize<'de>\"\n    ))]\n    pub replica_nodes: Vec<DataProof<H>>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub replica_parents: Vec<ReplicaParents<H>>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub nodes: Vec<DataProof<H>>,\n}\n\nimpl<H: Hasher> Proof<H> {\n    // FIXME: should we also take a number of challenges here and construct\n    // vectors of that length?\n    pub fn new_empty(height: usize, degree: usize) -> Proof<H> {\n        Proof {\n            data_root: Default::default(),\n            replica_root: Default::default(),\n            replica_nodes: vec![DataProof::new(height)],\n            replica_parents: vec![vec![(0, DataProof::new(height)); degree]],\n            nodes: vec![DataProof::new(height)],\n        }\n    }\n    pub fn serialize(&self) -> Vec<u8> {\n        let res: Vec<_> = (0..self.nodes.len())\n            .map(|i| {\n                vec![\n                    self.replica_nodes[i].serialize(),\n                    self.replica_parents[i]\n                        .iter()\n                        .fold(Vec::new(), |mut acc, (s, p)| {\n                            let mut v = vec![0u8; 4];\n                            v.write_u32::<LittleEndian>(*s as u32).unwrap();\n                            acc.extend(v);\n                            acc.extend(p.serialize());\n                            acc\n                        }),\n                    self.nodes[i].serialize(),\n                ]\n                .concat()\n            })\n            .collect::<Vec<Vec<u8>>>()\n            .concat();\n\n        res\n    }\n\n    pub fn new(\n        replica_nodes: Vec<DataProof<H>>,\n        replica_parents: Vec<ReplicaParents<H>>,\n        nodes: Vec<DataProof<H>>,\n    ) -> Proof<H> {\n        Proof {\n            data_root: *nodes[0].proof.root(),\n            replica_root: *replica_nodes[0].proof.root(),\n            replica_nodes,\n            replica_parents,\n            nodes,\n        }\n    }\n}\n\nimpl<'a, H: Hasher> From<&'a Proof<H>> for Proof<H> {\n    fn from(p: &Proof<H>) -> Proof<H> {\n        Proof {\n            data_root: *p.nodes[0].proof.root(),\n            replica_root: *p.replica_nodes[0].proof.root(),\n            replica_nodes: p.replica_nodes.clone(),\n            replica_parents: p.replica_parents.clone(),\n            nodes: p.nodes.clone(),\n        }\n    }\n}\n\n#[derive(Default)]\npub struct DrgPoRep<'a, H, G>\nwhere\n    H: 'a + Hasher,\n    G: 'a + Graph<H>,\n{\n    _h: PhantomData<&'a H>,\n    _g: PhantomData<G>,\n}\n\nimpl<'a, H, G> ProofScheme<'a> for DrgPoRep<'a, H, G>\nwhere\n    H: 'a + Hasher,\n    G: 'a + Graph<H> + ParameterSetIdentifier,\n{\n    type PublicParams = PublicParams<H, G>;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<H::Domain>;\n    type PrivateInputs = PrivateInputs<'a, H>;\n    type Proof = Proof<H>;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        let graph = G::new(\n            sp.drg.nodes,\n            sp.drg.degree,\n            sp.drg.expansion_degree,\n            sp.drg.seed,\n        );\n\n        Ok(PublicParams::new(graph, sp.sloth_iter))\n    }\n\n    fn 
prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        let len = pub_inputs.challenges.len();\n\n        let mut replica_nodes = Vec::with_capacity(len);\n        let mut replica_parents = Vec::with_capacity(len);\n        let mut data_nodes: Vec<DataProof<H>> = Vec::with_capacity(len);\n\n        for i in 0..len {\n            let challenge = pub_inputs.challenges[i] % pub_params.graph.size();\n            assert_ne!(challenge, 0, \"cannot prove the first node\");\n\n            let tree_d = &priv_inputs.aux.tree_d;\n            let tree_r = &priv_inputs.aux.tree_r;\n            let domain_replica = tree_r.as_slice();\n\n            let data = domain_replica[challenge];\n\n            replica_nodes.push(DataProof {\n                proof: MerkleProof::new_from_proof(&tree_r.gen_proof(challenge)),\n                data,\n            });\n\n            let parents = pub_params.graph.parents(challenge);\n            let mut replica_parentsi = Vec::with_capacity(parents.len());\n\n            for p in parents {\n                replica_parentsi.push((p, {\n                    let proof = tree_r.gen_proof(p);\n                    DataProof {\n                        proof: MerkleProof::new_from_proof(&proof),\n                        data: domain_replica[p],\n                    }\n                }));\n            }\n\n            replica_parents.push(replica_parentsi);\n\n            let node_proof = tree_d.gen_proof(challenge);\n\n            {\n                // TODO: use this again, I can't make lifetimes work though atm and I do not know why\n                // let extracted = Self::extract(\n                //     pub_params,\n                //     &pub_inputs.replica_id.into_bytes(),\n                //     &replica,\n                //     challenge,\n                // )?;\n\n                let extracted = decode_domain_block(\n                    &pub_params.graph,\n                    pub_params.sloth_iter,\n                    &pub_inputs.replica_id,\n                    domain_replica,\n                    challenge,\n                )?\n                .into_bytes();\n                data_nodes.push(DataProof {\n                    data: H::Domain::try_from_bytes(&extracted)?,\n                    proof: MerkleProof::new_from_proof(&node_proof),\n                });\n            }\n        }\n\n        let proof = Proof::new(replica_nodes, replica_parents, data_nodes);\n\n        Ok(proof)\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        for i in 0..pub_inputs.challenges.len() {\n            {\n                // This was verify_proof_meta.\n                if pub_inputs.challenges[i] >= pub_params.graph.size() {\n                    return Ok(false);\n                }\n\n                if !(proof.nodes[i].proves_challenge(pub_inputs.challenges[i])) {\n                    return Ok(false);\n                }\n\n                if !(proof.replica_nodes[i].proves_challenge(pub_inputs.challenges[i])) {\n                    return Ok(false);\n                }\n\n                let expected_parents = pub_params.graph.parents(pub_inputs.challenges[i]);\n                if proof.replica_parents[i].len() != expected_parents.len() {\n                    println!(\n                        \"proof parents were not the same length 
as in public parameters: {} != {}\",\n                        proof.replica_parents[i].len(),\n                        expected_parents.len()\n                    );\n                    return Ok(false);\n                }\n\n                let parents_as_expected = proof.replica_parents[i]\n                    .iter()\n                    .zip(&expected_parents)\n                    .all(|(actual, expected)| actual.0 == *expected);\n\n                if !parents_as_expected {\n                    println!(\"proof parents were not those provided in public parameters\");\n                    return Ok(false);\n                }\n            }\n\n            let challenge = pub_inputs.challenges[i] % pub_params.graph.size();\n            assert_ne!(challenge, 0, \"cannot prove the first node\");\n\n            if !proof.replica_nodes[i].proof.validate(challenge) {\n                println!(\"invalid replica node\");\n                return Ok(false);\n            }\n\n            for (parent_node, p) in &proof.replica_parents[i] {\n                if !p.proof.validate(*parent_node) {\n                    println!(\"invalid replica parent: {:?}\", p);\n                    return Ok(false);\n                }\n            }\n\n            let prover_bytes = &pub_inputs.replica_id.into_bytes();\n\n            let key_input =\n                proof.replica_parents[i]\n                    .iter()\n                    .fold(prover_bytes.clone(), |mut acc, (_, p)| {\n                        acc.extend(&p.data.into_bytes());\n                        acc\n                    });\n\n            let key = H::kdf(key_input.as_slice(), pub_params.graph.degree());\n            let unsealed =\n                H::sloth_decode(&key, &proof.replica_nodes[i].data, pub_params.sloth_iter);\n\n            if unsealed != proof.nodes[i].data {\n                return Ok(false);\n            }\n\n            if !proof.nodes[i].proof.validate_data(&unsealed.into_bytes()) {\n                println!(\"invalid data for merkle path {:?}\", unsealed);\n                return Ok(false);\n            }\n        }\n\n        Ok(true)\n    }\n}\n\nimpl<'a, H, G> PoRep<'a, H> for DrgPoRep<'a, H, G>\nwhere\n    H: 'a + Hasher,\n    G: 'a + Graph<H> + ParameterSetIdentifier + Sync + Send,\n{\n    type Tau = porep::Tau<H::Domain>;\n    type ProverAux = porep::ProverAux<H>;\n\n    fn replicate(\n        pp: &Self::PublicParams,\n        replica_id: &H::Domain,\n        data: &mut [u8],\n        data_tree: Option<MerkleTree<H::Domain, H::Function>>,\n    ) -> Result<(porep::Tau<H::Domain>, porep::ProverAux<H>)> {\n        let tree_d = match data_tree {\n            Some(tree) => tree,\n            None => pp.graph.merkle_tree(data)?,\n        };\n\n        vde::encode(&pp.graph, pp.sloth_iter, replica_id, data)?;\n\n        let comm_d = tree_d.root();\n        let tree_r = pp.graph.merkle_tree(data)?;\n        let comm_r = tree_r.root();\n\n        Ok((\n            porep::Tau::new(comm_d, comm_r),\n            porep::ProverAux::new(tree_d, tree_r),\n        ))\n    }\n\n    fn extract_all<'b>(\n        pp: &'b Self::PublicParams,\n        replica_id: &'b H::Domain,\n        data: &'b [u8],\n    ) -> Result<Vec<u8>> {\n        vde::decode(&pp.graph, pp.sloth_iter, replica_id, data)\n    }\n\n    fn extract(\n        pp: &Self::PublicParams,\n        replica_id: &H::Domain,\n        data: &[u8],\n        node: usize,\n    ) -> Result<Vec<u8>> {\n        Ok(decode_block(&pp.graph, pp.sloth_iter, replica_id, data, 
node)?.into_bytes())\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use memmap::MmapMut;\n    use memmap::MmapOptions;\n    use pairing::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n    use std::fs::File;\n    use std::io::Write;\n    use tempfile;\n\n    use crate::drgraph::{new_seed, BucketGraph};\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher};\n    use crate::util::data_at_node;\n\n    pub fn file_backed_mmap_from(data: &[u8]) -> MmapMut {\n        let mut tmpfile: File = tempfile::tempfile().unwrap();\n        tmpfile.write_all(data).unwrap();\n\n        unsafe { MmapOptions::new().map_mut(&tmpfile).unwrap() }\n    }\n\n    fn test_extract_all<H: Hasher>() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let sloth_iter = 1;\n        let replica_id: H::Domain = rng.gen();\n        let data = vec![2u8; 32 * 3];\n        // create a copy, so we can compare roundtrips\n        let mut mmapped_data_copy = file_backed_mmap_from(&data);\n\n        let sp = SetupParams {\n            drg: DrgParams {\n                nodes: data.len() / 32,\n                degree: 5,\n                expansion_degree: 0,\n                seed: new_seed(),\n            },\n            sloth_iter,\n        };\n\n        let pp = DrgPoRep::<H, BucketGraph<H>>::setup(&sp).unwrap();\n\n        DrgPoRep::replicate(&pp, &replica_id, &mut mmapped_data_copy, None).unwrap();\n\n        let mut copied = vec![0; data.len()];\n        copied.copy_from_slice(&mmapped_data_copy);\n        assert_ne!(data, copied, \"replication did not change data\");\n\n        let decoded_data = DrgPoRep::extract_all(&pp, &replica_id, &mut mmapped_data_copy).unwrap();\n\n        assert_eq!(data, decoded_data.as_slice(), \"failed to extract data\");\n    }\n\n    #[test]\n    fn extract_all_pedersen() {\n        test_extract_all::<PedersenHasher>();\n    }\n\n    #[test]\n    fn extract_all_sha256() {\n        test_extract_all::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn extract_all_blake2s() {\n        test_extract_all::<Blake2sHasher>();\n    }\n\n    fn test_extract<H: Hasher>() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let sloth_iter = 1;\n        let replica_id: H::Domain = rng.gen();\n        let nodes = 3;\n        let data = vec![2u8; 32 * nodes];\n\n        // create a copy, so we can compare roundtrips\n        let mut mmapped_data_copy = file_backed_mmap_from(&data);\n\n        let sp = SetupParams {\n            drg: DrgParams {\n                nodes: data.len() / 32,\n                degree: 5,\n                expansion_degree: 0,\n                seed: new_seed(),\n            },\n            sloth_iter,\n        };\n\n        let pp = DrgPoRep::<H, BucketGraph<H>>::setup(&sp).unwrap();\n\n        DrgPoRep::replicate(&pp, &replica_id, &mut mmapped_data_copy, None).unwrap();\n\n        let mut copied = vec![0; data.len()];\n        copied.copy_from_slice(&mmapped_data_copy);\n        assert_ne!(data, copied, \"replication did not change data\");\n\n        for i in 0..nodes {\n            let decoded_data = DrgPoRep::extract(&pp, &replica_id, &mmapped_data_copy, i).unwrap();\n\n            let original_data = data_at_node(&data, i).unwrap();\n\n            assert_eq!(\n                original_data,\n                decoded_data.as_slice(),\n                \"failed to extract data\"\n           
 );\n        }\n    }\n\n    #[test]\n    fn extract_pedersen() {\n        test_extract::<PedersenHasher>();\n    }\n\n    #[test]\n    fn extract_sha256() {\n        test_extract::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn extract_blake2s() {\n        test_extract::<Blake2sHasher>();\n    }\n\n    fn prove_verify_aux<H: Hasher>(\n        nodes: usize,\n        i: usize,\n        use_wrong_challenge: bool,\n        use_wrong_parents: bool,\n    ) {\n        assert!(i < nodes);\n\n        // The loop is here in case we need to retry because of an edge case in the test design.\n        loop {\n            let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n            let sloth_iter = 1;\n            let degree = 10;\n            let expansion_degree = 0;\n            let seed = new_seed();\n\n            let replica_id: H::Domain = rng.gen();\n            let data: Vec<u8> = (0..nodes)\n                .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n                .collect();\n\n            // create a copy, so we can compare roundtrips\n            let mut mmapped_data_copy = file_backed_mmap_from(&data);\n\n            let challenge = i;\n\n            let sp = SetupParams {\n                drg: DrgParams {\n                    nodes,\n                    degree,\n                    expansion_degree,\n                    seed,\n                },\n                sloth_iter,\n            };\n\n            let pp = DrgPoRep::<H, BucketGraph<_>>::setup(&sp).unwrap();\n\n            let (tau, aux) =\n                DrgPoRep::<H, _>::replicate(&pp, &replica_id, &mut mmapped_data_copy, None)\n                    .unwrap();\n\n            let mut copied = vec![0; data.len()];\n            copied.copy_from_slice(&mmapped_data_copy);\n\n            assert_ne!(data, copied, \"replication did not change data\");\n\n            let pub_inputs = PublicInputs::<H::Domain> {\n                replica_id,\n                challenges: vec![challenge, challenge],\n                tau: Some(tau.clone().into()),\n            };\n\n            let priv_inputs = PrivateInputs::<H> { aux: &aux };\n\n            let real_proof = DrgPoRep::<H, _>::prove(&pp, &pub_inputs, &priv_inputs).unwrap();\n\n            if use_wrong_parents {\n                // Only one 'wrong' option will be tested at a time.\n                assert!(!use_wrong_challenge);\n                let real_parents = real_proof.replica_parents;\n\n                // Parent vector claiming the wrong parents.\n                let fake_parents = vec![real_parents[0]\n                    .iter()\n                    // Incrementing each parent node will give us a different parent set.\n                    // It's fine to be out of range, since this only needs to fail.\n                    .map(|(i, data_proof)| (i + 1, data_proof.clone()))\n                    .collect::<Vec<_>>()];\n\n                let proof = Proof::new(\n                    real_proof.replica_nodes.clone(),\n                    fake_parents,\n                    real_proof.nodes.clone().into(),\n                );\n\n                assert!(\n                    !DrgPoRep::verify(&pp, &pub_inputs, &proof).unwrap(),\n                    \"verified in error -- with wrong parents\"\n                );\n\n                let mut all_same = true;\n                for (p, _) in &real_parents[0] {\n                    if *p != real_parents[0][0].0 {\n                        all_same = false;\n                    }\n                }\n\n   
             if all_same {\n                    println!(\"invalid test data can't scramble proofs with all same parents.\");\n\n                    // If, for some reason, we hit this condition because of the data passed in,\n                    // try again.\n                    continue;\n                }\n\n                // Parent vector claiming the right parents but providing valid proofs for different\n                // parents.\n                let fake_proof_parents = vec![real_parents[0]\n                    .iter()\n                    .enumerate()\n                    .map(|(i, (p, _))| {\n                        // Rotate the real parent proofs.\n                        let x = (i + 1) % real_parents[0].len();\n                        let j = real_parents[0][x].0;\n                        (*p, real_parents[0][j].1.clone())\n                    })\n                    .collect::<Vec<_>>()];\n\n                let proof2 = Proof::new(\n                    real_proof.replica_nodes,\n                    fake_proof_parents,\n                    real_proof.nodes.into(),\n                );\n\n                assert!(\n                    !DrgPoRep::<H, _>::verify(&pp, &pub_inputs, &proof2).unwrap(),\n                    \"verified in error -- with wrong parent proofs\"\n                );\n\n                return;\n            }\n\n            let proof = real_proof;\n\n            if use_wrong_challenge {\n                let pub_inputs_with_wrong_challenge_for_proof = PublicInputs::<H::Domain> {\n                    replica_id,\n                    challenges: vec![if challenge == 1 { 2 } else { 1 }],\n                    tau: Some(tau.into()),\n                };\n                let verified = DrgPoRep::<H, _>::verify(\n                    &pp,\n                    &pub_inputs_with_wrong_challenge_for_proof,\n                    &proof,\n                )\n                .unwrap();\n                assert!(\n                    !verified,\n                    \"wrongly verified proof which does not match challenge in public input\"\n                );\n            } else {\n                assert!(\n                    DrgPoRep::<H, _>::verify(&pp, &pub_inputs, &proof).unwrap(),\n                    \"failed to verify\"\n                );\n            }\n\n            // Normally, just run once.\n            break;\n        }\n    }\n\n    fn prove_verify(n: usize, i: usize) {\n        prove_verify_aux::<PedersenHasher>(n, i, false, false);\n        prove_verify_aux::<Sha256Hasher>(n, i, false, false);\n        prove_verify_aux::<Blake2sHasher>(n, i, false, false);\n    }\n\n    fn prove_verify_wrong_challenge(n: usize, i: usize) {\n        prove_verify_aux::<PedersenHasher>(n, i, true, false);\n        prove_verify_aux::<Sha256Hasher>(n, i, true, false);\n        prove_verify_aux::<Blake2sHasher>(n, i, true, false);\n    }\n\n    fn prove_verify_wrong_parents(n: usize, i: usize) {\n        prove_verify_aux::<PedersenHasher>(n, i, false, true);\n        prove_verify_aux::<Sha256Hasher>(n, i, false, true);\n        prove_verify_aux::<Blake2sHasher>(n, i, false, true);\n    }\n\n    table_tests! 
{\n        prove_verify {\n            prove_verify_32_2_1(2, 1);\n\n            prove_verify_32_3_1(3, 1);\n            prove_verify_32_3_2(3, 2);\n\n            prove_verify_32_10_1(10, 1);\n            prove_verify_32_10_2(10, 2);\n            prove_verify_32_10_3(10, 3);\n            prove_verify_32_10_4(10, 4);\n            prove_verify_32_10_5(10, 5);\n        }\n    }\n\n    #[test]\n    fn test_drgporep_verifies_using_challenge() {\n        prove_verify_wrong_challenge(5, 1);\n    }\n\n    #[test]\n    fn test_drgporep_verifies_parents() {\n        // Challenge a node (4) that doesn't have all the same parents.\n        prove_verify_wrong_parents(7, 4);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/drgraph.rs",
    "content": "use std::cmp;\nuse std::marker::PhantomData;\n\nuse rand::{ChaChaRng, OsRng, Rng, SeedableRng};\nuse rayon::prelude::*;\n\nuse crate::error::*;\nuse crate::hasher::pedersen::PedersenHasher;\nuse crate::hasher::{Domain, Hasher};\nuse crate::merkle::MerkleTree;\nuse crate::parameter_cache::ParameterSetIdentifier;\nuse crate::util::data_at_node;\n/// The default hasher currently in use.\npub type DefaultTreeHasher = PedersenHasher;\n\npub const PARALLEL_MERKLE: bool = true;\n\n/// A depth robust graph.\npub trait Graph<H: Hasher>: ::std::fmt::Debug + Clone + PartialEq + Eq {\n    /// Returns the expected size of all nodes in the graph.\n    fn expected_size(&self, node_size: usize) -> usize {\n        self.size() * node_size\n    }\n\n    /// Builds a merkle tree based on the given data.\n    fn merkle_tree<'a>(&self, data: &'a [u8]) -> Result<MerkleTree<H::Domain, H::Function>> {\n        self.merkle_tree_aux(data, 32, PARALLEL_MERKLE)\n    }\n\n    /// Builds a merkle tree based on the given data.\n    fn merkle_tree_aux<'a>(\n        &self,\n        data: &'a [u8],\n        node_size: usize,\n        parallel: bool,\n    ) -> Result<MerkleTree<H::Domain, H::Function>> {\n        if data.len() != (node_size * self.size()) as usize {\n            return Err(Error::InvalidMerkleTreeArgs(\n                data.len(),\n                node_size,\n                self.size(),\n            ));\n        }\n\n        // To avoid hashing the first node, the node size has to be the hash size.else\n        // We make this assumption pervasively anyway.\n        if node_size != 32 {\n            return Err(Error::InvalidNodeSize(node_size));\n        }\n\n        let f = |i| {\n            let d = data_at_node(&data, i).expect(\"data_at_node math failed\");\n            // TODO/FIXME: This can panic. FOR NOW, let's leave this since we're experimenting with\n            // optimization paths. 
However, we need to ensure that bad input will not lead to a panic\n            // that isn't caught by the FPS API.\n            // Unfortunately, it's not clear how to perform this error-handling in the parallel\n            // iterator case.\n            H::Domain::try_from_bytes(d).unwrap()\n        };\n\n        if parallel {\n            Ok(MerkleTree::from_par_iter(\n                (0..self.size()).into_par_iter().map(f),\n            ))\n        } else {\n            Ok(MerkleTree::new((0..self.size()).map(f)))\n        }\n    }\n\n    /// Returns the merkle tree depth.\n    fn merkle_tree_depth(&self) -> u64 {\n        graph_height(self.size()) as u64\n    }\n\n    /// Returns a sorted list of all parents of this node.\n    fn parents(&self, node: usize) -> Vec<usize>;\n\n    /// Returns the number of nodes in the graph.\n    fn size(&self) -> usize;\n\n    /// Returns the degree of the graph.\n    fn degree(&self) -> usize;\n\n    fn new(nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7]) -> Self;\n    fn seed(&self) -> [u32; 7];\n\n    /// Returns true if a node's parents have lower index than the node.\n    fn forward(&self) -> bool {\n        true\n    }\n}\n\npub fn graph_height(size: usize) -> usize {\n    (size as f64).log2().ceil() as usize\n}\n\n/// Bucket sampling algorithm.\n#[derive(Clone, Debug, PartialEq, Eq, Copy)]\npub struct BucketGraph<H: Hasher> {\n    nodes: usize,\n    base_degree: usize,\n    seed: [u32; 7],\n    _h: PhantomData<H>,\n}\n\nimpl<H: Hasher> ParameterSetIdentifier for BucketGraph<H> {\n    fn parameter_set_identifier(&self) -> String {\n        // NOTE: Seed is not included because it does not influence parameter generation.\n        format!(\n            \"drgraph::BucketGraph{{size: {}; degree: {}}}\",\n            self.nodes, self.base_degree,\n        )\n    }\n}\n\nimpl<H: Hasher> Graph<H> for BucketGraph<H> {\n    #[inline]\n    fn parents(&self, node: usize) -> Vec<usize> {\n        let m = self.base_degree;\n\n        match node {\n            // Special case for the first node: it self-references.\n            0 => vec![0; m as usize],\n            // Special case for the second node: it references only the first one.\n            1 => vec![0; m as usize],\n            _ => {\n                // seed = self.seed | node\n                let mut seed = [0u32; 8];\n                seed[0..7].copy_from_slice(&self.seed);\n                seed[7] = node as u32;\n                let mut rng = ChaChaRng::from_seed(&seed);\n\n                let mut parents = Vec::with_capacity(m);\n                for k in 0..m {\n                    // iterate over m meta nodes of the ith real node\n                    // simulate the edges that we would add from previous graph nodes\n                    // if any edge is added from a meta node of jth real node then add edge (j,i)\n                    let logi = ((node * m) as f32).log2().floor() as usize;\n                    let j = rng.gen::<usize>() % logi;\n                    let jj = cmp::min(node * m + k, 1 << (j + 1));\n                    let back_dist = rng.gen_range(cmp::max(jj >> 1, 2), jj + 1);\n                    let out = (node * m + k - back_dist) / m;\n\n                    // remove self references and replace with reference to previous node\n                    if out == node {\n                        parents.push(node - 1);\n                    } else {\n                        assert!(out <= node);\n                        parents.push(out);\n                    }\n           
     }\n\n                parents.sort_unstable();\n\n                parents\n            }\n        }\n    }\n\n    #[inline]\n    fn size(&self) -> usize {\n        self.nodes\n    }\n\n    #[inline]\n    fn degree(&self) -> usize {\n        self.base_degree\n    }\n\n    fn seed(&self) -> [u32; 7] {\n        self.seed\n    }\n\n    fn new(nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7]) -> Self {\n        assert_eq!(expansion_degree, 0);\n        BucketGraph {\n            nodes,\n            base_degree,\n            seed,\n            _h: PhantomData,\n        }\n    }\n}\n\npub fn new_seed() -> [u32; 7] {\n    OsRng::new().unwrap().gen()\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use memmap::MmapMut;\n    use memmap::MmapOptions;\n\n    use crate::drgraph::new_seed;\n    use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher};\n\n    // Create and return an MmapMut backed by an in-memory copy of the given data.\n    pub fn mmap_from(data: &[u8]) -> MmapMut {\n        let mut mm = MmapOptions::new().len(data.len()).map_anon().unwrap();\n        mm.copy_from_slice(data);\n        mm\n    }\n\n    fn graph_bucket<H: Hasher>() {\n        for size in vec![3, 10, 200, 2000] {\n            for degree in 2..12 {\n                let g = BucketGraph::<H>::new(size, degree, 0, new_seed());\n\n                assert_eq!(g.size(), size, \"wrong nodes count\");\n\n                assert_eq!(g.parents(0), vec![0; degree as usize]);\n                assert_eq!(g.parents(1), vec![0; degree as usize]);\n\n                for i in 2..size {\n                    let pa1 = g.parents(i);\n                    let pa2 = g.parents(i);\n\n                    assert_eq!(pa1.len(), degree);\n                    assert_eq!(pa1, pa2, \"different parents on the same node\");\n\n                    let p1 = g.parents(i);\n                    let p2 = g.parents(i);\n\n                    for parent in p1 {\n                        // TODO: fix me\n                        assert_ne!(i, parent, \"self reference found\");\n                    }\n\n                    let mut p1 = p2.clone();\n                    p1.sort();\n                    assert_eq!(p1, p2, \"not sorted\");\n                }\n            }\n        }\n    }\n\n    #[test]\n    fn graph_bucket_sha256() {\n        graph_bucket::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn graph_bucket_blake2s() {\n        graph_bucket::<Blake2sHasher>();\n    }\n\n    #[test]\n    fn graph_bucket_pedersen() {\n        graph_bucket::<PedersenHasher>();\n    }\n\n    fn gen_proof<H: Hasher>(parallel: bool) {\n        let g = BucketGraph::<H>::new(5, 3, 0, new_seed());\n        let node_size = 32;\n        let data = vec![2u8; node_size * 5];\n\n        let mmapped = &mmap_from(&data);\n        let tree = g.merkle_tree_aux(mmapped, node_size, parallel).unwrap();\n        let proof = tree.gen_proof(2);\n\n        assert!(proof.validate::<H::Function>());\n    }\n\n    #[test]\n    fn gen_proof_pedersen() {\n        gen_proof::<PedersenHasher>(true);\n        gen_proof::<PedersenHasher>(false);\n    }\n\n    #[test]\n    fn gen_proof_sha256() {\n        gen_proof::<Sha256Hasher>(true);\n        gen_proof::<Sha256Hasher>(false);\n    }\n\n    #[test]\n    fn gen_proof_blake2s() {\n        gen_proof::<Blake2sHasher>(true);\n        gen_proof::<Blake2sHasher>(false);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/error.rs",
    "content": "use bellman::SynthesisError;\n\npub type Result<T> = ::std::result::Result<T, Error>;\n\n/// Custom error types\n#[derive(Debug, Fail)]\npub enum Error {\n    #[fail(display = \"Bytes could not be converted to Fr\")]\n    BadFrBytes,\n    #[fail(display = \"Out of bounds access {} > {}\", _0, _1)]\n    OutOfBounds(usize, usize),\n    #[fail(\n        display = \"mismatch of data, node_size and nodes {} != {} * {}\",\n        _0, _1, _2\n    )]\n    InvalidMerkleTreeArgs(usize, usize, usize),\n    #[fail(display = \"invalid node size ({}), must be 16, 32 or 64\", _0)]\n    InvalidNodeSize(usize),\n    #[fail(display = \"{}\", _0)]\n    Synthesis(#[cause] SynthesisError),\n    #[fail(display = \"{}\", _0)]\n    Io(#[cause] ::std::io::Error),\n    #[fail(display = \"tree root and commitment do not match\")]\n    InvalidCommitment,\n    #[fail(display = \"malformed input\")]\n    MalformedInput,\n    #[fail(display = \"invalid input size\")]\n    InvalidInputSize,\n    #[fail(display = \"merkle tree generation error: {}\", _0)]\n    MerkleTreeGenerationError(String),\n}\n\nimpl From<SynthesisError> for Error {\n    fn from(inner: SynthesisError) -> Error {\n        Error::Synthesis(inner)\n    }\n}\n\nimpl From<::std::io::Error> for Error {\n    fn from(inner: ::std::io::Error) -> Error {\n        Error::Io(inner)\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/example_helper.rs",
    "content": "use std::fs::File;\nuse std::io::stderr;\nuse std::path::Path;\nuse std::time::{Duration, Instant};\n\nuse bellman::groth16::*;\nuse bellman::Circuit;\nuse clap::{self, App, Arg, SubCommand};\nuse pairing::bls12_381::Bls12;\nuse pbr::ProgressBar;\nuse rand::{Rng, SeedableRng, XorShiftRng};\nuse sapling_crypto::jubjub::{JubjubBls12, JubjubEngine};\n\nuse crate::circuit::bench::BenchCS;\nuse crate::circuit::test::TestConstraintSystem;\n\nuse crate::SP_LOG;\n\npub fn prettyb(num: usize) -> String {\n    let num = num as f64;\n    let negative = if num.is_sign_positive() { \"\" } else { \"-\" };\n    let num = num.abs();\n    let units = [\"B\", \"kB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\", \"YB\"];\n    if num < 1_f64 {\n        return format!(\"{}{} {}\", negative, num, \"B\");\n    }\n    let delimiter = 1024_f64;\n    let exponent = ::std::cmp::min(\n        (num.ln() / delimiter.ln()).floor() as i32,\n        (units.len() - 1) as i32,\n    );\n    let pretty_bytes = format!(\"{:.2}\", num / delimiter.powi(exponent))\n        .parse::<f64>()\n        .unwrap()\n        * 1_f64;\n    let unit = units[exponent as usize];\n    format!(\"{}{} {}\", negative, pretty_bytes, unit)\n}\n\n/// Generate a unique cache path, based on the inputs.\nfn get_cache_path(\n    name: &str,\n    data_size: usize,\n    challenge_count: usize,\n    m: usize,\n    sloth: usize,\n) -> String {\n    format!(\n        \"/tmp/filecoin-proofs-cache-{}-{}-{}-{}-{}\",\n        name.to_ascii_lowercase(),\n        data_size,\n        challenge_count,\n        m,\n        sloth,\n    )\n}\n\n/// The available circuit types for benchmarking.\n#[derive(Debug)]\npub enum CSType {\n    Groth,\n    Bench,\n    Circuit,\n}\n\nlazy_static! {\n    static ref JUBJUB_BLS_PARAMS: JubjubBls12 = JubjubBls12::new();\n}\n\n/// A trait that makes it easy to implement \"Examples\". 
In practice, these are tunable benchmarking CLI tools.\npub trait Example<'a, C: Circuit<Bls12>>: Default {\n    /// The actual work.\n    fn work_groth(\n        &mut self,\n        typ: CSType,\n        data_size: usize,\n        challenge_count: usize,\n        m: usize,\n        sloth_iter: usize,\n    ) {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let leaves = data_size / 32;\n        let tree_depth = (leaves as f64).log2().ceil() as usize;\n\n        info!(SP_LOG, \"constraint system: {:?}\", typ; \"target\" => \"config\");\n        info!(SP_LOG, \"data_size:  {}\", prettyb(data_size); \"target\" => \"config\");\n        info!(SP_LOG, \"challenge_count: {}\", challenge_count; \"target\" => \"config\");\n        info!(SP_LOG, \"m: {}\", m; \"target\" => \"config\");\n        info!(SP_LOG, \"sloth: {}\", sloth_iter; \"target\" => \"config\");\n        info!(SP_LOG, \"tree_depth: {}\", tree_depth; \"target\" => \"config\");\n\n        let start = Instant::now();\n        let mut param_duration = Duration::new(0, 0);\n\n        let name = Self::name();\n\n        // caching\n        let p = get_cache_path(&name, data_size, challenge_count, m, sloth_iter);\n        let cache_path = Path::new(&p);\n        let groth_params: Parameters<Bls12> = if cache_path.exists() {\n            info!(SP_LOG, \"reading groth params from cache: {:?}\", cache_path; \"target\" => \"params\");\n            let f = File::open(&cache_path).expect(\"failed to read cache\");\n            Parameters::read(&f, false).expect(\"failed to read cached params\")\n        } else {\n            info!(SP_LOG, \"generating new groth params\"; \"target\" => \"params\");\n            let p = self.generate_groth_params(\n                rng,\n                &JUBJUB_BLS_PARAMS,\n                tree_depth,\n                challenge_count,\n                m,\n                sloth_iter,\n            );\n            info!(SP_LOG, \"writing params to cache: {:?}\", cache_path; \"target\" => \"params\");\n\n            let mut f = File::create(&cache_path).expect(\"failed to open cache file\");\n            p.write(&mut f).expect(\"failed to write params to cache\");\n\n            p\n        };\n\n        info!(SP_LOG, \"generating verification key\"; \"target\" => \"params\");\n        let pvk = prepare_verifying_key(&groth_params.vk);\n\n        param_duration += start.elapsed();\n\n        let samples = Self::samples() as u32;\n\n        let mut proof_vec = vec![];\n        let mut total_proving = Duration::new(0, 0);\n        let mut total_verifying = Duration::new(0, 0);\n\n        let mut pb = ProgressBar::on(stderr(), u64::from(samples * 2));\n\n        for _ in 0..samples {\n            proof_vec.truncate(0);\n\n            // -- create proof\n\n            let start = Instant::now();\n            let proof = self.create_proof(\n                rng,\n                &JUBJUB_BLS_PARAMS,\n                &groth_params,\n                tree_depth,\n                challenge_count,\n                leaves,\n                m,\n                sloth_iter,\n            );\n            proof\n                .write(&mut proof_vec)\n                .expect(\"failed to serialize proof\");\n            total_proving += start.elapsed();\n            pb.inc();\n\n            // -- verify proof\n\n            let start = Instant::now();\n\n            if let Some(is_valid) = self.verify_proof(&proof, &pvk) {\n                assert!(is_valid, \"failed to verify proof\");\n 
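               // Note: an invalid proof aborts the benchmark here; examples whose\n                // verify_proof returns None skip this check entirely.\n 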
           }\n\n            total_verifying += start.elapsed();\n            pb.inc();\n        }\n\n        // -- print statistics\n\n        let proving_avg = total_proving / samples;\n        let proving_avg = f64::from(proving_avg.subsec_nanos()) / 1_000_000_000f64\n            + (proving_avg.as_secs() as f64);\n\n        let verifying_avg = total_verifying / samples;\n        let verifying_avg = f64::from(verifying_avg.subsec_nanos()) / 1_000_000_000f64\n            + (verifying_avg.as_secs() as f64);\n\n        info!(SP_LOG, \"avg_proving_time: {:?} seconds\", proving_avg; \"target\" => \"stats\");\n        info!(SP_LOG, \"avg_verifying_time: {:?} seconds\", verifying_avg; \"target\" => \"stats\");\n        info!(SP_LOG, \"params_generation_time: {:?}\", param_duration; \"target\" => \"stats\");\n    }\n\n    fn work_bench(\n        &mut self,\n        typ: CSType,\n        data_size: usize,\n        challenge_count: usize,\n        m: usize,\n        sloth_iter: usize,\n    ) {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let leaves = data_size / 32;\n        let tree_depth = (leaves as f64).log2().ceil() as usize;\n\n        info!(SP_LOG, \"constraint system: {:?}\", typ; \"target\" => \"config\");\n        info!(SP_LOG, \"data_size:  {}\", prettyb(data_size); \"target\" => \"config\");\n        info!(SP_LOG, \"challenge_count: {}\", challenge_count; \"target\" => \"config\");\n        info!(SP_LOG, \"m: {}\", m; \"target\" => \"config\");\n        info!(SP_LOG, \"sloth: {}\", sloth_iter; \"target\" => \"config\");\n        info!(SP_LOG, \"tree_depth: {}\", tree_depth; \"target\" => \"config\");\n\n        // need more samples as this is a faster operation\n        let samples = (Self::samples() * 10) as u32;\n\n        let mut total_synth = Duration::new(0, 0);\n\n        let mut pb = ProgressBar::on(stderr(), u64::from(samples));\n\n        info!(\n            SP_LOG,\n            \"constraints: {}\",\n            self.get_num_constraints(\n                rng,\n                &JUBJUB_BLS_PARAMS,\n                tree_depth,\n                challenge_count,\n                leaves,\n                m,\n                sloth_iter,\n            )\n        );\n\n        for _ in 0..samples {\n            // -- create proof\n\n            let start = Instant::now();\n            let c = self.create_circuit(\n                rng,\n                &JUBJUB_BLS_PARAMS,\n                tree_depth,\n                challenge_count,\n                leaves,\n                m,\n                sloth_iter,\n            );\n            let mut cs = BenchCS::<Bls12>::new();\n            c.synthesize(&mut cs).expect(\"failed to synthesize circuit\");\n\n            total_synth += start.elapsed();\n            pb.inc();\n        }\n\n        // -- print statistics\n\n        let synth_avg = total_synth / samples;\n        let synth_avg =\n            f64::from(synth_avg.subsec_nanos()) / 1_000_000_000f64 + (synth_avg.as_secs() as f64);\n\n        info!(SP_LOG, \"avg_synthesize_time: {:?} seconds\", synth_avg; \"target\" => \"stats\");\n    }\n\n    fn work_circuit(\n        &mut self,\n        typ: CSType,\n        data_size: usize,\n        challenge_count: usize,\n        m: usize,\n        sloth_iter: usize,\n    ) {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let leaves = data_size / 32;\n        let tree_depth = (leaves as f64).log2().ceil() as usize;\n\n       
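 // Each merkle leaf is one 32-byte node, so e.g. data_size = 1 MiB gives\n        // leaves = 32768 and tree_depth = 15.\n       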
 info!(SP_LOG, \"constraint system: {:?}\", typ; \"target\" => \"config\");\n        info!(SP_LOG, \"data_size:  {}\", prettyb(data_size); \"target\" => \"config\");\n        info!(SP_LOG, \"challenge_count: {}\", challenge_count; \"target\" => \"config\");\n        info!(SP_LOG, \"m: {}\", m; \"target\" => \"config\");\n        info!(SP_LOG, \"sloth: {}\", sloth_iter; \"target\" => \"config\");\n        info!(SP_LOG, \"tree_depth: {}\", tree_depth; \"target\" => \"config\");\n\n        let c = self.create_circuit(\n            rng,\n            &JUBJUB_BLS_PARAMS,\n            tree_depth,\n            challenge_count,\n            leaves,\n            m,\n            sloth_iter,\n        );\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        c.synthesize(&mut cs).expect(\"failed to synthesize circuit\");\n\n        println!(\"{}\", cs.pretty_print());\n    }\n\n    fn clap(&self) -> clap::ArgMatches {\n        App::new(Self::name())\n            .version(\"1.0\")\n            .arg(\n                Arg::with_name(\"size\")\n                    .required(true)\n                    .long(\"size\")\n                    .help(\"The data size in KB\")\n                    .takes_value(true),\n            )\n            .arg(\n                Arg::with_name(\"challenges\")\n                    .long(\"challenges\")\n                    .help(\"How many challenges to execute, defaults to 1\")\n                    .default_value(\"1\")\n                    .takes_value(true),\n            )\n            .arg(\n                Arg::with_name(\"m\")\n                    .help(\"The size of m\")\n                    .long(\"m\")\n                    .default_value(\"6\")\n                    .takes_value(true),\n            )\n            .arg(\n                Arg::with_name(\"sloth\")\n                    .help(\"The number of sloth iterations, defaults to 1\")\n                    .long(\"sloth\")\n                    .default_value(\"1\")\n                    .takes_value(true),\n            )\n            .subcommand(\n                SubCommand::with_name(\"groth\")\n                    .about(\"execute circuits using groth constraint system\"),\n            )\n            .subcommand(\n                SubCommand::with_name(\"bench\")\n                    .about(\"execute circuits using a minimal benchmarking constraint\"),\n            )\n            .subcommand(SubCommand::with_name(\"circuit\").about(\"print the constraint system\"))\n            .get_matches()\n    }\n\n    fn main() {\n        let mut instance = Self::default();\n\n        let (data_size, challenge_count, m, sloth_iter, typ) = {\n            let matches = instance.clap();\n\n            let data_size = value_t!(matches, \"size\", usize).unwrap() * 1024;\n            let challenge_count = value_t!(matches, \"challenges\", usize).unwrap();\n            let m = value_t!(matches, \"m\", usize).unwrap();\n            let sloth_iter = value_t!(matches, \"sloth\", usize).unwrap();\n\n            let typ = match matches.subcommand_name() {\n                Some(\"groth\") => CSType::Groth,\n                Some(\"bench\") => CSType::Bench,\n                Some(\"circuit\") => CSType::Circuit,\n                _ => panic!(\"please select a valid subcommand\"),\n            };\n\n            (data_size, challenge_count, m, sloth_iter, typ)\n        };\n\n        match typ {\n            CSType::Groth => instance.work_groth(typ, data_size, challenge_count, m, sloth_iter),\n            CSType::Bench => 
 instance.work_bench(typ, data_size, challenge_count, m, sloth_iter),\n            CSType::Circuit => {\n                instance.work_circuit(typ, data_size, challenge_count, m, sloth_iter)\n            }\n        }\n    }\n\n    /// The name of the application. Used for identifying caches.\n    fn name() -> String;\n\n    /// Generate groth parameters\n    fn generate_groth_params<R: Rng>(\n        &mut self,\n        _: &mut R,\n        _: &'a <Bls12 as JubjubEngine>::Params,\n        _: usize,\n        _: usize,\n        _: usize,\n        _: usize,\n    ) -> Parameters<Bls12>;\n\n    /// How many samples should be taken when proving and verifying\n    fn samples() -> usize;\n\n    /// Create a new random circuit\n    fn create_circuit<R: Rng>(\n        &mut self,\n        _: &mut R,\n        _: &'a <Bls12 as JubjubEngine>::Params,\n        _: usize,\n        _: usize,\n        _: usize,\n        _: usize,\n        _: usize,\n    ) -> C;\n\n    fn create_proof<R: Rng>(\n        &mut self,\n        rng: &mut R,\n        engine_params: &'a <Bls12 as JubjubEngine>::Params,\n        groth_params: &Parameters<Bls12>,\n        tree_depth: usize,\n        challenge_count: usize,\n        leaves: usize,\n        m: usize,\n        sloth_iter: usize,\n    ) -> Proof<Bls12> {\n        let c = self.create_circuit(\n            rng,\n            engine_params,\n            tree_depth,\n            challenge_count,\n            leaves,\n            m,\n            sloth_iter,\n        );\n        create_random_proof(c, groth_params, rng).expect(\"failed to create proof\")\n    }\n\n    /// Verify the given proof, return `None` if not implemented.\n    fn verify_proof(&mut self, _: &Proof<Bls12>, _: &PreparedVerifyingKey<Bls12>) -> Option<bool>;\n\n    /// Get the number of constraints of the circuit\n    fn get_num_constraints<R: Rng>(\n        &mut self,\n        rng: &mut R,\n        engine_params: &'a JubjubBls12,\n        tree_depth: usize,\n        challenge_count: usize,\n        leaves: usize,\n        m: usize,\n        sloth_iter: usize,\n    ) -> usize {\n        let c = self.create_circuit(\n            rng,\n            engine_params,\n            tree_depth,\n            challenge_count,\n            leaves,\n            m,\n            sloth_iter,\n        );\n\n        let mut cs = BenchCS::<Bls12>::new();\n        c.synthesize(&mut cs).expect(\"failed to synthesize circuit\");\n        cs.num_constraints()\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/fr32.rs",
    "content": "use crate::error::*;\n\nuse byteorder::{LittleEndian, WriteBytesExt};\n\nuse pairing::{Engine, PrimeField, PrimeFieldRepr};\n\n// Contains 32 bytes whose little-endian value represents an Fr.\n// Invariants:\n// - Value MUST represent a valid Fr.\n// - Length must be 32.\npub type Fr32 = [u8];\n\n// Contains one or more 32-byte chunks whose little-endian values represent Frs.\n// Invariants:\n// - Value of each 32-byte chunks MUST represent valid Frs.\n// - Total length must be a multiple of 32.\n// That is to say: each 32-byte chunk taken alone must be a valid Fr32.\npub type Fr32Vec = Vec<u8>;\n\n// Array whose little-endian value represents an Fr.\n// Invariants:\n// - Value MUST represent a valid Fr.\npub type Fr32Ary = [u8; 32];\n\n// Takes a slice of bytes and returns an Fr if byte slice is exactly 32 bytes and does not overflow.\n// Otherwise, returns a BadFrBytesError.\npub fn bytes_into_fr<E: Engine>(bytes: &[u8]) -> Result<E::Fr> {\n    if bytes.len() != 32 {\n        return Err(Error::BadFrBytes);\n    }\n    let mut fr_repr = <<<E as Engine>::Fr as PrimeField>::Repr as Default>::default();\n    fr_repr.read_le(bytes).map_err(|_| Error::BadFrBytes)?;\n\n    E::Fr::from_repr(fr_repr).map_err(|_| Error::BadFrBytes)\n}\n\n// Takes an Fr and returns a vector of exactly 32 bytes guaranteed to contain a valid Fr.\npub fn fr_into_bytes<E: Engine>(fr: &E::Fr) -> Fr32Vec {\n    let mut out = Vec::with_capacity(32);\n    fr.into_repr().write_le(&mut out).unwrap();\n    out\n}\n\n// Takes a slice of bytes and returns a vector of Fr -- or an error if either bytes is not a multiple of 32 bytes\n// or any 32-byte chunk overflows and does not contain a valid Fr.\npub fn bytes_into_frs<E: Engine>(bytes: &[u8]) -> Result<Vec<E::Fr>> {\n    bytes\n        .chunks(32)\n        .map(|ref chunk| bytes_into_fr::<E>(chunk))\n        .collect()\n}\n\n// Takes a slice of Frs and returns a vector of bytes, guaranteed to have a size which is a multiple of 32,\n// with every 32-byte chunk representing a valid Fr.\npub fn frs_into_bytes<E: Engine>(frs: &[E::Fr]) -> Fr32Vec {\n    frs.iter().flat_map(|fr| fr_into_bytes::<E>(fr)).collect()\n}\n\n// Takes a u32 and returns an Fr.\npub fn u32_into_fr<E: Engine>(n: u32) -> E::Fr {\n    let mut buf: Fr32Vec = vec![0u8; 32];\n    let mut w = &mut buf[0..4];\n    w.write_u32::<LittleEndian>(n).unwrap();\n\n    bytes_into_fr::<E>(&buf).expect(\"should never fail since u32 is in the field\")\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use pairing::bls12_381::Bls12;\n\n    fn bytes_fr_test<E: Engine>(bytes: Fr32Ary, expect_success: bool) {\n        let mut b = &bytes[..];\n        let fr_result = bytes_into_fr::<E>(&mut b);\n        if expect_success {\n            let f = fr_result.unwrap();\n            let b2 = fr_into_bytes::<E>(&f);\n\n            assert_eq!(bytes.to_vec(), b2);\n        } else {\n            assert!(fr_result.is_err(), \"expected a decoding error\")\n        }\n    }\n    #[test]\n    fn test_bytes_into_fr_into_bytes() {\n        bytes_fr_test::<Bls12>(\n            [\n                0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,\n                23, 24, 25, 26, 27, 28, 29, 30, 31,\n            ],\n            true,\n        );\n        bytes_fr_test::<Bls12>(\n            // Some bytes fail because they are not in the field.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 115,\n            ],\n            false,\n        );\n        bytes_fr_test::<Bls12>(\n            // This is okay.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 114,\n            ],\n            true,\n        );\n        bytes_fr_test::<Bls12>(\n            // So is this.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 236, 115,\n            ],\n            true,\n        );\n        bytes_fr_test::<Bls12>(\n            // But not this.\n            [\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,\n                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 237, 115,\n            ],\n            false,\n        );\n    }\n\n    fn bytes_into_frs_into_bytes_test<E: Engine>(bytes: &Fr32) {\n        let mut bytes = bytes.clone();\n        let frs = bytes_into_frs::<E>(&mut bytes).unwrap();\n        assert!(frs.len() == 3);\n        let bytes_back = frs_into_bytes::<E>(&frs);\n        assert!(bytes.to_vec() == bytes_back);\n    }\n\n    #[test]\n    fn test_bytes_into_frs_into_bytes() {\n        let bytes = b\"012345678901234567890123456789--012345678901234567890123456789--012345678901234567890123456789--\";\n        bytes_into_frs_into_bytes_test::<Bls12>(&bytes[..]);\n\n        let _short_bytes = b\"012345678901234567890123456789--01234567890123456789\";\n        // This will panic because _short_bytes is not a multiple of 32 bytes.\n        // bytes_into_frs_into_bytes_test::<Bls12>(&_short_bytes[..]);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/hasher/blake2s.rs",
    "content": "use blake2::Blake2s;\n\nuse super::{DigestHasher, Digester};\n\nimpl Digester for Blake2s {}\n\npub type Blake2sHasher = DigestHasher<Blake2s>;\n"
  },
  {
    "path": "storage-proofs/src/hasher/digest.rs",
    "content": "use std::fmt;\nuse std::hash::Hasher as StdHasher;\nuse std::marker::PhantomData;\n\nuse merkle_light::hash::{Algorithm, Hashable};\nuse pairing::bls12_381::{Bls12, Fr, FrRepr};\nuse pairing::{PrimeField, PrimeFieldRepr};\nuse rand::{Rand, Rng};\nuse sha2::Digest;\n\nuse super::{Domain, HashFunction, Hasher};\nuse crate::crypto::sloth;\nuse crate::error::*;\n\npub trait Digester: Digest + Clone + Default + ::std::fmt::Debug + Send + Sync {}\n\n#[derive(Default, Copy, Clone, Debug)]\npub struct DigestHasher<D: Digester> {\n    _d: PhantomData<D>,\n}\n\nimpl<D: Digester> PartialEq for DigestHasher<D> {\n    fn eq(&self, other: &Self) -> bool {\n        self._d == other._d\n    }\n}\n\nimpl<D: Digester> Eq for DigestHasher<D> {}\n\nimpl<D: Digester> Hasher for DigestHasher<D> {\n    type Domain = DigestDomain;\n    type Function = DigestFunction<D>;\n\n    fn kdf(data: &[u8], m: usize) -> Self::Domain {\n        assert_eq!(\n            data.len(),\n            32 * (1 + m),\n            \"invalid input length: data.len(): {} m: {}\",\n            data.len(),\n            m\n        );\n\n        <Self::Function as HashFunction<Self::Domain>>::hash(data)\n    }\n\n    fn sloth_encode(key: &Self::Domain, ciphertext: &Self::Domain, rounds: usize) -> Self::Domain {\n        // TODO: validate this is how sloth should work in this case\n        let k = (*key).into();\n        let c = (*ciphertext).into();\n\n        sloth::encode::<Bls12>(&k, &c, rounds).into()\n    }\n\n    fn sloth_decode(key: &Self::Domain, ciphertext: &Self::Domain, rounds: usize) -> Self::Domain {\n        // TODO: validate this is how sloth should work in this case\n        sloth::decode::<Bls12>(&(*key).into(), &(*ciphertext).into(), rounds).into()\n    }\n}\n\n#[derive(Default, Clone)]\npub struct DigestFunction<D: Digester>(D);\n\nimpl<D: Digester> PartialEq for DigestFunction<D> {\n    fn eq(&self, other: &Self) -> bool {\n        format!(\"{:?}\", self) == format!(\"{:?}\", other)\n    }\n}\n\nimpl<D: Digester> Eq for DigestFunction<D> {}\n\nimpl<D: Digester> fmt::Debug for DigestFunction<D> {\n    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n        write!(f, \"DigestFunction({:?})\", self.0)\n    }\n}\n\nimpl<D: Digester> StdHasher for DigestFunction<D> {\n    #[inline]\n    fn write(&mut self, msg: &[u8]) {\n        self.0.input(msg)\n    }\n\n    #[inline]\n    fn finish(&self) -> u64 {\n        unreachable!(\"unused by Function -- should never be called\")\n    }\n}\n\n#[derive(Copy, Clone, PartialEq, Eq, Debug, PartialOrd, Ord, Default, Serialize, Deserialize)]\npub struct DigestDomain(pub [u8; 32]);\n\nimpl DigestDomain {\n    fn trim_to_fr32(&mut self) {\n        // strip last two bits, to ensure result is in Fr.\n        self.0[31] &= 0b0011_1111;\n    }\n}\n\nimpl Rand for DigestDomain {\n    fn rand<R: Rng>(rng: &mut R) -> Self {\n        // generating an Fr and converting it, to ensure we stay in the field\n        rng.gen::<Fr>().into()\n    }\n}\n\nimpl AsRef<[u8]> for DigestDomain {\n    fn as_ref(&self) -> &[u8] {\n        &self.0[..]\n    }\n}\n\nimpl<D: Digester> Hashable<DigestFunction<D>> for DigestDomain {\n    fn hash(&self, state: &mut DigestFunction<D>) {\n        state.write(self.as_ref())\n    }\n}\n\nimpl From<Fr> for DigestDomain {\n    fn from(val: Fr) -> Self {\n        let mut res = Self::default();\n        val.into_repr().write_le(&mut res.0[0..32]).unwrap();\n\n        res\n    }\n}\n\nimpl From<DigestDomain> for Fr {\n    fn from(val: DigestDomain) -> Self 
{\n        let mut res = FrRepr::default();\n        res.read_le(&val.0[0..32]).unwrap();\n\n        Fr::from_repr(res).unwrap()\n    }\n}\n\nimpl Domain for DigestDomain {\n    fn serialize(&self) -> Vec<u8> {\n        self.0.to_vec()\n    }\n\n    fn into_bytes(&self) -> Vec<u8> {\n        self.0.to_vec()\n    }\n\n    fn try_from_bytes(raw: &[u8]) -> Result<Self> {\n        if raw.len() != 32 {\n            return Err(Error::InvalidInputSize);\n        }\n        let mut res = DigestDomain::default();\n        res.0.copy_from_slice(&raw[0..32]);\n        Ok(res)\n    }\n\n    fn write_bytes(&self, dest: &mut [u8]) -> Result<()> {\n        if dest.len() < 32 {\n            return Err(Error::InvalidInputSize);\n        }\n        dest[0..32].copy_from_slice(&self.0[..]);\n        Ok(())\n    }\n}\n\nimpl<D: Digester> HashFunction<DigestDomain> for DigestFunction<D> {\n    fn hash(data: &[u8]) -> DigestDomain {\n        let hashed = D::digest(data);\n        let mut res = DigestDomain::default();\n        res.0.copy_from_slice(&hashed[..]);\n        res.trim_to_fr32();\n        res\n    }\n}\n\nimpl<D: Digester> Algorithm<DigestDomain> for DigestFunction<D> {\n    #[inline]\n    fn hash(&mut self) -> DigestDomain {\n        let mut h = [0u8; 32];\n        h.copy_from_slice(self.0.clone().result().as_ref());\n        let mut dd = DigestDomain::from(h);\n        dd.trim_to_fr32();\n        dd\n    }\n\n    #[inline]\n    fn reset(&mut self) {\n        self.0.reset();\n    }\n\n    fn leaf(&mut self, leaf: DigestDomain) -> DigestDomain {\n        leaf\n    }\n\n    fn node(&mut self, left: DigestDomain, right: DigestDomain, height: usize) -> DigestDomain {\n        height.hash(self);\n\n        left.hash(self);\n        right.hash(self);\n        self.hash()\n    }\n}\n\nimpl From<[u8; 32]> for DigestDomain {\n    #[inline]\n    fn from(val: [u8; 32]) -> Self {\n        DigestDomain(val)\n    }\n}\n\nimpl From<DigestDomain> for [u8; 32] {\n    #[inline]\n    fn from(val: DigestDomain) -> Self {\n        val.0\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/hasher/mod.rs",
    "content": "pub mod blake2s;\npub mod pedersen;\npub mod sha256;\n\nmod digest;\nmod types;\n\npub use self::digest::{DigestDomain, DigestFunction, DigestHasher, Digester};\npub use self::types::{Domain, HashFunction, Hasher};\n\npub use self::blake2s::Blake2sHasher;\npub use self::pedersen::PedersenHasher;\npub use self::sha256::Sha256Hasher;\n"
  },
  {
    "path": "storage-proofs/src/hasher/pedersen.rs",
    "content": "use std::hash::Hasher as StdHasher;\n\nuse bitvec::{self, BitVec};\nuse byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};\nuse merkle_light::hash::{Algorithm as LightAlgorithm, Hashable};\nuse pairing::bls12_381::{Bls12, Fr, FrRepr};\nuse pairing::{PrimeField, PrimeFieldRepr};\nuse rand::{Rand, Rng};\nuse sapling_crypto::pedersen_hash::{pedersen_hash, Personalization};\nuse serde::de::{Deserialize, Deserializer};\nuse serde::ser::Serializer;\n\nuse super::{Domain, HashFunction, Hasher};\nuse crate::crypto::{kdf, pedersen, sloth};\nuse crate::error::{Error, Result};\n\n#[derive(Default, Copy, Clone, Debug, PartialEq, Eq)]\npub struct PedersenHasher {}\n\nimpl Hasher for PedersenHasher {\n    type Domain = PedersenDomain;\n    type Function = PedersenFunction;\n\n    fn kdf(data: &[u8], m: usize) -> Self::Domain {\n        kdf::kdf::<Bls12>(data, m).into()\n    }\n\n    fn sloth_encode(key: &Self::Domain, ciphertext: &Self::Domain, rounds: usize) -> Self::Domain {\n        let key = Fr::from_repr(key.0).unwrap();\n        let ciphertext = Fr::from_repr(ciphertext.0).unwrap();\n        sloth::encode::<Bls12>(&key, &ciphertext, rounds).into()\n    }\n\n    fn sloth_decode(key: &Self::Domain, ciphertext: &Self::Domain, rounds: usize) -> Self::Domain {\n        let key = Fr::from_repr(key.0).unwrap();\n        let ciphertext = Fr::from_repr(ciphertext.0).unwrap();\n\n        sloth::decode::<Bls12>(&key, &ciphertext, rounds).into()\n    }\n}\n\n#[derive(Copy, Clone, Debug, PartialEq, Eq)]\npub struct PedersenFunction(Fr);\n\nimpl Default for PedersenFunction {\n    fn default() -> PedersenFunction {\n        PedersenFunction(Fr::from_repr(FrRepr::default()).expect(\"failed default\"))\n    }\n}\n\nimpl Hashable<PedersenFunction> for Fr {\n    fn hash(&self, state: &mut PedersenFunction) {\n        let mut bytes = Vec::with_capacity(32);\n        self.into_repr().write_le(&mut bytes).unwrap();\n        state.write(&bytes);\n    }\n}\n\nimpl Hashable<PedersenFunction> for PedersenDomain {\n    fn hash(&self, state: &mut PedersenFunction) {\n        let mut bytes = Vec::with_capacity(32);\n        self.0.write_le(&mut bytes).unwrap();\n        state.write(&bytes);\n    }\n}\n\n#[derive(Copy, Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]\npub struct PedersenDomain(#[serde(with = \"FrReprDef\")] pub FrRepr);\n\npub struct FrReprDef(pub [u64; 4]);\n\nimpl FrReprDef {\n    fn serialize<S>(__self: &FrRepr, serializer: S) -> ::std::result::Result<S::Ok, S::Error>\n    where\n        S: Serializer,\n    {\n        let mut writer = Vec::with_capacity(32);\n\n        for digit in __self.0.as_ref().iter() {\n            writer.write_u64::<LittleEndian>(*digit).unwrap();\n        }\n\n        if serializer.is_human_readable() {\n            serializer.collect_str(&base64::display::Base64Display::with_config(\n                &writer,\n                base64::STANDARD,\n            ))\n        } else {\n            serializer.serialize_bytes(&writer)\n        }\n    }\n\n    fn deserialize<'de, D>(deserializer: D) -> ::std::result::Result<FrRepr, D::Error>\n    where\n        D: Deserializer<'de>,\n    {\n        let arr: Vec<u8> = if deserializer.is_human_readable() {\n            let raw = String::deserialize(deserializer)?;\n            base64::decode(&raw).unwrap()\n        } else {\n            Vec::deserialize(deserializer)?\n        };\n\n        let mut digits = [0u64; 4];\n        let mut source = ::std::io::Cursor::new(arr);\n        for digit in digits.iter_mut() {\n     
       *digit = source.read_u64::<LittleEndian>().unwrap();\n        }\n\n        Ok(FrRepr(digits))\n    }\n}\n\nimpl Default for PedersenDomain {\n    fn default() -> PedersenDomain {\n        PedersenDomain(FrRepr::default())\n    }\n}\n\nimpl Rand for PedersenDomain {\n    fn rand<R: Rng>(rng: &mut R) -> Self {\n        let fr: Fr = rng.gen();\n        PedersenDomain(fr.into_repr())\n    }\n}\n\nimpl Ord for PedersenDomain {\n    #[inline(always)]\n    fn cmp(&self, other: &PedersenDomain) -> ::std::cmp::Ordering {\n        (self.0).cmp(&other.0)\n    }\n}\n\nimpl PartialOrd for PedersenDomain {\n    #[inline(always)]\n    fn partial_cmp(&self, other: &PedersenDomain) -> Option<::std::cmp::Ordering> {\n        Some((self.0).cmp(&other.0))\n    }\n}\n\nimpl AsRef<[u8]> for PedersenDomain {\n    #[inline]\n    fn as_ref(&self) -> &[u8] {\n        as_ref(&(self.0).0)\n    }\n}\n\n// This is unsafe, and I wish it wasn't here, but I really need AsRef<[u8]> to work, without allocating.\n// https://internals.rust-lang.org/t/safe-trasnsmute-for-slices-e-g-u64-u32-particularly-simd-types/2871\n// https://github.com/briansmith/ring/blob/abb3fdfc08562f3f02e95fb551604a871fd4195e/src/polyfill.rs#L93-L110\n#[inline(always)]\n#[allow(clippy::needless_lifetimes)]\nfn as_ref<'a>(src: &'a [u64; 4]) -> &'a [u8] {\n    unsafe {\n        std::slice::from_raw_parts(\n            src.as_ptr() as *const u8,\n            src.len() * std::mem::size_of::<u64>(),\n        )\n    }\n}\n\nimpl Domain for PedersenDomain {\n    // QUESTION: When, if ever, should serialize and into_bytes return different results?\n    // The definitions here at least are equivalent.\n    fn serialize(&self) -> Vec<u8> {\n        let mut bytes = Vec::with_capacity(32);\n        self.0.write_le(&mut bytes).unwrap();\n        bytes\n    }\n\n    fn into_bytes(&self) -> Vec<u8> {\n        let mut out = Vec::with_capacity(32);\n        self.0.write_le(&mut out).unwrap();\n\n        out\n    }\n\n    fn try_from_bytes(raw: &[u8]) -> Result<Self> {\n        if raw.len() != 32 {\n            return Err(Error::BadFrBytes);\n        }\n        let mut res: FrRepr = Default::default();\n        res.read_le(raw).map_err(|_| Error::BadFrBytes)?;\n\n        Ok(PedersenDomain(res))\n    }\n\n    fn write_bytes(&self, dest: &mut [u8]) -> Result<()> {\n        self.0.write_le(dest)?;\n        Ok(())\n    }\n}\n\nimpl StdHasher for PedersenFunction {\n    #[inline]\n    fn write(&mut self, msg: &[u8]) {\n        self.0 = pedersen::pedersen(msg);\n    }\n\n    #[inline]\n    fn finish(&self) -> u64 {\n        unimplemented!()\n    }\n}\n\nimpl HashFunction<PedersenDomain> for PedersenFunction {\n    fn hash(data: &[u8]) -> PedersenDomain {\n        pedersen::pedersen_md_no_padding(data).into()\n    }\n}\n\nimpl LightAlgorithm<PedersenDomain> for PedersenFunction {\n    #[inline]\n    fn hash(&mut self) -> PedersenDomain {\n        self.0.into()\n    }\n\n    #[inline]\n    fn reset(&mut self) {\n        self.0 = Fr::from_repr(FrRepr::from(0)).expect(\"failed 0\");\n    }\n\n    fn leaf(&mut self, leaf: PedersenDomain) -> PedersenDomain {\n        leaf\n    }\n\n    fn node(\n        &mut self,\n        left: PedersenDomain,\n        right: PedersenDomain,\n        height: usize,\n    ) -> PedersenDomain {\n        let lhs = BitVec::<bitvec::LittleEndian, u64>::from(&(left.0).0[..]);\n        let rhs = BitVec::<bitvec::LittleEndian, u64>::from(&(right.0).0[..]);\n\n        let bits = lhs\n            .iter()\n            .take(Fr::NUM_BITS as usize)\n    
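        // Fr::NUM_BITS is 255 for BLS12-381, so each child contributes exactly one\n            // field element's worth of bits to the Pedersen hash input.\n    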
        .chain(rhs.iter().take(Fr::NUM_BITS as usize));\n\n        pedersen_hash::<Bls12, _>(\n            Personalization::MerkleTree(height),\n            bits,\n            &pedersen::JJ_PARAMS,\n        )\n        .into_xy()\n        .0\n        .into()\n    }\n}\n\nimpl From<Fr> for PedersenDomain {\n    #[inline]\n    fn from(val: Fr) -> Self {\n        PedersenDomain(val.into_repr())\n    }\n}\n\nimpl From<PedersenDomain> for Fr {\n    #[inline]\n    fn from(val: PedersenDomain) -> Self {\n        Fr::from_repr(val.0).unwrap()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use std::mem;\n\n    use merkle_light::hash::Hashable;\n\n    use crate::merkle::MerkleTree;\n\n    #[test]\n    fn test_path() {\n        let values = [\"hello\", \"world\", \"you\", \"two\"];\n        let t = MerkleTree::<PedersenDomain, PedersenFunction>::from_data(values.iter());\n\n        let p = t.gen_proof(0); // create a proof for the first value = \"hello\"\n        assert_eq!(*p.path(), vec![true, true]);\n        assert_eq!(p.validate::<PedersenFunction>(), true);\n    }\n\n    #[test]\n    fn test_pedersen_hasher() {\n        let values = [\"hello\", \"world\", \"you\", \"two\"];\n\n        let t = MerkleTree::<PedersenDomain, PedersenFunction>::from_data(values.iter());\n\n        assert_eq!(t.leafs(), 4);\n\n        let mut a = PedersenFunction::default();\n        let leaves: Vec<PedersenDomain> = values\n            .iter()\n            .map(|v| {\n                v.hash(&mut a);\n                let h = a.hash();\n                a.reset();\n                h\n            })\n            .collect();\n\n        assert_eq!(t[0], leaves[0]);\n        assert_eq!(t[1], leaves[1]);\n        assert_eq!(t[2], leaves[2]);\n        assert_eq!(t[3], leaves[3]);\n\n        let i1 = a.node(leaves[0], leaves[1], 0);\n        a.reset();\n        let i2 = a.node(leaves[2], leaves[3], 0);\n        a.reset();\n\n        assert_eq!(t[4], i1);\n        assert_eq!(t[5], i2);\n\n        let root = a.node(i1, i2, 1);\n        a.reset();\n\n        assert_eq!(\n            t[0].0,\n            FrRepr([\n                5516429847681692214,\n                1363403528947283679,\n                5429691745410183571,\n                7730413689037971367\n            ])\n        );\n\n        let expected = FrRepr([\n            14963070332212552755,\n            2414807501862983188,\n            16116531553419129213,\n            6357427774790868134,\n        ]);\n        let actual = t[6].0;\n\n        assert_eq!(actual, expected);\n\n        assert_eq!(t[6], root);\n    }\n\n    #[test]\n    fn test_as_ref() {\n        let cases: Vec<[u64; 4]> = vec![\n            [0, 0, 0, 0],\n            [\n                14963070332212552755,\n                2414807501862983188,\n                16116531553419129213,\n                6357427774790868134,\n            ],\n        ];\n\n        for case in cases.into_iter() {\n            let repr = FrRepr(case);\n            let val = PedersenDomain(repr);\n\n            for _ in 0..100 {\n                assert_eq!(val.as_ref().to_vec(), val.as_ref().to_vec());\n            }\n\n            let raw: &[u8] = val.as_ref();\n\n            for i in 0..4 {\n                assert_eq!(case[i], unsafe {\n                    let mut val = [0u8; 8];\n                    val.clone_from_slice(&raw[i * 8..(i + 1) * 8]);\n                    mem::transmute::<[u8; 8], u64>(val)\n                });\n            }\n        }\n    }\n\n    #[test]\n    fn test_serialize() {\n       
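 // serde_json is a human-readable format, so this round trip exercises the\n        // base64 branch of FrReprDef above.\n       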
 let repr = FrRepr([1, 2, 3, 4]);\n        let val = PedersenDomain(repr);\n\n        let ser = serde_json::to_string(&val).unwrap();\n        let val_back = serde_json::from_str(&ser).unwrap();\n\n        assert_eq!(val, val_back);\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/hasher/sha256.rs",
    "content": "use sha2::Sha256;\n\nuse super::{DigestHasher, Digester};\n\nimpl Digester for Sha256 {}\n\npub type Sha256Hasher = DigestHasher<Sha256>;\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::fmt;\n    use std::iter::FromIterator;\n\n    use merkle_light::hash::{Algorithm, Hashable};\n    use merkle_light::merkle::MerkleTree;\n\n    use super::super::{DigestDomain, Hasher};\n\n    struct HexSlice<'a>(&'a [u8]);\n\n    impl<'a> HexSlice<'a> {\n        fn new<T>(data: &'a T) -> HexSlice<'a>\n        where\n            T: ?Sized + AsRef<[u8]> + 'a,\n        {\n            HexSlice(data.as_ref())\n        }\n    }\n\n    /// reverse order\n    impl<'a> fmt::Display for HexSlice<'a> {\n        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n            let len = self.0.len();\n            for i in 0..len {\n                let byte = self.0[len - 1 - i];\n                write!(f, \"{:x}{:x}\", byte >> 4, byte & 0xf)?;\n            }\n            Ok(())\n        }\n    }\n\n    #[test]\n    fn test_sha256_hash() {\n        let mut a = <Sha256Hasher as Hasher>::Function::default();\n        \"hello\".hash(&mut a);\n        let h1 = a.hash();\n        assert_eq!(\n            format!(\"{}\", HexSlice::new(h1.as_ref())),\n            \"24988b93623304735e42a71f5c1e161b9ee2b9c52a3be8260ea3b05fba4df22c\"\n        );\n    }\n\n    #[test]\n    fn test_sha256_node() {\n        let mut h1 = [0u8; 32];\n        let mut h2 = [0u8; 32];\n        let mut h3 = [0u8; 32];\n        h1[0] = 0x00;\n        h2[0] = 0x11;\n        h3[0] = 0x22;\n\n        let mut a = <Sha256Hasher as Hasher>::Function::default();\n        let h11 = h1;\n        let h12 = h2;\n        let h13 = h3;\n        let h21 = a.node(h11.into(), h12.into(), 0);\n        a.reset();\n        let h22 = a.node(h13.into(), h13.into(), 0);\n        a.reset();\n        let _h31 = a.node(h21.into(), h22.into(), 1);\n        a.reset();\n\n        let l1 = a.leaf(h1.into());\n        a.reset();\n\n        let l2 = a.leaf(h2.into());\n        a.reset();\n\n        // let mut s = vec![0x00];\n        // s.extend(h1.to_vec());\n        // println!(\n        //     \"1: {}\",\n        //     HexSlice::new(sha256_digest(s.as_slice()).as_slice())\n        // );\n\n        // assert_eq!(\n        //     format!(\"{}\", HexSlice::new(l1.as_ref())),\n        //     \"e96c39a7e54a9ac9d54330a0f2686f7dbc2d26df8385252fca5682ac319e9c7f\"\n        // );\n\n        // assert_eq!(\n        //     format!(\"{}\", HexSlice::new(h21.as_ref())),\n        //     \"f820fce7caf5f38f47d4893692c90ea92af47f10cdd3facd1b9e4642e5dfa84f\"\n        // );\n        // assert_eq!(\n        //     format!(\"{}\", HexSlice::new(h22.as_ref())),\n        //     \"888ee00d8142c7c7ca5635c1f175e11f3aa811c00ad3a200cd36584ce2a75384\"\n        // );\n        // assert_eq!(\n        //     format!(\"{}\", HexSlice::new(h31.as_ref())),\n        //     \"e6a6b12f6147ce9ce87c9f2a7f41ddd9587f6ea59ccbfb33fba08e3740d96200\"\n        // );\n\n        let v: Vec<DigestDomain> = vec![h1.into(), h2.into(), h3.into()];\n        let v2: Vec<DigestDomain> = vec![h1.into(), h2.into()];\n        let t = MerkleTree::<<Sha256Hasher as Hasher>::Domain, <Sha256Hasher as Hasher>::Function>::from_iter(v);\n        let t2 = MerkleTree::<<Sha256Hasher as Hasher>::Domain, <Sha256Hasher as Hasher>::Function>::from_iter(v2);\n\n        assert_eq!(t2.as_slice()[0].as_ref(), l1.as_ref());\n        assert_eq!(t2.as_slice()[1].as_ref(), l2.as_ref());\n        
assert_eq!(t2.as_slice()[2].as_ref(), h21.as_ref());\n\n        // TODO: Verify this is the right hash — bearing in mind that the two most significant bits must be cleared after each hash.\n        assert_eq!(\n            format!(\"{}\", HexSlice::new(t.root().as_ref())),\n            \"1c1afe57ff6efa4204cf4e17e20bf4d7f6ebf3a4c27391f93993291560107f88\"\n        );\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/hasher/types.rs",
    "content": "use crate::error::Result;\nuse merkle_light::hash::{Algorithm as LightAlgorithm, Hashable as LightHashable};\nuse pairing::bls12_381::Fr;\nuse rand::Rand;\nuse serde::de::DeserializeOwned;\nuse serde::ser::Serialize;\n\npub trait Domain:\n    Ord\n    + Copy\n    + Clone\n    + AsRef<[u8]>\n    + Default\n    + ::std::fmt::Debug\n    + Eq\n    + Send\n    + Sync\n    + From<Fr>\n    + Into<Fr>\n    + Rand\n    + Serialize\n    + DeserializeOwned\n{\n    fn serialize(&self) -> Vec<u8>;\n    fn into_bytes(&self) -> Vec<u8>;\n    fn try_from_bytes(raw: &[u8]) -> Result<Self>;\n    /// Write itself into the given slice, LittleEndian bytes.\n    fn write_bytes(&self, _: &mut [u8]) -> Result<()>;\n}\n\npub trait HashFunction<T: Domain>:\n    Clone + ::std::fmt::Debug + Eq + Send + Sync + LightAlgorithm<T>\n{\n    fn hash(data: &[u8]) -> T;\n\n    fn hash_leaf(data: &LightHashable<Self>) -> T {\n        let mut a = Self::default();\n        data.hash(&mut a);\n        let item_hash = a.hash();\n        a.leaf(item_hash)\n    }\n\n    fn hash_single_node(data: &LightHashable<Self>) -> T {\n        let mut a = Self::default();\n        data.hash(&mut a);\n        a.hash()\n    }\n}\n\npub trait Hasher: Clone + ::std::fmt::Debug + Eq + Default + Send + Sync {\n    type Domain: Domain + LightHashable<Self::Function>;\n    type Function: HashFunction<Self::Domain>;\n\n    fn kdf(data: &[u8], m: usize) -> Self::Domain;\n    fn sloth_encode(key: &Self::Domain, ciphertext: &Self::Domain, rounds: usize) -> Self::Domain;\n    fn sloth_decode(key: &Self::Domain, ciphertext: &Self::Domain, rounds: usize) -> Self::Domain;\n}\n"
  },
  {
    "path": "storage-proofs/src/layered_drgporep.rs",
    "content": "use std::sync::mpsc::channel;\n\nuse crossbeam_utils::thread;\nuse rayon::prelude::*;\nuse serde::de::Deserialize;\nuse serde::ser::Serialize;\nuse slog::*;\n\nuse crate::challenge_derivation::derive_challenges;\nuse crate::drgporep::{self, DrgPoRep};\nuse crate::drgraph::Graph;\nuse crate::error::{Error, Result};\nuse crate::hasher::{Domain, HashFunction, Hasher};\nuse crate::merkle::MerkleTree;\nuse crate::parameter_cache::ParameterSetIdentifier;\nuse crate::porep::{self, PoRep};\nuse crate::proof::ProofScheme;\nuse crate::vde;\nuse crate::SP_LOG;\n\n#[derive(Debug)]\npub struct SetupParams {\n    pub drg_porep_setup_params: drgporep::SetupParams,\n    pub layers: usize,\n    pub challenge_count: usize,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetIdentifier,\n{\n    pub drg_porep_public_params: drgporep::PublicParams<H, G>,\n    pub layers: usize,\n    pub challenge_count: usize,\n}\n\n#[derive(Clone)]\npub struct Tau<T: Domain> {\n    pub layer_taus: Vec<porep::Tau<T>>,\n    pub comm_r_star: T,\n}\n\nimpl<T: Domain> Tau<T> {\n    /// Return a single porep::Tau with the initial data and final replica commitments of layer_taus.\n    pub fn simplify(&self) -> porep::Tau<T> {\n        porep::Tau {\n            comm_r: self.layer_taus[self.layer_taus.len() - 1].comm_r,\n            comm_d: self.layer_taus[0].comm_d,\n        }\n    }\n}\n\nimpl<H, G> ParameterSetIdentifier for PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetIdentifier,\n{\n    fn parameter_set_identifier(&self) -> String {\n        format!(\n            \"layered_drgporep::PublicParams{{ drg_porep_identifier: {}, layers: {}, challenge_count: {} }}\",\n            self.drg_porep_public_params.parameter_set_identifier(),\n            self.layers,\n            self.challenge_count,\n        )\n    }\n}\n\nimpl<'a, H, G> From<&'a PublicParams<H, G>> for PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetIdentifier,\n{\n    fn from(pp: &PublicParams<H, G>) -> PublicParams<H, G> {\n        PublicParams {\n            drg_porep_public_params: pp.drg_porep_public_params.clone(),\n            layers: pp.layers,\n            challenge_count: pp.challenge_count,\n        }\n    }\n}\n\npub type EncodingProof<H> = drgporep::Proof<H>;\n\n#[derive(Debug, Clone)]\npub struct PublicInputs<T: Domain> {\n    pub replica_id: T,\n    pub challenge_count: usize,\n    pub tau: Option<porep::Tau<T>>,\n    pub comm_r_star: T,\n    pub k: Option<usize>,\n}\n\nimpl<T: Domain> PublicInputs<T> {\n    pub fn challenges(&self, leaves: usize, layer: u8, partition_k: Option<usize>) -> Vec<usize> {\n        derive_challenges::<T>(\n            self.challenge_count,\n            layer,\n            leaves,\n            &self.replica_id,\n            &self.comm_r_star,\n            partition_k.unwrap_or(0) as u8,\n        )\n    }\n}\n\npub struct PrivateInputs<'a, H: Hasher> {\n    pub replica: &'a [u8],\n    pub aux: Vec<MerkleTree<H::Domain, H::Function>>,\n    pub tau: Vec<porep::Tau<H::Domain>>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Proof<H: Hasher> {\n    #[serde(bound(\n        serialize = \"EncodingProof<H>: Serialize\",\n        deserialize = \"EncodingProof<H>: Deserialize<'de>\"\n    ))]\n    pub encoding_proofs: Vec<EncodingProof<H>>,\n    pub tau: Vec<porep::Tau<H::Domain>>,\n}\n\nimpl<H: Hasher> Proof<H> {\n    pub fn serialize(&self) -> Vec<u8> {\n        unimplemented!();\n    
}\n}\n\npub type PartitionProofs<H> = Vec<Proof<H>>;\n\nimpl<H: Hasher> Proof<H> {\n    pub fn new(\n        encoding_proofs: Vec<EncodingProof<H>>,\n        tau: Vec<porep::Tau<H::Domain>>,\n    ) -> Proof<H> {\n        Proof {\n            encoding_proofs,\n            tau,\n        }\n    }\n}\n\npub trait Layerable<H: Hasher>: Graph<H> {}\n\n/// Layers provides default implementations of methods required to handle proof and verification\n/// of layered proofs of replication. Implementations must provide transform and invert_transform methods.\npub trait Layers {\n    type Hasher: Hasher;\n    type Graph: Layerable<Self::Hasher> + ParameterSetIdentifier + Sync + Send;\n\n    /// transform a layer's public parameters, returning new public parameters corresponding to the next layer.\n    fn transform(\n        pp: &drgporep::PublicParams<Self::Hasher, Self::Graph>,\n        layer: usize,\n        layers: usize,\n    ) -> drgporep::PublicParams<Self::Hasher, Self::Graph>;\n\n    /// transform a layer's public parameters, returning new public parameters corresponding to the previous layer.\n    fn invert_transform(\n        pp: &drgporep::PublicParams<Self::Hasher, Self::Graph>,\n        layer: usize,\n        layers: usize,\n    ) -> drgporep::PublicParams<Self::Hasher, Self::Graph>;\n\n    fn prove_layers<'a>(\n        pp: &drgporep::PublicParams<Self::Hasher, Self::Graph>,\n        pub_inputs: &PublicInputs<<Self::Hasher as Hasher>::Domain>,\n        tau: &[porep::Tau<<Self::Hasher as Hasher>::Domain>],\n        aux: &'a [MerkleTree<\n            <Self::Hasher as Hasher>::Domain,\n            <Self::Hasher as Hasher>::Function,\n        >],\n        layers: usize,\n        total_layers: usize,\n        partition_count: usize,\n    ) -> Result<Vec<Vec<EncodingProof<Self::Hasher>>>> {\n        assert!(layers > 0);\n\n        let mut new_pp = None;\n\n        (0..layers)\n            .map(|layer| {\n                let pp = match new_pp {\n                    Some(ref new_pp) => new_pp,\n                    None => pp,\n                };\n                let inner_layers = layers - layer;\n\n                let new_priv_inputs = drgporep::PrivateInputs {\n                    aux: &porep::ProverAux {\n                        tree_d: aux[layer].clone(),\n                        tree_r: aux[layer + 1].clone(),\n                    },\n                };\n                let layer_diff = total_layers - inner_layers;\n\n                let partition_proofs: Vec<_> = (0..partition_count)\n                    .into_par_iter()\n                    .map(|k| {\n                        let drgporep_pub_inputs = drgporep::PublicInputs {\n                            replica_id: pub_inputs.replica_id,\n                            challenges: pub_inputs.challenges(\n                                pp.graph.size(),\n                                layer_diff as u8,\n                                Some(k),\n                            ),\n                            tau: Some(tau[layer]),\n                        };\n\n                        DrgPoRep::prove(pp, &drgporep_pub_inputs, &new_priv_inputs)\n                    })\n                    .collect::<Result<Vec<_>>>()?;\n\n                new_pp = Some(Self::transform(pp, layer_diff, total_layers));\n\n                Ok(partition_proofs)\n            })\n            .collect::<Result<Vec<_>>>()\n    }\n\n    fn extract_and_invert_transform_layers<'a>(\n        drgpp: &drgporep::PublicParams<Self::Hasher, Self::Graph>,\n        layers: usize,\n      
  replica_id: &<Self::Hasher as Hasher>::Domain,\n        data: &'a mut [u8],\n    ) -> Result<()> {\n        assert!(layers > 0);\n\n        (0..layers).fold((*drgpp).clone(), |current_drgpp, layer| {\n            let inverted = Self::invert_transform(&current_drgpp, layer, layers);\n            let mut res = DrgPoRep::extract_all(&inverted, replica_id, data).unwrap();\n\n            for (i, r) in res.iter_mut().enumerate() {\n                data[i] = *r;\n            }\n            inverted\n        });\n\n        Ok(())\n    }\n\n    fn transform_and_replicate_layers(\n        drgpp: &drgporep::PublicParams<Self::Hasher, Self::Graph>,\n        layers: usize,\n        replica_id: &<Self::Hasher as Hasher>::Domain,\n        data: &mut [u8],\n    ) -> Result<(\n        Vec<porep::Tau<<Self::Hasher as Hasher>::Domain>>,\n        Vec<MerkleTree<<Self::Hasher as Hasher>::Domain, <Self::Hasher as Hasher>::Function>>,\n    )> {\n        assert!(layers > 0);\n        let mut taus = Vec::with_capacity(layers);\n        let mut auxs: Vec<\n            MerkleTree<<Self::Hasher as Hasher>::Domain, <Self::Hasher as Hasher>::Function>,\n        > = Vec::with_capacity(layers);\n\n        let generate_merkle_trees_in_parallel = true;\n        if !generate_merkle_trees_in_parallel {\n            // This branch serializes encoding and merkle tree generation.\n            // However, it makes clear the underlying algorithm we reproduce\n            // in the parallel case. We should keep this code for documentation and to help\n            // alert us if drgporep's implementation changes (and breaks type-checking).\n            // It would not be a bad idea to add tests ensuring the parallel and serial cases\n            // generate the same results.\n            (0..layers).fold((*drgpp).clone(), |current_drgpp, layer| {\n                let previous_replica_tree = if !auxs.is_empty() {\n                    auxs.last().cloned()\n                } else {\n                    None\n                };\n\n                let (tau, aux) =\n                    DrgPoRep::replicate(&current_drgpp, replica_id, data, previous_replica_tree)\n                        .unwrap();\n\n                taus.push(tau);\n                auxs.push(aux.tree_r);\n\n                Self::transform(&current_drgpp, layer, layers)\n            });\n        } else {\n            // The parallel case is more complicated but should produce the same results as the\n            // serial case. Note that to make lifetimes work out, we have to inline and tease apart\n            // the definition of DrgPoRep::replicate. This is because as implemented, it entangles\n            // encoding and merkle tree generation too tightly to be used as a subcomponent.\n            // Instead, we need to create a scope which encloses all the work, spawning threads\n            // for merkle tree generation and sending the results back to a channel.\n            // The received results need to be sorted by layer because ordering of the completed results\n            // is not guaranteed. 
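(Each worker sends its (layer, tree) pair as it finishes, so receive order\n            // follows completion order, not layer order.) 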
Misordered results will be seen in practice when trees are small.\n\n            // The outer scope ensures that `tx` is dropped and closed before we read from `outer_rx`.\n            // Otherwise, the read loop will block forever waiting for more input.\n            let outer_rx = {\n                let (tx, rx) = channel();\n\n                let errf = |e| {\n                    let err_string = format!(\"{:?}\", e);\n                    error!(SP_LOG, \"MerkleTreeGenerationError\"; \"err\" => &err_string, \"backtrace\" => format!(\"{:?}\", failure::Backtrace::new()));\n                    Error::MerkleTreeGenerationError(err_string)\n                };\n\n                let _ = thread::scope(|scope| -> Result<()> {\n                    let mut threads = Vec::with_capacity(layers + 1);\n                    let initial_pp = (*drgpp).clone();\n                    (0..=layers).fold(initial_pp, |current_drgpp, layer| {\n                        let mut data_copy = vec![0; data.len()];\n                        data_copy[0..data.len()].clone_from_slice(data);\n\n                        let return_channel = tx.clone();\n                        let (transfer_tx, transfer_rx) =\n                            channel::<drgporep::PublicParams<Self::Hasher, Self::Graph>>();\n\n                        transfer_tx.send(current_drgpp.clone()).unwrap();\n\n                        let thread = scope.spawn(move |_| {\n                            // If we panic anywhere in this closure, thread.join() below will receive an error,\n                            // so it is safe to unwrap.\n                            let drgpp = transfer_rx.recv().unwrap();\n                            let tree_d = drgpp.graph.merkle_tree(&data_copy).unwrap();\n\n                            info!(SP_LOG, \"returning tree\"; \"layer\" => format!(\"{}\", layer));\n                            return_channel.send((layer, tree_d)).unwrap();\n                        });\n\n                        threads.push(thread);\n\n                        if layer < layers {\n                            info!(SP_LOG, \"encoding\"; \"layer\" => format!(\"{}\", layer));\n                            vde::encode(\n                                &current_drgpp.graph,\n                                current_drgpp.sloth_iter,\n                                replica_id,\n                                data,\n                            )\n                            .expect(\"encoding failed in thread\");\n                        }\n                        Self::transform(&current_drgpp, layer, layers)\n                    });\n\n                    for thread in threads {\n                        thread.join().map_err(errf)?;\n                    }\n\n                    Ok(())\n                })\n                .map_err(errf)?;\n\n                rx\n            };\n\n            let sorted_trees = {\n                let mut labeled_trees = outer_rx.iter().collect::<Vec<_>>();\n                labeled_trees.sort_by_key(|x| x.0);\n                labeled_trees\n            };\n\n            sorted_trees.iter().fold(\n                None,\n                |previous_tree: Option<&MerkleTree<_, _>>, (i, replica_tree)| {\n                    // Each iteration's replica_tree becomes the next iteration's previous_tree (data_tree).\n                    // The first iteration has no previous_tree.\n                    if let Some(data_tree) = previous_tree {\n                        let tau = porep::Tau {\n                            comm_r: 
replica_tree.root(),\n                            comm_d: data_tree.root(),\n                        };\n                        info!(SP_LOG, \"setting tau/aux\"; \"layer\" => format!(\"{}\", i - 1));\n                        taus.push(tau);\n                    };\n                    auxs.push(replica_tree.clone());\n\n                    Some(replica_tree)\n                },\n            );\n        };\n        Ok((taus, auxs))\n    }\n}\n\nimpl<'a, L: Layers> ProofScheme<'a> for L {\n    type PublicParams = PublicParams<L::Hasher, L::Graph>;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<L::Hasher as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<'a, L::Hasher>;\n    type Proof = Proof<L::Hasher>;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        let dp_sp = DrgPoRep::setup(&sp.drg_porep_setup_params)?;\n        let pp = PublicParams {\n            drg_porep_public_params: dp_sp,\n            layers: sp.layers,\n            challenge_count: sp.challenge_count,\n        };\n\n        Ok(pp)\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        let proofs = Self::prove_all_partitions(pub_params, pub_inputs, priv_inputs, 1)?;\n        let k = match pub_inputs.k {\n            None => 0,\n            Some(k) => k,\n        };\n\n        Ok(proofs[k].to_owned())\n    }\n\n    fn prove_all_partitions<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n        partition_count: usize,\n    ) -> Result<Vec<Self::Proof>> {\n        assert!(partition_count > 0);\n\n        let proofs = Self::prove_layers(\n            &pub_params.drg_porep_public_params,\n            pub_inputs,\n            &priv_inputs.tau,\n            &priv_inputs.aux,\n            pub_params.layers,\n            pub_params.layers,\n            partition_count,\n        )?;\n\n        let mut proof_columns = vec![Vec::new(); partition_count];\n\n        for partition_proofs in proofs.into_iter() {\n            for (j, proof) in partition_proofs.into_iter().enumerate() {\n                proof_columns[j].push(proof);\n            }\n        }\n\n        let proofs = proof_columns\n            .into_iter()\n            .map(|p| Proof::new(p, priv_inputs.tau.clone()))\n            .collect();\n\n        Ok(proofs)\n    }\n\n    fn verify_all_partitions(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        partition_proofs: &[Self::Proof],\n    ) -> Result<bool> {\n        for (k, proof) in partition_proofs.iter().enumerate() {\n            if proof.encoding_proofs.len() != pub_params.layers {\n                return Ok(false);\n            }\n\n            let total_layers = pub_params.layers;\n            let mut pp = pub_params.drg_porep_public_params.clone();\n            // TODO: verification is broken for the first node, figure out how to unbreak\n            // with permutations\n\n            let mut comm_rs = Vec::new();\n\n            for (layer, proof_layer) in proof.encoding_proofs.iter().enumerate() {\n                comm_rs.push(proof.tau[layer].comm_r);\n\n                let new_pub_inputs = drgporep::PublicInputs {\n                    replica_id: pub_inputs.replica_id,\n                    challenges: pub_inputs.challenges(\n                        
pub_params.drg_porep_public_params.graph.size(),\n                        layer as u8,\n                        Some(k),\n                    ),\n                    tau: Some(proof.tau[layer]),\n                };\n\n                let ep = &proof_layer;\n                let res = DrgPoRep::verify(\n                    &pp,\n                    &new_pub_inputs,\n                    &drgporep::Proof {\n                        data_root: ep.data_root,\n                        replica_root: ep.replica_root,\n                        replica_nodes: ep.replica_nodes.clone(),\n                        replica_parents: ep.replica_parents.clone(),\n                        // TODO: investigate if clone can be avoided by using a reference in drgporep::DataProof\n                        nodes: ep.nodes.clone(),\n                    },\n                )?;\n\n                pp = Self::transform(&pp, layer, total_layers);\n\n                if !res {\n                    return Ok(false);\n                }\n            }\n            let crs = comm_r_star::<L::Hasher>(&pub_inputs.replica_id, &comm_rs)?;\n\n            if crs != pub_inputs.comm_r_star {\n                return Ok(false);\n            }\n        }\n        Ok(true)\n    }\n\n    fn with_partition(pub_in: Self::PublicInputs, k: Option<usize>) -> Self::PublicInputs {\n        self::PublicInputs {\n            replica_id: pub_in.replica_id,\n            challenge_count: pub_in.challenge_count,\n            tau: pub_in.tau,\n            comm_r_star: pub_in.comm_r_star,\n            k,\n        }\n    }\n}\n\n// We need to calculate CommR* -- which is: H(replica_id|comm_r[0]|comm_r[1]|…comm_r[n])\nfn comm_r_star<H: Hasher>(replica_id: &H::Domain, comm_rs: &[H::Domain]) -> Result<H::Domain> {\n    let l = (comm_rs.len() + 1) * 32;\n    let mut bytes = vec![0; l];\n\n    replica_id.write_bytes(&mut bytes[0..32])?;\n\n    for (i, comm_r) in comm_rs.iter().enumerate() {\n        comm_r.write_bytes(&mut bytes[(i + 1) * 32..(i + 2) * 32])?;\n    }\n\n    Ok(H::Function::hash(&bytes))\n}\n\nimpl<'a, 'c, L: Layers> PoRep<'a, L::Hasher> for L {\n    type Tau = Tau<<L::Hasher as Hasher>::Domain>;\n    type ProverAux =\n        Vec<MerkleTree<<L::Hasher as Hasher>::Domain, <L::Hasher as Hasher>::Function>>;\n\n    fn replicate(\n        pp: &'a PublicParams<L::Hasher, L::Graph>,\n        replica_id: &<L::Hasher as Hasher>::Domain,\n        data: &mut [u8],\n        _data_tree: Option<\n            MerkleTree<<L::Hasher as Hasher>::Domain, <L::Hasher as Hasher>::Function>,\n        >,\n    ) -> Result<(Self::Tau, Self::ProverAux)> {\n        let (taus, auxs) = Self::transform_and_replicate_layers(\n            &pp.drg_porep_public_params,\n            pp.layers,\n            replica_id,\n            data,\n        )?;\n\n        let comm_rs: Vec<_> = taus.iter().map(|tau| tau.comm_r).collect();\n        let crs = comm_r_star::<L::Hasher>(replica_id, &comm_rs)?;\n        let tau = Tau {\n            layer_taus: taus,\n            comm_r_star: crs,\n        };\n        Ok((tau, auxs))\n    }\n\n    fn extract_all<'b>(\n        pp: &'b PublicParams<L::Hasher, L::Graph>,\n        replica_id: &'b <L::Hasher as Hasher>::Domain,\n        data: &'b [u8],\n    ) -> Result<Vec<u8>> {\n        let mut data = data.to_vec();\n\n        Self::extract_and_invert_transform_layers(\n            &pp.drg_porep_public_params,\n            pp.layers,\n            replica_id,\n            &mut data,\n        )?;\n\n        Ok(data)\n    }\n\n    fn extract(\n        _pp: 
&PublicParams<L::Hasher, L::Graph>,\n        _replica_id: &<L::Hasher as Hasher>::Domain,\n        _data: &[u8],\n        _node: usize,\n    ) -> Result<Vec<u8>> {\n        unimplemented!();\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/lib.rs",
    "content": "pub use storage_proofs_core::*;\npub use storage_proofs_porep as porep;\npub use storage_proofs_post as post;\n"
  },
  {
    "path": "storage-proofs/src/merkle.rs",
    "content": "#![allow(clippy::len_without_is_empty)]\n\nuse std::marker::PhantomData;\n\n// Reexport here, so we don't depend on merkle_light directly in other places.\nuse merkle_light::hash::Algorithm;\npub use merkle_light::merkle::MerkleTree;\nuse merkle_light::proof;\nuse pairing::bls12_381::Fr;\n\nuse crate::hasher::{Domain, Hasher};\n\n/// Representation of a merkle proof.\n/// Each element in the `path` vector consists of a tuple `(hash, is_right)`, with `hash` being the the hash of the node at the current level and `is_right` a boolean indicating if the path is taking the right path.\n/// The first element is the hash of leaf itself, and the last is the root hash.\n#[derive(Default, Debug, Clone, Serialize, Deserialize)]\npub struct MerkleProof<H: Hasher> {\n    pub root: H::Domain,\n    path: Vec<(H::Domain, bool)>,\n    leaf: H::Domain,\n\n    #[serde(skip)]\n    _h: PhantomData<H>,\n}\n\npub fn make_proof_for_test<H: Hasher>(\n    root: H::Domain,\n    leaf: H::Domain,\n    path: Vec<(H::Domain, bool)>,\n) -> MerkleProof<H> {\n    MerkleProof {\n        path,\n        root,\n        leaf,\n        _h: PhantomData,\n    }\n}\n\nimpl<H: Hasher> MerkleProof<H> {\n    pub fn new(n: usize) -> MerkleProof<H> {\n        let mut m = MerkleProof::default();\n        m.path = vec![(Default::default(), false); n];\n\n        m\n    }\n\n    pub fn new_from_proof(p: &proof::Proof<H::Domain>) -> MerkleProof<H> {\n        MerkleProof {\n            path: p\n                .lemma()\n                .iter()\n                .skip(1)\n                .zip(p.path().iter())\n                .map(|(hash, is_left)| (*hash, !is_left))\n                .collect::<Vec<_>>(),\n            root: p.root(),\n            leaf: p.item(),\n            _h: PhantomData,\n        }\n    }\n\n    /// Convert the merkle path into the format expected by the circuits, which is a vector of options of the tuples.\n    /// This does __not__ include the root and the leaf.\n    pub fn as_options(&self) -> Vec<Option<(Fr, bool)>> {\n        self.path\n            .iter()\n            .map(|v| Some((v.0.into(), v.1)))\n            .collect::<Vec<_>>()\n    }\n\n    pub fn as_pairs(&self) -> Vec<(Fr, bool)> {\n        self.path\n            .iter()\n            .map(|v| (v.0.into(), v.1))\n            .collect::<Vec<_>>()\n    }\n\n    /// Validates the MerkleProof and that it corresponds to the supplied node.\n    pub fn validate(&self, node: usize) -> bool {\n        let mut a = H::Function::default();\n\n        if path_index(&self.path) != node {\n            return false;\n        }\n\n        self.root()\n            == &(0..self.path.len()).fold(self.leaf, |h, i| {\n                a.reset();\n                let is_right = self.path[i].1;\n\n                let (left, right) = if is_right {\n                    (self.path[i].0, h)\n                } else {\n                    (h, self.path[i].0)\n                };\n\n                a.node(left, right, i)\n            })\n    }\n\n    /// Validates that the data hashes to the leaf of the merkle path.\n    pub fn validate_data(&self, data: &[u8]) -> bool {\n        self.leaf().into_bytes() == data\n    }\n\n    /// Returns the hash of leaf that this MerkleProof represents.\n    pub fn leaf(&self) -> &H::Domain {\n        &self.leaf\n    }\n\n    /// Returns the root hash\n    pub fn root(&self) -> &H::Domain {\n        &self.root\n    }\n\n    /// Returns the length of the proof. 
That is, all path elements plus 1 for the\n    /// leaf and 1 for the root.\n    pub fn len(&self) -> usize {\n        self.path.len() + 2\n    }\n\n    /// Serialize into bytes.\n    /// TODO: probably improve\n    pub fn serialize(&self) -> Vec<u8> {\n        let mut out = Vec::new();\n\n        for (hash, is_right) in &self.path {\n            out.extend(hash.serialize());\n            out.push(*is_right as u8);\n        }\n        out.extend(self.leaf().serialize());\n        out.extend(self.root().serialize());\n\n        out\n    }\n\n    pub fn path(&self) -> &Vec<(H::Domain, bool)> {\n        &self.path\n    }\n}\n\n// Recover the node index encoded by the path: reading from the root down, the bit at each level\n// is 1 iff the node on the path is the right child at that level.\nfn path_index<T: Domain>(path: &[(T, bool)]) -> usize {\n    path.iter().rev().fold(0, |acc, (_, is_right)| {\n        (acc << 1) + if *is_right { 1 } else { 0 }\n    })\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use rand::{self, Rng};\n    use std::io::Write;\n\n    use crate::drgraph::new_seed;\n    use crate::drgraph::{BucketGraph, Graph};\n    use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher};\n\n    fn merklepath<H: Hasher>() {\n        let g = BucketGraph::<H>::new(10, 5, 0, new_seed());\n        let mut rng = rand::thread_rng();\n        let node_size = 32;\n        let mut data = Vec::new();\n        for _ in 0..10 {\n            let elt: H::Domain = rng.gen();\n            let bytes = H::Domain::into_bytes(&elt);\n            data.write(&bytes).unwrap();\n        }\n\n        let tree = g.merkle_tree(data.as_slice()).unwrap();\n        for i in 0..10 {\n            let proof = tree.gen_proof(i);\n\n            assert!(proof.validate::<H::Function>());\n            let len = proof.lemma().len();\n            let mp = MerkleProof::<H>::new_from_proof(&proof);\n\n            assert_eq!(mp.len(), len);\n\n            assert!(mp.validate(i), \"failed to validate valid merkle path\");\n            let data_slice = &data[i * node_size..(i + 1) * node_size];\n            assert!(\n                mp.validate_data(data_slice),\n                \"failed to validate valid data\"\n            );\n        }\n    }\n\n    #[test]\n    fn merklepath_pedersen() {\n        merklepath::<PedersenHasher>();\n    }\n\n    #[test]\n    fn merklepath_sha256() {\n        merklepath::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn merklepath_blake2s() {\n        merklepath::<Blake2sHasher>();\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/merklepor.rs",
    "content": "use std::marker::PhantomData;\n\nuse crate::drgporep::DataProof;\nuse crate::drgraph::graph_height;\nuse crate::error::*;\nuse crate::hasher::{Domain, Hasher};\nuse crate::merkle::{MerkleProof, MerkleTree};\nuse crate::parameter_cache::ParameterSetIdentifier;\nuse crate::proof::ProofScheme;\n\n/// The parameters shared between the prover and verifier.\n#[derive(Clone, Debug)]\npub struct PublicParams {\n    /// How many leaves the underlying merkle tree has.\n    pub leaves: usize,\n    pub private: bool,\n}\n\nimpl ParameterSetIdentifier for PublicParams {\n    fn parameter_set_identifier(&self) -> String {\n        format!(\n            \"merklepor::PublicParams{{leaves: {}; private: {}}}\",\n            self.leaves, self.private\n        )\n    }\n}\n\n/// The inputs that are necessary for the verifier to verify the proof.\n#[derive(Debug, Clone)]\npub struct PublicInputs<T: Domain> {\n    /// The root hash of the underlying merkle tree.\n    pub commitment: Option<T>,\n    /// The challenge, which leaf to prove.\n    pub challenge: usize,\n}\n\n/// The inputs that are only available to the prover.\n#[derive(Debug)]\npub struct PrivateInputs<'a, H: 'a + Hasher> {\n    /// The data of the leaf.\n    pub leaf: H::Domain,\n    /// The underlying merkle tree.\n    pub tree: &'a MerkleTree<H::Domain, H::Function>,\n    _h: PhantomData<H>,\n}\n\nimpl<'a, H: Hasher> PrivateInputs<'a, H> {\n    pub fn new(leaf: H::Domain, tree: &'a MerkleTree<H::Domain, H::Function>) -> Self {\n        PrivateInputs {\n            leaf,\n            tree,\n            _h: PhantomData,\n        }\n    }\n}\n\n/// The proof that is returned from `prove`.\npub type Proof<H> = DataProof<H>;\n\n#[derive(Debug)]\npub struct SetupParams {\n    pub leaves: usize,\n    pub private: bool,\n}\n\n/// Merkle tree based proof of retrievability.\n#[derive(Debug, Default)]\npub struct MerklePoR<H: Hasher> {\n    _h: PhantomData<H>,\n}\n\nimpl<'a, H: 'a + Hasher> ProofScheme<'a> for MerklePoR<H> {\n    type PublicParams = PublicParams;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<H::Domain>;\n    type PrivateInputs = PrivateInputs<'a, H>;\n    type Proof = Proof<H>;\n\n    fn setup(sp: &SetupParams) -> Result<PublicParams> {\n        Ok(PublicParams {\n            leaves: sp.leaves,\n            private: sp.private,\n        })\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        let challenge = pub_inputs.challenge % pub_params.leaves;\n        let tree = priv_inputs.tree;\n\n        if let Some(ref commitment) = pub_inputs.commitment {\n            if commitment != &tree.root() {\n                return Err(Error::InvalidCommitment);\n            }\n        }\n\n        Ok(Proof {\n            proof: MerkleProof::new_from_proof(&tree.gen_proof(challenge)),\n            data: priv_inputs.leaf,\n        })\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        {\n            // This was verify_proof_meta.\n            let commitments_match = match pub_inputs.commitment {\n                Some(ref commitment) => commitment == proof.proof.root(),\n                None => true,\n            };\n\n            let path_length_match = graph_height(pub_params.leaves) == proof.proof.path().len();\n\n            if 
!(commitments_match && path_length_match) {\n                return Ok(false);\n            }\n        }\n        let data_valid = proof.proof.validate_data(&proof.data.into_bytes());\n        let path_valid = proof.proof.validate(pub_inputs.challenge);\n\n        Ok(data_valid && path_valid)\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use pairing::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n\n    use crate::drgraph::{new_seed, BucketGraph, Graph};\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::{Blake2sHasher, HashFunction, PedersenHasher, Sha256Hasher};\n    use crate::merkle::make_proof_for_test;\n    use crate::util::data_at_node;\n\n    fn test_merklepor<H: Hasher>() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let pub_params = PublicParams {\n            leaves: 32,\n            private: false,\n        };\n\n        let data: Vec<u8> = (0..32)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph = BucketGraph::<H>::new(32, 5, 0, new_seed());\n        let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs {\n            challenge: 3,\n            commitment: Some(tree.root()),\n        };\n\n        let leaf =\n            H::Domain::try_from_bytes(data_at_node(data.as_slice(), pub_inputs.challenge).unwrap())\n                .unwrap();\n\n        let priv_inputs = PrivateInputs::<H>::new(leaf, &tree);\n\n        let proof = MerklePoR::<H>::prove(&pub_params, &pub_inputs, &priv_inputs).unwrap();\n\n        assert!(MerklePoR::<H>::verify(&pub_params, &pub_inputs, &proof).unwrap());\n    }\n\n    #[test]\n    fn merklepor_pedersen() {\n        test_merklepor::<PedersenHasher>();\n    }\n\n    #[test]\n    fn merklepor_sha256() {\n        test_merklepor::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn merklepor_blake2s() {\n        test_merklepor::<Blake2sHasher>();\n    }\n\n    // Construct a proof that satisfies a cursory validation:\n    // Data and proof are minimally consistent.\n    // Proof root matches that requested in public inputs.\n    // However, note that data has no relationship to anything,\n    // and proof path does not actually prove that data was in the tree corresponding to expected root.\n    fn make_bogus_proof<H: Hasher>(\n        pub_inputs: &PublicInputs<H::Domain>,\n        rng: &mut XorShiftRng,\n    ) -> DataProof<H> {\n        let bogus_leaf: H::Domain = rng.gen();\n        let hashed_leaf = H::Function::hash_leaf(&bogus_leaf);\n\n        DataProof {\n            data: bogus_leaf,\n            proof: make_proof_for_test(\n                pub_inputs.commitment.unwrap(),\n                hashed_leaf,\n                vec![(hashed_leaf, true)],\n            ),\n        }\n    }\n\n    fn test_merklepor_validates<H: Hasher>() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let pub_params = PublicParams {\n            leaves: 32,\n            private: false,\n        };\n\n        let data: Vec<u8> = (0..32)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph = BucketGraph::<H>::new(32, 5, 0, new_seed());\n        let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs {\n            challenge: 3,\n            commitment: Some(tree.root()),\n        };\n\n        let bad_proof = 
make_bogus_proof::<H>(&pub_inputs, rng);\n\n        let verified = MerklePoR::verify(&pub_params, &pub_inputs, &bad_proof).unwrap();\n\n        // A bad proof should not be verified!\n        assert!(!verified);\n    }\n\n    #[test]\n    fn merklepor_actually_validates_sha256() {\n        test_merklepor_validates::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_blake2s() {\n        test_merklepor_validates::<Blake2sHasher>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_pedersen() {\n        test_merklepor_validates::<PedersenHasher>();\n    }\n\n    fn test_merklepor_validates_challenge_identity<H: Hasher>() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let pub_params = PublicParams {\n            leaves: 32,\n            private: false,\n        };\n\n        let data: Vec<u8> = (0..32)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph = BucketGraph::<H>::new(32, 5, 0, new_seed());\n        let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs {\n            challenge: 3,\n            commitment: Some(tree.root()),\n        };\n\n        let leaf =\n            H::Domain::try_from_bytes(data_at_node(data.as_slice(), pub_inputs.challenge).unwrap())\n                .unwrap();\n\n        let priv_inputs = PrivateInputs::<H>::new(leaf, &tree);\n\n        let proof = MerklePoR::<H>::prove(&pub_params, &pub_inputs, &priv_inputs).unwrap();\n\n        let different_pub_inputs = PublicInputs {\n            challenge: 999,\n            commitment: Some(tree.root()),\n        };\n\n        let verified = MerklePoR::<H>::verify(&pub_params, &different_pub_inputs, &proof).unwrap();\n\n        // A proof created with the wrong challenge should not be verified!\n        assert!(!verified);\n    }\n\n    #[test]\n    fn merklepor_actually_validates_challenge_identity_sha256() {\n        test_merklepor_validates_challenge_identity::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_challenge_identity_blake2s() {\n        test_merklepor_validates_challenge_identity::<Blake2sHasher>();\n    }\n\n    #[test]\n    fn merklepor_actually_validates_challenge_identity_pedersen() {\n        test_merklepor_validates_challenge_identity::<PedersenHasher>();\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/parameter_cache.rs",
    "content": "use crate::error::*;\nuse bellman::groth16::Parameters;\nuse bellman::{groth16, Circuit};\nuse fs2::FileExt;\nuse itertools::Itertools;\nuse rand::XorShiftRng;\nuse sapling_crypto::jubjub::JubjubEngine;\nuse sha2::{Digest, Sha256};\n\nuse std::env;\nuse std::fs::{self, create_dir_all};\nuse std::io::{Seek, SeekFrom};\nuse std::path::{Path, PathBuf};\nuse std::time::Instant;\n\nuse crate::SP_LOG;\n\n/// Bump this when circuits change to invalidate the cache.\npub const VERSION: usize = 9;\n\npub const PARAMETER_CACHE_DIR: &str = \"/tmp/filecoin-proof-parameters/\";\n\nfn parameter_cache_dir_name() -> String {\n    match env::var(\"FILECOIN_PARAMETER_CACHE\") {\n        Ok(dir) => dir,\n        Err(_) => String::from(PARAMETER_CACHE_DIR),\n    }\n}\n\npub fn parameter_cache_dir() -> PathBuf {\n    Path::new(&parameter_cache_dir_name()).to_path_buf()\n}\n\npub fn parameter_cache_path(filename: &str) -> PathBuf {\n    let name = parameter_cache_dir_name();\n    let dir = Path::new(&name);\n    dir.join(format!(\"v{}-{}\", VERSION, filename))\n}\n\npub trait ParameterSetIdentifier: Clone {\n    fn parameter_set_identifier(&self) -> String;\n}\n\npub trait CacheableParameters<E: JubjubEngine, C: Circuit<E>, PP>\nwhere\n    PP: ParameterSetIdentifier,\n{\n    fn cache_prefix() -> String;\n    fn cache_identifier(pub_params: &PP) -> Option<String> {\n        let param_identifier = pub_params.parameter_set_identifier();\n        info!(SP_LOG, \"parameter set identifier for cache: {}\", param_identifier; \"target\" => \"params\");\n        let mut hasher = Sha256::default();\n        hasher.input(&param_identifier.into_bytes());\n        let circuit_hash = hasher.result();\n        Some(format!(\n            \"{}-{:02x}\",\n            Self::cache_prefix(),\n            circuit_hash.iter().format(\"\")\n        ))\n    }\n\n    fn get_groth_params(\n        circuit: C,\n        pub_params: &PP,\n        rng: &mut XorShiftRng,\n    ) -> Result<groth16::Parameters<E>> {\n        let generate = || {\n            info!(SP_LOG, \"Actually generating groth params.\"; \"target\" => \"params\");\n            let start = Instant::now();\n            let parameters = groth16::generate_random_parameters::<E, _, _>(circuit, rng);\n            let generation_time = start.elapsed();\n            info!(SP_LOG, \"groth_parameter_generation_time: {:?}\", generation_time; \"target\" => \"stats\");\n            parameters\n        };\n\n        match Self::cache_identifier(pub_params) {\n            Some(id) => {\n                let cache_dir = parameter_cache_dir();\n                create_dir_all(cache_dir)?;\n                let cache_path = parameter_cache_path(&id);\n                info!(SP_LOG, \"checking cache_path: {:?}\", cache_path; \"target\" => \"params\");\n\n                read_cached_params(&cache_path).or_else(|_| {\n                    ensure_parent(&cache_path)?;\n\n                    let mut f = fs::OpenOptions::new()\n                        .read(true)\n                        .write(true)\n                        .create(true)\n                        .open(&cache_path)?;\n                    f.lock_exclusive()?;\n\n                    let p = generate()?;\n\n                    p.write(&mut f)?;\n\n                    let bytes = f.seek(SeekFrom::End(0))?;\n\n                    info!(SP_LOG, \"wrote parameters to cache {:?} \", f; \"target\" => \"params\");\n                    info!(SP_LOG, \"groth_parameter_bytes: {}\", bytes; \"target\" => \"stats\");\n                    
Ok(p)\n                })\n            }\n            None => Ok(generate()?),\n        }\n    }\n}\n\nfn ensure_parent(path: &PathBuf) -> Result<()> {\n    match path.parent() {\n        Some(dir) => {\n            create_dir_all(dir)?;\n            Ok(())\n        }\n        None => Ok(()),\n    }\n}\n\npub fn read_cached_params<E: JubjubEngine>(cache_path: &PathBuf) -> Result<groth16::Parameters<E>> {\n    ensure_parent(cache_path)?;\n\n    let mut f = fs::OpenOptions::new().read(true).open(&cache_path)?;\n    f.lock_exclusive()?;\n    info!(SP_LOG, \"reading groth params from cache: {:?}\", cache_path; \"target\" => \"params\");\n\n    let params = Parameters::read(&f, false).map_err(Error::from);\n\n    let bytes = f.seek(SeekFrom::End(0))?;\n    info!(SP_LOG, \"groth_parameter_bytes: {}\", bytes; \"target\" => \"stats\");\n\n    params\n}\n\npub fn write_params_to_cache<E: JubjubEngine>(\n    p: groth16::Parameters<E>,\n    cache_path: &PathBuf,\n) -> Result<groth16::Parameters<E>> {\n    ensure_parent(cache_path)?;\n\n    let mut f = fs::OpenOptions::new()\n        .read(true)\n        .write(true)\n        .create(true)\n        .open(&cache_path)?;\n    f.lock_exclusive()?;\n\n    p.write(&mut f)?;\n    info!(SP_LOG, \"wrote parameters to cache {:?} \", f; \"target\" => \"params\");\n    Ok(p)\n}\n"
  },
  {
    "path": "storage-proofs/src/partitions.rs",
    "content": "pub type Partitions = Option<usize>;\n\npub fn partition_count(partitions: Partitions) -> usize {\n    match partitions {\n        None => 1,\n        Some(0) => panic!(\"cannot specify zero partitions\"),\n        Some(k) => k,\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/piece_inclusion_proof.rs",
    "content": "use std::marker::PhantomData;\n\nuse merkle_light::hash::Algorithm;\nuse merkle_light::proof::Proof;\n\nuse crate::error::*;\nuse crate::hasher::{Domain, Hasher};\nuse crate::merkle::MerkleTree;\n\ntype InclusionProof<T> = Proof<T>;\n\n/// A FileInclusionProof contains a merkle inclusion proof for the first and last node\n/// of a piece. This ensures all 'edge' hashes necessary to generate a complete merkle\n/// tree are available.\n///\n/// Depending on the position of the nodes, not every hash provided will actually be needed.\n/// As a space optimization, and at the cost of greater complexity in the encoding, 'interior' nodes\n/// of either path may be omitted.\npub struct PieceInclusionProof<H: Hasher> {\n    first_node_proof: InclusionProof<H::Domain>,\n    last_node_proof: InclusionProof<H::Domain>,\n    _h: PhantomData<H>,\n}\n\n/// file_inclusion_proofs takes a merkle tree and a slice of piece lengths, and returns\n/// a vector of file inclusion proofs corresponding to the pieces. This assumes that the first\n/// piece begins at offset 0, and that each piece begins directly after the previous piece ends.\n/// For this method to work, the piece data used to validate pieces will need to be padded as necessary,\n/// and pieces will need to be aligned (to 128-byte chunks for Fr32 bit-padding) when written.\npub fn file_inclusion_proofs<H: Hasher>(\n    tree: &MerkleTree<H::Domain, H::Function>,\n    piece_lengths: &[usize],\n) -> Vec<PieceInclusionProof<H>> {\n    bounds(piece_lengths)\n        .iter()\n        .map(|(start, end)| file_inclusion_proof(tree, *start, end - 1))\n        .collect()\n}\n\n// Given a set of lengths, return corresponding (start, end) pairs for successive pieces.\nfn bounds(lengths: &[usize]) -> Vec<(usize, usize)> {\n    let mut start = 0;\n    let mut bounds = Vec::with_capacity(lengths.len());\n    for length in lengths {\n        let end = start + length;\n        bounds.push((start, end));\n        start = end;\n    }\n    bounds\n}\n\n/// file_inclusion_proof takes a merkle tree and the index positions of the first and last nodes\n/// of the piece whose inclusion should be proved. 
It returns the corresponding PieceInclusionProof.\n/// For the resulting proof to be valid, first_node must be <= last_node.\npub fn file_inclusion_proof<H: Hasher>(\n    tree: &MerkleTree<H::Domain, H::Function>,\n    first_node: usize,\n    last_node: usize,\n) -> PieceInclusionProof<H> {\n    PieceInclusionProof {\n        first_node_proof: tree.gen_proof(first_node),\n        last_node_proof: tree.gen_proof(last_node),\n        _h: PhantomData,\n    }\n}\n\nimpl<H: Hasher> PieceInclusionProof<H> {\n    /// verify takes a merkle root and (pre-processed) piece data.\n    /// Iff it returns true, then the PieceInclusionProof indeed proves that piece's\n    /// bytes were included in the merkle tree corresponding to root -- and at the\n    /// position encoded in the proof.\n    fn verify(&self, root: &H::Domain, piece: &[u8]) -> bool {\n        // These checks are superfluous but inexpensive and clarifying.\n        if !(self.first_node_proof.validate::<H::Function>()\n            && self.last_node_proof.validate::<H::Function>())\n        {\n            return false;\n        }\n        // If the computed root is equal to the provided root, then the piece was provably\n        // present in the data from which the merkle tree was constructed.\n        match compute_root::<H>(&self.first_node_proof, &self.last_node_proof, piece) {\n            Ok(computed_root) => *root == computed_root,\n            Err(_) => false,\n        }\n    }\n}\n\n/// Compute the root which results when hashing the supplied piece_data, supplemented by the hashes\n/// in the left (first) and right (last) inclusion proofs from the PieceInclusionProof.\nfn compute_root<H: Hasher>(\n    left_proof: &InclusionProof<H::Domain>,\n    right_proof: &InclusionProof<H::Domain>,\n    piece_data: &[u8],\n) -> Result<H::Domain> {\n    // Zip the left and right proof_vecs into one.\n    let proof_vecs = proof_vec(left_proof).zip(proof_vec(right_proof));\n\n    let mut hasher = H::Function::default();\n\n    let mut last_row = Vec::new();\n\n    for chunk in piece_data.chunks(32) {\n        last_row.push(H::Domain::try_from_bytes(chunk)?);\n    }\n\n    for (height, ((l_hash, l_is_left), (r_hash, r_is_left))) in proof_vecs.enumerate() {\n        let mut row = Vec::new();\n        if !*l_is_left {\n            row.push(*l_hash);\n        };\n        row.extend(last_row);\n        if *r_is_left {\n            row.push(*r_hash);\n        }\n        last_row = hash_pairs::<H>(&mut hasher, row.as_slice(), height)?;\n    }\n    assert_eq!(last_row.len(), 1);\n    Ok(last_row[0])\n}\n\n/// For each successive pair of input hashes in row, construct a new hash according to the method used by `hasher.node`,\n/// returning a vector of the constructed hashes, in order.\n/// The result will be an error (resulting in a failed proof) if row does not contain an even number of hashes.\nfn hash_pairs<H: Hasher>(\n    hasher: &mut H::Function,\n    row: &[H::Domain],\n    height: usize,\n) -> Result<Vec<H::Domain>> {\n    let hashed: Result<Vec<_>> = row\n        .chunks(2)\n        .map(|pair| {\n            if pair.len() != 2 {\n                // If input is malformed, return an Err, which will fail the proof.\n                return Err(Error::MalformedInput);\n            }\n            hasher.reset();\n            Ok(hasher.node(pair[0], pair[1], height))\n        })\n        .collect();\n\n    hashed\n}\n\n/// Return a vector of (hash, bool) pairs, where the bool indicates whether the paired hash\n/// will be the left child of the next node 
hashed. In order to accomplish this, we skip\n/// the first hash provided (in proof.lemma()). This is an implementation detail of how a\n/// merkle_light::proof::Proof is structured.\nfn proof_vec<T: Domain>(proof: &Proof<T>) -> impl Iterator<Item = (&T, &bool)> {\n    proof.lemma().iter().skip(1).zip(proof.path().iter())\n}\n\n/// verify_file_inclusion_proofs returns true iff each provided piece is proved with respect to root\n/// by the corresponding (by index) proof.\npub fn verify_file_inclusion_proofs<H: Hasher>(\n    root: &H::Domain,\n    proofs: &[PieceInclusionProof<H>],\n    pieces: &[&[u8]],\n) -> bool {\n    proofs\n        .iter()\n        .zip(pieces)\n        .all(|(proof, piece)| proof.verify(root, piece))\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::drgraph::{new_seed, BucketGraph, Graph};\n    use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher};\n\n    const NODE_BYTES: usize = 32;\n\n    #[test]\n    fn compute_bounds() {\n        assert_eq!(bounds(&[3, 5, 7, 9]), [(0, 3), (3, 8), (8, 15), (15, 24)]);\n    }\n\n    #[test]\n    fn file_inclusion_proof_pedersen() {\n        test_file_inclusion_proof::<PedersenHasher>();\n    }\n\n    #[test]\n    fn file_inclusion_proof_sha256() {\n        test_file_inclusion_proof::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn file_inclusion_proof_blake2s() {\n        test_file_inclusion_proof::<Blake2sHasher>();\n    }\n\n    fn test_file_inclusion_proof<H: Hasher>() {\n        let nodes = 5;\n        for i in 1..nodes {\n            for j in 1..(nodes - i) {\n                file_inclusion_proof_aux::<H>(nodes, &[i, j]);\n            }\n        }\n\n        file_inclusion_proof_aux::<H>(32, &[32usize]);\n        file_inclusion_proof_aux::<H>(32, &[10usize, 15, 7]);\n        file_inclusion_proof_aux::<H>(32, &[3usize, 9, 20]);\n        file_inclusion_proof_aux::<H>(32, &[4usize, 6, 14]);\n    }\n\n    fn file_inclusion_proof_aux<H: Hasher>(nodes: usize, node_lengths: &[usize]) {\n        let size = nodes * NODE_BYTES;\n        let g = BucketGraph::<H>::new(nodes, 0, 0, new_seed());\n        let mut data = Vec::<u8>::with_capacity(nodes);\n\n        for i in 0..size {\n            data.push(\n                (((i / NODE_BYTES) + i)\n                // Mask out two most significant bits so we will always be Fr32,\n                & 63) as u8,\n            )\n        }\n\n        let tree = g.merkle_tree(&data).unwrap();\n        let lengths: Vec<usize> = node_lengths.iter().map(|x| x * 32).collect();\n\n        let proofs = file_inclusion_proofs::<H>(&tree, &node_lengths);\n        let bounds = bounds(lengths.as_slice());\n        let mut pieces = Vec::new();\n        for (start, end) in &bounds {\n            pieces.push(&data[*start..*end])\n        }\n\n        assert_eq!(\n            true,\n            verify_file_inclusion_proofs(&tree.root(), &proofs, &pieces),\n        );\n\n        let mut wrong_data = Vec::<u8>::with_capacity(size);\n        let mut wrong_pieces = Vec::new();\n\n        for i in 0..size {\n            wrong_data.push(\n                ((i / NODE_BYTES) + (2 * i)\n                // Mask out two most significant bits so we will always be Fr32,\n                & 63) as u8,\n            )\n        }\n\n        for (start, end) in &bounds {\n            wrong_pieces.push(&wrong_data[*start..*end])\n        }\n\n        assert_eq!(\n            false,\n            verify_file_inclusion_proofs(&tree.root(), &proofs, &wrong_pieces),\n        )\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/porc.rs",
    "content": "use std::marker::PhantomData;\n\nuse num_bigint::BigUint;\nuse num_traits::ToPrimitive;\nuse serde::de::Deserialize;\nuse serde::ser::Serialize;\n\nuse crate::drgraph::graph_height;\nuse crate::error::{Error, Result};\nuse crate::hasher::{Domain, Hasher};\nuse crate::merkle::{MerkleProof, MerkleTree};\nuse crate::parameter_cache::ParameterSetIdentifier;\nuse crate::proof::ProofScheme;\n\n#[derive(Debug, Clone)]\npub struct SetupParams {\n    /// How many leaves the underlying merkle tree has.\n    pub leaves: usize,\n    /// The number of sectors that are proven over.\n    pub sectors_count: usize,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams {\n    // NOTE: This assumes all sectors are the same size, which may not remain a valid assumption.\n    /// How many leaves the underlying merkle tree has.\n    pub leaves: usize,\n    /// The number of sectors that are proven over.\n    pub sectors_count: usize,\n}\n\nimpl ParameterSetIdentifier for PublicParams {\n    fn parameter_set_identifier(&self) -> String {\n        format!(\n            \"porc::PublicParams{{leaves: {} sectors_count: {}}}\",\n            self.leaves, self.sectors_count,\n        )\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct PublicInputs<'a, T: 'a + Domain> {\n    /// The challenges, which leafs to prove.\n    pub challenges: &'a [usize],\n    pub challenged_sectors: &'a [usize],\n    /// The root hashes of the underlying merkle trees.\n    pub commitments: &'a [T],\n}\n\n#[derive(Debug, Clone)]\npub struct PrivateInputs<'a, H: 'a + Hasher> {\n    pub trees: &'a [&'a MerkleTree<H::Domain, H::Function>],\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Proof<H: Hasher>(\n    #[serde(bound(\n        serialize = \"MerkleProof<H>: Serialize\",\n        deserialize = \"MerkleProof<H>: Deserialize<'de>\"\n    ))]\n    Vec<MerkleProof<H>>,\n);\n\nimpl<H: Hasher> Proof<H> {\n    pub fn leafs(&self) -> Vec<&H::Domain> {\n        self.0.iter().map(|p| p.leaf()).collect()\n    }\n\n    pub fn commitments(&self) -> Vec<&H::Domain> {\n        self.0.iter().map(|p| p.root()).collect()\n    }\n\n    pub fn paths(&self) -> Vec<&Vec<(H::Domain, bool)>> {\n        self.0.iter().map(|p| p.path()).collect()\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct PoRC<'a, H>\nwhere\n    H: 'a + Hasher,\n{\n    _h: PhantomData<&'a H>,\n}\n\nimpl<'a, H: 'a + Hasher> ProofScheme<'a> for PoRC<'a, H> {\n    type PublicParams = PublicParams;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<'a, H::Domain>;\n    type PrivateInputs = PrivateInputs<'a, H>;\n    type Proof = Proof<H>;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        Ok(PublicParams {\n            leaves: sp.leaves,\n            sectors_count: sp.sectors_count,\n        })\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        if priv_inputs.trees.len() != pub_params.sectors_count {\n            return Err(Error::MalformedInput);\n        }\n\n        if priv_inputs.trees.len() != pub_params.sectors_count {\n            return Err(Error::MalformedInput);\n        }\n        let proofs = pub_inputs\n            .challenges\n            .iter()\n            .zip(pub_inputs.challenged_sectors)\n            .map(|(challenged_leaf, challenged_sector)| {\n                let tree = priv_inputs.trees[*challenged_sector];\n\n                if 
pub_inputs.commitments[*challenged_sector] != tree.root() {\n                    return Err(Error::InvalidCommitment);\n                }\n\n                Ok(MerkleProof::new_from_proof(\n                    &tree.gen_proof(*challenged_leaf),\n                ))\n            })\n            .collect::<Result<Vec<_>>>()?;\n\n        Ok(Proof(proofs))\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        // validate each proof\n        for (merkle_proof, (challenged_leaf, challenged_sector)) in proof.0.iter().zip(\n            pub_inputs\n                .challenges\n                .iter()\n                .zip(pub_inputs.challenged_sectors.iter()),\n        ) {\n            // validate the commitment\n            if merkle_proof.root() != &pub_inputs.commitments[*challenged_sector] {\n                return Ok(false);\n            }\n\n            // validate the path length\n            if graph_height(pub_params.leaves) != merkle_proof.path().len() {\n                return Ok(false);\n            }\n\n            if !merkle_proof.validate(*challenged_leaf) {\n                return Ok(false);\n            }\n        }\n\n        Ok(true)\n    }\n}\n\npub fn slice_mod(challenge: impl AsRef<[u8]>, count: usize) -> usize {\n    // TODO: verify this is the correct way to derive the challenge\n    let big_challenge = BigUint::from_bytes_be(challenge.as_ref());\n\n    (big_challenge % count)\n        .to_usize()\n        .expect(\"failed modulus operation\")\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use pairing::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n\n    use crate::drgraph::{new_seed, BucketGraph, Graph};\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::{Blake2sHasher, HashFunction, PedersenHasher, Sha256Hasher};\n    use crate::merkle::make_proof_for_test;\n\n    fn test_porc<H: Hasher>() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let leaves = 32;\n        let pub_params = PublicParams {\n            leaves,\n            sectors_count: 1,\n        };\n\n        let data: Vec<u8> = (0..32)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph = BucketGraph::<H>::new(32, 5, 0, new_seed());\n        let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs {\n            challenges: &vec![rng.gen_range(0, leaves), rng.gen_range(0, leaves)],\n            challenged_sectors: &[0, 0],\n            commitments: &[tree.root()],\n        };\n\n        let priv_inputs = PrivateInputs::<H> { trees: &[&tree] };\n\n        let proof = PoRC::<H>::prove(&pub_params, &pub_inputs, &priv_inputs).unwrap();\n\n        assert!(PoRC::<H>::verify(&pub_params, &pub_inputs, &proof).unwrap());\n    }\n\n    #[test]\n    fn porc_pedersen() {\n        test_porc::<PedersenHasher>();\n    }\n\n    #[test]\n    fn porc_sha256() {\n        test_porc::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn porc_blake2s() {\n        test_porc::<Blake2sHasher>();\n    }\n\n    // Construct a proof that satisfies a cursory validation:\n    // Data and proof are minimally consistent.\n    // Proof root matches that requested in public inputs.\n    // However, note that data has no relationship to anything,\n    // and proof path does not actually prove that data was in the tree corresponding to expected 
root.\n    fn make_bogus_proof<H: Hasher>(\n        pub_inputs: &PublicInputs<H::Domain>,\n        rng: &mut XorShiftRng,\n    ) -> MerkleProof<H> {\n        let bogus_leaf: H::Domain = rng.gen();\n        let hashed_leaf = H::Function::hash_leaf(&bogus_leaf);\n\n        make_proof_for_test(\n            pub_inputs.commitments[0],\n            hashed_leaf,\n            vec![(hashed_leaf, true)],\n        )\n    }\n\n    fn test_porc_validates<H: Hasher>() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let pub_params = PublicParams {\n            leaves: 32,\n            sectors_count: 1,\n        };\n\n        let data: Vec<u8> = (0..32)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph = BucketGraph::<H>::new(32, 5, 0, new_seed());\n        let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs::<H::Domain> {\n            challenges: &vec![rng.gen(), rng.gen()],\n            challenged_sectors: &[0],\n            commitments: &[tree.root()],\n        };\n\n        let bad_proof = Proof(vec![\n            make_bogus_proof::<H>(&pub_inputs, rng),\n            make_bogus_proof::<H>(&pub_inputs, rng),\n        ]);\n\n        let verified = PoRC::verify(&pub_params, &pub_inputs, &bad_proof).unwrap();\n\n        // A bad proof should not be verified!\n        assert!(!verified);\n    }\n\n    #[test]\n    fn porc_actually_validates_sha256() {\n        test_porc_validates::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn porc_actually_validates_blake2s() {\n        test_porc_validates::<Blake2sHasher>();\n    }\n\n    #[test]\n    fn porc_actually_validates_pedersen() {\n        test_porc_validates::<PedersenHasher>();\n    }\n\n    fn test_porc_validates_challenge_identity<H: Hasher>() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let leaves = 32;\n\n        let pub_params = PublicParams {\n            leaves,\n            sectors_count: 1,\n        };\n\n        let data: Vec<u8> = (0..32)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph = BucketGraph::<H>::new(32, 5, 0, new_seed());\n        let tree = graph.merkle_tree(data.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs {\n            challenges: &vec![rng.gen_range(0, leaves), rng.gen_range(0, leaves)],\n            challenged_sectors: &[0],\n            commitments: &[tree.root()],\n        };\n\n        let priv_inputs = PrivateInputs::<H> { trees: &[&tree] };\n\n        let proof = PoRC::<H>::prove(&pub_params, &pub_inputs, &priv_inputs).unwrap();\n\n        let different_pub_inputs = PublicInputs {\n            challenges: &vec![rng.gen_range(0, leaves), rng.gen_range(0, leaves)],\n            challenged_sectors: &[0],\n            commitments: &[tree.root()],\n        };\n\n        let verified = PoRC::<H>::verify(&pub_params, &different_pub_inputs, &proof).unwrap();\n\n        // A proof created with the wrong challenge should not be verified!\n        assert!(!verified);\n    }\n\n    #[test]\n    fn porc_actually_validates_challenge_identity_sha256() {\n        test_porc_validates_challenge_identity::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn porc_actually_validates_challenge_identity_blake2s() {\n        test_porc_validates_challenge_identity::<Blake2sHasher>();\n    }\n\n    #[test]\n    fn 
porc_actually_validates_challenge_identity_pedersen() {\n        test_porc_validates_challenge_identity::<PedersenHasher>();\n    }\n\n    #[test]\n    fn test_slice_mod() {\n        let cases: [(Vec<u8>, usize, usize); 5] = [\n            (vec![0], 10, 0),\n            (vec![1], 10, 1),\n            (vec![9], 10, 9),\n            (vec![10], 10, 0),\n            (vec![100, 0, 0, 1], 10, 1),\n        ];\n\n        for (challenge, count, expected) in &cases {\n            assert_eq!(slice_mod(challenge, *count), *expected);\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/porep.rs",
    "content": "use crate::error::Result;\nuse crate::hasher::{Domain, HashFunction, Hasher};\nuse crate::merkle::MerkleTree;\nuse crate::proof::ProofScheme;\n\n#[derive(Debug)]\npub struct PublicParams {\n    pub time: usize,\n}\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub struct Tau<T> {\n    pub comm_r: T,\n    pub comm_d: T,\n}\n\nimpl<T: Domain> Tau<T> {\n    pub fn new(comm_d: T, comm_r: T) -> Self {\n        Tau { comm_d, comm_r }\n    }\n}\n\n#[derive(Debug)]\npub struct PublicInputs<'a, T: Domain> {\n    pub id: &'a [u8],\n    pub r: usize,\n    pub tau: Tau<T>,\n}\n\n#[derive(Debug)]\npub struct PrivateInputs<'a> {\n    pub replica: &'a [u8],\n}\n\n#[derive(Debug, Clone)]\npub struct ProverAux<H: Hasher> {\n    pub tree_d: MerkleTree<H::Domain, H::Function>,\n    pub tree_r: MerkleTree<H::Domain, H::Function>,\n}\n\nimpl<H: Hasher> ProverAux<H> {\n    pub fn new(\n        tree_d: MerkleTree<H::Domain, H::Function>,\n        tree_r: MerkleTree<H::Domain, H::Function>,\n    ) -> Self {\n        ProverAux { tree_d, tree_r }\n    }\n}\n\npub trait PoRep<'a, H: Hasher>: ProofScheme<'a> {\n    type Tau;\n    type ProverAux;\n\n    fn replicate(\n        pub_params: &'a Self::PublicParams,\n        replica_id: &H::Domain,\n        data: &mut [u8],\n        data_tree: Option<MerkleTree<H::Domain, H::Function>>,\n    ) -> Result<(Self::Tau, Self::ProverAux)>;\n\n    fn extract_all(\n        pub_params: &'a Self::PublicParams,\n        replica_id: &H::Domain,\n        replica: &[u8],\n    ) -> Result<Vec<u8>>;\n    fn extract(\n        pub_params: &'a Self::PublicParams,\n        replica_id: &H::Domain,\n        replica: &[u8],\n        node: usize,\n    ) -> Result<Vec<u8>>;\n}\n\npub fn replica_id<H: Hasher>(prover_id: [u8; 32], sector_id: [u8; 32]) -> H::Domain {\n    let mut to_hash = [0; 64];\n    to_hash[..32].copy_from_slice(&prover_id);\n    to_hash[32..].copy_from_slice(&sector_id);\n\n    H::Function::hash_leaf(&to_hash)\n}\n"
  },
  {
    "path": "storage-proofs/src/proof.rs",
    "content": "use crate::error::Result;\nuse serde::de::DeserializeOwned;\nuse serde::ser::Serialize;\n\n/// The ProofScheme trait provides the methods that any proof scheme needs to implement.\npub trait ProofScheme<'a> {\n    type PublicParams: Clone;\n    type SetupParams;\n    type PublicInputs: Clone;\n    type PrivateInputs;\n    type Proof: Clone + Serialize + DeserializeOwned;\n\n    /// setup is used to generate public parameters from setup parameters in order to specialize\n    /// a ProofScheme to the specific parameters required by a consumer.\n    fn setup(_: &Self::SetupParams) -> Result<Self::PublicParams>;\n\n    /// prove generates and returns a proof from public parameters, public inputs, and private inputs.\n    fn prove<'b>(\n        _: &'b Self::PublicParams,\n        _: &'b Self::PublicInputs,\n        _: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof>;\n\n    fn prove_all_partitions<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_in: &'b Self::PublicInputs,\n        priv_in: &'b Self::PrivateInputs,\n        partition_count: usize,\n    ) -> Result<Vec<Self::Proof>> {\n        (0..partition_count)\n            .map(|k| {\n                let partition_pub_in = Self::with_partition((*pub_in).clone(), Some(k));\n                Self::prove(pub_params, &partition_pub_in, priv_in)\n            })\n            .collect::<Result<Vec<Self::Proof>>>()\n    }\n\n    /// verify returns true if the supplied proof is valid for the given public parameter and public inputs.\n    /// Note that verify does not have access to private inputs.\n    /// Remember that proof is untrusted, and any data it provides MUST be validated as corresponding\n    /// to the supplied public parameters and inputs.\n    fn verify(\n        _pub_params: &Self::PublicParams,\n        _pub_inputs: &Self::PublicInputs,\n        _proof: &Self::Proof,\n    ) -> Result<bool> {\n        unimplemented!();\n    }\n\n    fn verify_all_partitions(\n        pub_params: &Self::PublicParams,\n        pub_in: &Self::PublicInputs,\n        proofs: &[Self::Proof],\n    ) -> Result<bool> {\n        for (k, proof) in proofs.iter().enumerate() {\n            let partition_pub_in = Self::with_partition((*pub_in).clone(), Some(k));\n            if !Self::verify(pub_params, &partition_pub_in, proof)? {\n                return Ok(false);\n            }\n        }\n        Ok(true)\n    }\n\n    // This method must be specialized by concrete ProofScheme implementations which use partitions.\n    fn with_partition(pub_in: Self::PublicInputs, _k: Option<usize>) -> Self::PublicInputs {\n        pub_in\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/test_helper.rs",
    "content": "use pairing::bls12_381::{Bls12, Fr};\nuse pairing::PrimeFieldRepr;\nuse pairing::{BitIterator, PrimeField};\nuse rand::Rng;\nuse sapling_crypto::pedersen_hash;\n\nuse crate::crypto;\nuse crate::error;\nuse crate::fr32::{bytes_into_fr, fr_into_bytes};\nuse crate::hasher::pedersen::{PedersenDomain, PedersenFunction, PedersenHasher};\nuse crate::merkle::{MerkleProof, MerkleTree};\n\n#[macro_export]\nmacro_rules! table_tests {\n    ($property_test_func:ident {\n        $( $(#[$attr:meta])* $test_name:ident( $( $param:expr ),* ); )+\n    }) => {\n        $(\n            $(#[$attr])*\n                #[test]\n            fn $test_name() {\n                $property_test_func($( $param ),* )\n            }\n        )+\n    }\n}\n\npub struct FakeDrgParams {\n    pub replica_id: Fr,\n    pub replica_nodes: Vec<Fr>,\n    pub replica_nodes_paths: Vec<Vec<Option<(Fr, bool)>>>,\n    pub replica_root: Fr,\n    pub replica_parents: Vec<Vec<Fr>>,\n    pub replica_parents_paths: Vec<Vec<Vec<Option<(Fr, bool)>>>>,\n    pub data_nodes: Vec<Fr>,\n    pub data_nodes_paths: Vec<Vec<Option<(Fr, bool)>>>,\n    pub data_root: Fr,\n}\n\npub fn fake_drgpoprep_proof<R: Rng>(\n    rng: &mut R,\n    tree_depth: usize,\n    m: usize,\n    sloth_rounds: usize,\n    challenge_count: usize,\n) -> FakeDrgParams {\n    let replica_id: Fr = rng.gen();\n    let challenge = m + 1;\n    // Part 1: original data inputs\n    // generate a leaf\n    let data_node: Fr = rng.gen();\n    // generate a fake merkle tree for the leaf and get commD\n    let (data_node_path, data_root) = random_merkle_path_with_value(rng, tree_depth, &data_node, 0);\n\n    // Part 2: replica data inputs\n    // generate parent nodes\n    let replica_parents: Vec<Fr> = (0..m).map(|_| rng.gen()).collect();\n    // run kdf for proverid, parent nodes\n    let ciphertexts = replica_parents\n        .iter()\n        .fold(\n            Ok(fr_into_bytes::<Bls12>(&replica_id)),\n            |acc: error::Result<Vec<u8>>, parent: &Fr| {\n                acc.and_then(|mut acc| {\n                    parent.into_repr().write_le(&mut acc)?;\n                    Ok(acc)\n                })\n            },\n        )\n        .unwrap();\n\n    let key = crypto::kdf::kdf::<Bls12>(ciphertexts.as_slice(), m);\n    // run sloth(key, node)\n    let replica_node: Fr = crypto::sloth::encode::<Bls12>(&key, &data_node, sloth_rounds);\n    // run fake merkle with only the first 1+m real leaves\n\n    let mut leaves = replica_parents.clone();\n    leaves.push(data_node);\n    // ensure we have an even number of leaves\n    if m + 1 % 2 != 0 {\n        leaves.push(rng.gen());\n    }\n\n    // get commR\n    let subtree = MerkleTree::<PedersenDomain, PedersenFunction>::from_data(leaves);\n    let subtree_root: Fr = subtree.root().into();\n    let subtree_depth = subtree.height() - 1; // .height() inludes the leaf\n    let remaining_depth = tree_depth - subtree_depth;\n    let (remaining_path, replica_root) =\n        random_merkle_path_with_value(rng, remaining_depth, &subtree_root, remaining_depth);\n\n    // generate merkle path for challenged node and parents\n    let replica_parents_paths: Vec<_> = (0..m)\n        .map(|i| {\n            let subtree_proof =\n                MerkleProof::<PedersenHasher>::new_from_proof(&subtree.gen_proof(i));\n            let mut subtree_path = subtree_proof.as_options();\n            subtree_path.extend(remaining_path.clone());\n            subtree_path\n        })\n        .collect();\n\n    let replica_node_path = {\n        
let subtree_proof =\n            MerkleProof::<PedersenHasher>::new_from_proof(&subtree.gen_proof(challenge));\n        let mut subtree_path = subtree_proof.as_options();\n        subtree_path.extend(&remaining_path);\n        subtree_path\n    };\n\n    assert_eq!(data_node_path.len(), replica_node_path.len());\n\n    FakeDrgParams {\n        replica_id,\n        replica_nodes: (0..challenge_count).map(|_| replica_node).collect(),\n        replica_nodes_paths: (0..challenge_count)\n            .map(|_| replica_node_path.clone())\n            .collect(),\n        replica_root,\n        replica_parents: (0..challenge_count)\n            .map(|_| replica_parents.clone())\n            .collect(),\n        replica_parents_paths: (0..challenge_count)\n            .map(|_| replica_parents_paths.clone())\n            .collect(),\n        data_nodes: (0..challenge_count).map(|_| data_node).collect(),\n        data_nodes_paths: (0..challenge_count)\n            .map(|_| data_node_path.clone())\n            .collect(),\n        data_root,\n    }\n}\n\npub fn random_merkle_path_with_value<R: Rng>(\n    rng: &mut R,\n    tree_depth: usize,\n    value: &Fr,\n    offset: usize,\n) -> (Vec<Option<(Fr, bool)>>, Fr) {\n    let auth_path: Vec<Option<(Fr, bool)>> = vec![Some((rng.gen(), rng.gen())); tree_depth];\n\n    let mut cur = if offset == 0 {\n        let bytes = fr_into_bytes::<Bls12>(&value);\n        bytes_into_fr::<Bls12>(&bytes).unwrap()\n    } else {\n        *value\n    };\n\n    for (i, p) in auth_path.clone().into_iter().enumerate() {\n        let (uncle, is_right) = p.unwrap();\n        let mut lhs = cur;\n        let mut rhs = uncle;\n\n        if is_right {\n            ::std::mem::swap(&mut lhs, &mut rhs);\n        }\n\n        let mut lhs: Vec<bool> = BitIterator::new(lhs.into_repr()).collect();\n        let mut rhs: Vec<bool> = BitIterator::new(rhs.into_repr()).collect();\n\n        lhs.reverse();\n        rhs.reverse();\n\n        cur = pedersen_hash::pedersen_hash::<Bls12, _>(\n            pedersen_hash::Personalization::MerkleTree(i + offset),\n            lhs.into_iter()\n                .take(Fr::NUM_BITS as usize)\n                .chain(rhs.into_iter().take(Fr::NUM_BITS as usize)),\n            &crypto::pedersen::JJ_PARAMS,\n        )\n        .into_xy()\n        .0;\n    }\n\n    (auth_path, cur)\n}\n\npub fn random_merkle_path<R: Rng>(\n    rng: &mut R,\n    tree_depth: usize,\n) -> (Vec<Option<(Fr, bool)>>, Fr, Fr) {\n    let value: Fr = rng.gen();\n\n    let (path, root) = random_merkle_path_with_value(rng, tree_depth, &value, 0);\n\n    (path, value, root)\n}\n"
  },
  {
    "path": "storage-proofs/src/util.rs",
    "content": "use bellman::{ConstraintSystem, SynthesisError};\nuse pairing::Engine;\nuse sapling_crypto::circuit::boolean::{self, AllocatedBit, Boolean};\n\nuse crate::error;\n\npub const NODE_SIZE: usize = 32;\n\n/// Returns the start position of the data, 0-indexed.\npub fn data_at_node_offset(v: usize) -> usize {\n    v * NODE_SIZE\n}\n\n/// Returns the byte slice representing one node (of uniform size, NODE_SIZE) at position v in data.\npub fn data_at_node(data: &[u8], v: usize) -> error::Result<&[u8]> {\n    let offset = data_at_node_offset(v);\n\n    if offset + NODE_SIZE > data.len() {\n        return Err(error::Error::OutOfBounds(offset + NODE_SIZE, data.len()));\n    }\n\n    Ok(&data[offset..offset + NODE_SIZE])\n}\n\n/// Converts bytes into their bit representation, in little endian format.\npub fn bytes_into_bits(bytes: &[u8]) -> Vec<bool> {\n    bytes\n        .iter()\n        .flat_map(|&byte| (0..8).map(move |i| (byte >> i) & 1u8 == 1u8))\n        .collect()\n}\n\n/// Converts the bytes into a boolean vector, in little endian format.\npub fn bytes_into_boolean_vec<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    value: Option<&[u8]>,\n    size: usize,\n) -> Result<Vec<boolean::Boolean>, SynthesisError> {\n    let values = match value {\n        Some(value) => bytes_into_bits(value).into_iter().map(Some).collect(),\n        None => vec![None; size],\n    };\n\n    let bits = values\n        .into_iter()\n        .enumerate()\n        .map(|(i, b)| {\n            Ok(Boolean::from(AllocatedBit::alloc(\n                cs.namespace(|| format!(\"bit {}\", i)),\n                b,\n            )?))\n        })\n        .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n    Ok(bits)\n}\n\n#[allow(dead_code)]\n#[inline]\nfn bool_to_u8(bit: bool, offset: usize) -> u8 {\n    if bit {\n        1u8 << offset\n    } else {\n        0u8\n    }\n}\n\n/// Converts a slice of bools into their byte representation, in little endian.\n#[allow(dead_code)]\npub fn bits_to_bytes(bits: &[bool]) -> Vec<u8> {\n    bits.chunks(8)\n        .map(|bits| {\n            bool_to_u8(bits[7], 7)\n                | bool_to_u8(bits[6], 6)\n                | bool_to_u8(bits[5], 5)\n                | bool_to_u8(bits[4], 4)\n                | bool_to_u8(bits[3], 3)\n                | bool_to_u8(bits[2], 2)\n                | bool_to_u8(bits[1], 1)\n                | bool_to_u8(bits[0], 0)\n        })\n        .collect()\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n    use crate::circuit::test::*;\n    use pairing::bls12_381::*;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n\n    #[test]\n    fn test_bytes_into_boolean_vec() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for i in 0..100 {\n            let data: Vec<u8> = (0..i + 10).map(|_| rng.gen()).collect();\n            let bools = {\n                let mut cs = cs.namespace(|| format!(\"round: {}\", i));\n                bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), 8).unwrap()\n            };\n\n            let bytes_actual: Vec<u8> = bits_to_bytes(\n                bools\n                    .iter()\n                    .map(|b| b.get_value().unwrap())\n                    .collect::<Vec<bool>>()\n                    .as_slice(),\n            );\n\n            assert_eq!(data, bytes_actual);\n        }\n    }\n\n    #[test]\n    fn test_bool_to_u8() {\n        assert_eq!(bool_to_u8(false, 2), 
0b0000_0000);\n        assert_eq!(bool_to_u8(true, 0), 0b0000_0001);\n        assert_eq!(bool_to_u8(true, 1), 0b0000_0010);\n        assert_eq!(bool_to_u8(true, 7), 0b1000_0000);\n    }\n\n    #[test]\n    fn test_bits_into_bytes() {\n        assert_eq!(\n            bits_to_bytes(&[true, false, false, false, false, false, false, false]),\n            vec![1]\n        );\n        assert_eq!(\n            bits_to_bytes(&[true, true, true, true, true, true, true, true]),\n            vec![255]\n        );\n    }\n\n    #[test]\n    fn test_bytes_into_bits() {\n        assert_eq!(\n            bytes_into_bits(&[1u8]),\n            vec![true, false, false, false, false, false, false, false]\n        );\n\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for i in 10..100 {\n            let bytes: Vec<u8> = (0..i).map(|_| rng.gen()).collect();\n\n            let bits = bytes_into_bits(bytes.as_slice());\n            assert_eq!(bits_to_bytes(bits.as_slice()), bytes);\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/vde.rs",
    "content": "use crate::drgraph::Graph;\nuse crate::error::Result;\nuse crate::hasher::{Domain, Hasher};\nuse crate::util::{data_at_node, data_at_node_offset};\n\n/// encodes the data and overwrites the original data slice.\npub fn encode<'a, H, G>(\n    graph: &'a G,\n    sloth_iter: usize,\n    replica_id: &'a H::Domain,\n    data: &'a mut [u8],\n) -> Result<()>\nwhere\n    H: Hasher,\n    G: Graph<H>,\n{\n    let degree = graph.degree();\n\n    // Because a node always follows all of its parents in the data,\n    // the nodes are by definition already topologically sorted.\n    // Therefore, if we simply traverse the data in order, encoding each node in place,\n    // we can always get each parent's encodings with a simple lookup --\n    // since we will already have encoded the parent earlier in the traversal.\n    // The only subtlety is that a ZigZag graph may be reversed, so the direction\n    // of the traversal must also be.\n\n    for n in 0..graph.size() {\n        let node = if graph.forward() {\n            n\n        } else {\n            // If the graph is reversed, traverse in reverse order.\n            (graph.size() - n) - 1\n        };\n\n        let parents = graph.parents(node);\n        assert_eq!(parents.len(), graph.degree(), \"wrong number of parents\");\n\n        let key = create_key::<H>(replica_id, node, &parents, data, degree)?;\n        let start = data_at_node_offset(node);\n        let end = start + 32;\n\n        let node_data = H::Domain::try_from_bytes(&data[start..end])?;\n        let encoded = H::sloth_encode(&key, &node_data, sloth_iter);\n\n        encoded.write_bytes(&mut data[start..end])?;\n    }\n\n    Ok(())\n}\n\npub fn decode<'a, H, G>(\n    graph: &'a G,\n    sloth_iter: usize,\n    replica_id: &'a H::Domain,\n    data: &'a [u8],\n) -> Result<Vec<u8>>\nwhere\n    H: Hasher,\n    G: Graph<H>,\n{\n    // TODO: parallelize\n    (0..graph.size()).fold(Ok(Vec::with_capacity(data.len())), |acc, i| {\n        acc.and_then(|mut acc| {\n            acc.extend(decode_block(graph, sloth_iter, replica_id, data, i)?.into_bytes());\n            Ok(acc)\n        })\n    })\n}\n\npub fn decode_block<'a, H, G>(\n    graph: &'a G,\n    sloth_iter: usize,\n    replica_id: &'a H::Domain,\n    data: &'a [u8],\n    v: usize,\n) -> Result<H::Domain>\nwhere\n    H: Hasher,\n    G: Graph<H>,\n{\n    let parents = graph.parents(v);\n    let key = create_key::<H>(replica_id, v, &parents, &data, graph.degree())?;\n    let node_data = H::Domain::try_from_bytes(&data_at_node(data, v)?)?;\n\n    // TODO: round constant\n    Ok(H::sloth_decode(&key, &node_data, sloth_iter))\n}\n\npub fn decode_domain_block<'a, H, G>(\n    graph: &'a G,\n    sloth_iter: usize,\n    replica_id: &'a H::Domain,\n    data: &'a [H::Domain],\n    v: usize,\n) -> Result<H::Domain>\nwhere\n    H: Hasher,\n    G: Graph<H>,\n{\n    let parents = graph.parents(v);\n\n    let byte_data = data\n        .iter()\n        .flat_map(H::Domain::into_bytes)\n        .collect::<Vec<u8>>();\n\n    let key = create_key::<H>(replica_id, v, &parents, &byte_data, graph.degree())?;\n    let node_data = data[v];\n\n    // TODO: round constant\n    Ok(H::sloth_decode(&key, &node_data, sloth_iter))\n}\n\nfn create_key<H: Hasher>(\n    id: &H::Domain,\n    node: usize,\n    parents: &[usize],\n    data: &[u8],\n    m: usize,\n) -> Result<H::Domain> {\n    // ciphertexts will become a buffer of the layout\n    // id | encodedParentNode1 | encodedParentNode1 | ...\n\n    let mut ciphertexts = vec![0u8; 32 * 
(parents.len() + 1)];\n    id.write_bytes(&mut ciphertexts[0..32])?;\n\n    for (i, parent) in parents.iter().enumerate() {\n        // special super shitty case\n        // TODO: unsuck\n        if node == parents[0] {\n            // skip, as we would only write 0s, but the vector is prefilled with 0.\n        } else {\n            let start = (i + 1) * 32;\n            let end = (i + 2) * 32;\n            ciphertexts[start..end].copy_from_slice(data_at_node(data, *parent)?);\n        }\n    }\n\n    Ok(H::kdf(ciphertexts.as_slice(), m))\n}\n"
  },
  {
    "path": "storage-proofs/src/vdf.rs",
    "content": "use crate::error::Result;\nuse crate::hasher::Domain;\nuse serde::de::DeserializeOwned;\nuse serde::ser::Serialize;\n\n/// Generic trait to represent any Verfiable Delay Function (VDF).\npub trait Vdf<T: Domain>: Clone + ::std::fmt::Debug {\n    type SetupParams: Clone + ::std::fmt::Debug;\n    type PublicParams: Clone + ::std::fmt::Debug;\n    type Proof: Clone + ::std::fmt::Debug + Serialize + DeserializeOwned;\n\n    fn setup(setup_params: &Self::SetupParams) -> Result<Self::PublicParams>;\n    fn eval(public_params: &Self::PublicParams, input: &T) -> Result<(T, Self::Proof)>;\n    fn verify(public_params: &Self::PublicParams, input: &T, proof: &Self::Proof) -> Result<bool>;\n\n    fn key(pp: &Self::PublicParams) -> T;\n    fn rounds(pp: &Self::PublicParams) -> usize;\n    fn extract_output(proof: &Self::Proof) -> T;\n}\n"
  },
  {
    "path": "storage-proofs/src/vdf_post.rs",
    "content": "use std::cmp;\nuse std::marker::PhantomData;\n\nuse bitvec::{self, BitVec};\nuse byteorder::{ByteOrder, LittleEndian};\nuse itertools::Itertools;\nuse pairing::bls12_381::{Bls12, Fr, FrRepr};\nuse pairing::{Engine, Field, PrimeField};\nuse serde::de::Deserialize;\nuse serde::ser::Serialize;\n\nuse crate::error::{Error, Result};\nuse crate::fr32::fr_into_bytes;\nuse crate::hasher::{Domain, HashFunction, Hasher};\nuse crate::merkle::MerkleTree;\nuse crate::parameter_cache::ParameterSetIdentifier;\nuse crate::porc::{self, PoRC};\nuse crate::proof::ProofScheme;\nuse crate::vdf::Vdf;\n\n#[derive(Clone, Debug)]\npub struct SetupParams<T: Domain, V: Vdf<T>> {\n    /// The number of challenges to be asked at each iteration.\n    pub challenge_count: usize,\n    /// Size of a sealed sector in bytes.\n    pub sector_size: usize,\n    /// Number of times we repeat an online Proof-of-Replication in one single PoSt.\n    pub post_epochs: usize,\n    pub setup_params_vdf: V::SetupParams,\n    /// The number of sectors that are proven over.\n    pub sectors_count: usize,\n}\n\n#[derive(Clone, Debug)]\npub struct PublicParams<T: Domain, V: Vdf<T>> {\n    /// The number of challenges to be asked at each iteration.\n    pub challenge_count: usize,\n    /// Size of a sealed sector in bytes.\n    pub sector_size: usize,\n    /// Number of times we repeat an online Proof-of-Replication in one single PoSt.\n    pub post_epochs: usize,\n    pub pub_params_vdf: V::PublicParams,\n    /// The number of leaves in one sector.\n    pub leaves: usize,\n    /// The number of sectors that are proven over.\n    pub sectors_count: usize,\n    /// The number of bits per challenge (the length of a merkle path)\n    pub challenge_bits: usize,\n    pub seed_bits: usize,\n}\n\nimpl<T: Domain, V: Vdf<T>> ParameterSetIdentifier for PublicParams<T, V> {\n    fn parameter_set_identifier(&self) -> String {\n        format!(\n            \"vdf_post::PublicParams{{challenge_count: {}, sector_size: {}, post_epochs: {}, pub_params_vdf: FIXME, leaves: {}, sectors_count: {}}}\",\n            self.challenge_count, self.sector_size, self.post_epochs,\n            //self.pub_params_vdf.parameter_set_identifier(), // FIXME: implement\n            self.leaves, self.sectors_count\n        )\n    }\n}\n\n#[derive(Clone, Debug)]\npub struct PublicInputs<T: Domain> {\n    /// The root hash of the merkle tree of each sealed sector.\n    pub commitments: Vec<T>,\n    /// The initial set of challenges. 
Each epoch's challenges are derived from this seed.\n    pub challenge_seed: T,\n}\n\n#[derive(Clone, Debug)]\npub struct PrivateInputs<'a, H: 'a + Hasher> {\n    pub trees: &'a [&'a MerkleTree<H::Domain, H::Function>],\n    _h: PhantomData<H>,\n}\n\nimpl<'a, H: 'a + Hasher> PrivateInputs<'a, H> {\n    pub fn new(trees: &'a [&'a MerkleTree<H::Domain, H::Function>]) -> Self {\n        PrivateInputs {\n            trees,\n            _h: PhantomData,\n        }\n    }\n}\n\npub fn compute_root_commitment<T: Domain>(commitments: &[T]) -> T {\n    // NOTE: We're just returning the first commitment so we have a consistent, valid value.\n    // In reality, we will need some kind of vector commitment, but we haven't committed to what yet.\n    // This is here so we can get all the plumbing right without having to.\n    commitments[0]\n}\n\n/// VDF-PoSt\n/// This is one construction of a Proof-of-Spacetime.\n/// It currently only supports proving over a single sector.\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct Proof<'a, H: Hasher + 'a, V: Vdf<H::Domain>> {\n    /// `post_epochs` online Proof-of-Replication proofs.\n    #[serde(bound(\n        serialize = \"V::Proof: Serialize\",\n        deserialize = \"V::Proof: Deserialize<'de>\"\n    ))]\n    pub porep_proofs: Vec<<PoRC<'a, H> as ProofScheme<'a>>::Proof>,\n    /// `post_epochs - 1` VDF proofs\n    #[serde(bound(\n        serialize = \"V::Proof: Serialize\",\n        deserialize = \"V::Proof: Deserialize<'de>\"\n    ))]\n    pub vdf_proofs: Vec<V::Proof>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub ys: Vec<H::Domain>,\n    pub challenges: Vec<Vec<usize>>,\n    pub challenged_sectors: Vec<Vec<usize>>,\n    _v: PhantomData<V>,\n}\n\n#[derive(Clone, Debug)]\npub struct VDFPoSt<H: Hasher, V: Vdf<H::Domain>> {\n    _t: PhantomData<H>,\n    _v: PhantomData<V>,\n}\n\nimpl<'a, H: Hasher + 'a, V: Vdf<H::Domain>> ProofScheme<'a> for VDFPoSt<H, V> {\n    type PublicParams = PublicParams<H::Domain, V>;\n    type SetupParams = SetupParams<H::Domain, V>;\n    type PublicInputs = PublicInputs<H::Domain>;\n    type PrivateInputs = PrivateInputs<'a, H>;\n    type Proof = Proof<'a, H, V>;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        // Sector sizes which are powers of two have the form 100000 (i.e. 
leading one and all zeroes after).\n        let sector_size = sp.sector_size;\n        assert_eq!(\n            sector_size.count_ones(),\n            1,\n            \"sector size must be a power of 2\"\n        );\n        // Assuming well-formed (power of two) sector size, log2(sector_size) is given by number of trailing zeroes.\n        let log2 = sector_size.trailing_zeros();\n        let leaves = sector_size / 32;\n        let challenge_bits = (log2 - 5) as usize;\n        assert_eq!(\n            2u64.pow(challenge_bits as u32),\n            leaves as u64,\n            \"sanity check\"\n        );\n\n        Ok(PublicParams {\n            challenge_count: sp.challenge_count,\n            sector_size: sp.sector_size,\n            post_epochs: sp.post_epochs,\n            pub_params_vdf: V::setup(&sp.setup_params_vdf)?,\n            leaves,\n            sectors_count: sp.sectors_count,\n            challenge_bits,\n            seed_bits: 255,\n        })\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        if priv_inputs.trees.len() != pub_params.sectors_count {\n            return Err(Error::MalformedInput);\n        }\n\n        let challenge_count = pub_params.challenge_count;\n        let post_epochs = pub_params.post_epochs;\n\n        let pub_params_porep = porc::PublicParams {\n            leaves: pub_params.leaves,\n            sectors_count: pub_params.sectors_count,\n        };\n\n        let mut porep_proofs = Vec::with_capacity(post_epochs);\n        let mut vdf_proofs = Vec::with_capacity(post_epochs);\n        let mut ys = Vec::with_capacity(post_epochs - 1);\n        let mut challenges_vec = Vec::with_capacity(post_epochs);\n        let mut challenged_sectors_vec = Vec::with_capacity(post_epochs);\n\n        let mut challenge_stream = ChallengeStream::<H, V>::new(pub_params);\n\n        {\n            let mut mix = pub_inputs.challenge_seed;\n            let mut i = 0;\n\n            while let Some((challenges, challenged_sectors)) = challenge_stream.next(mix) {\n                assert!(\n                    challenges.len() == challenge_count,\n                    format!(\n                        \"expected {} challenges, but {} were mixed.\",\n                        challenge_count,\n                        challenges.len()\n                    )\n                );\n                challenges_vec.push(challenges.clone());\n                challenged_sectors_vec.push(challenged_sectors.clone());\n\n                let pub_inputs_porep = porc::PublicInputs {\n                    challenges: &challenges,\n                    challenged_sectors: &challenged_sectors,\n                    commitments: &pub_inputs.commitments,\n                };\n\n                let priv_inputs_porep = porc::PrivateInputs {\n                    trees: priv_inputs.trees,\n                };\n\n                let proof = PoRC::prove(&pub_params_porep, &pub_inputs_porep, &priv_inputs_porep)?;\n\n                // Skip last VDF evaluation.\n                if i < post_epochs {\n                    let x = extract_vdf_input::<H>(&proof);\n                    let (y, vdf_proof) = V::eval(&pub_params.pub_params_vdf, &x)?;\n\n                    ys.push(y);\n                    vdf_proofs.push(vdf_proof);\n                    mix = y;\n                } else {\n                    break;\n                }\n                
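// Record this epoch's online proof before moving on to the next epoch.\n                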
porep_proofs.push(proof);\n\n                i += 1;\n            }\n        }\n\n        Ok(Proof {\n            porep_proofs,\n            ys,\n            vdf_proofs,\n            challenges: challenges_vec,\n            challenged_sectors: challenged_sectors_vec,\n            _v: PhantomData,\n        })\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        let post_epochs = pub_params.post_epochs;\n\n        let mut mix = pub_inputs.challenge_seed;\n        let mut challenge_stream = ChallengeStream::<H, V>::new(pub_params);\n\n        let mut i = 0;\n        while let Some((challenges, challenged_sectors)) = challenge_stream.next(mix) {\n            if i >= post_epochs {\n                break;\n            }\n\n            // VDF Output Verification\n            {\n                if !V::verify(\n                    &pub_params.pub_params_vdf,\n                    &extract_vdf_input::<H>(&proof.porep_proofs[i]),\n                    &proof.vdf_proofs[i],\n                )? {\n                    return Ok(false);\n                }\n            }\n\n            // Explicit challenge verification is not needed here, since we generate the challenges ourselves\n            // and provide them as input to PoRC::verify below.\n\n            // TODO: Root Commitment verification.\n            // FIXME: Skip for now, but this is an absence that needs to be addressed once we have a vector commitment strategy.\n\n            // Online PoRep Verification\n            {\n                let pub_params_porep = porc::PublicParams {\n                    leaves: pub_params.leaves,\n                    sectors_count: pub_params.sectors_count,\n                };\n\n                let pub_inputs_porep = porc::PublicInputs {\n                    challenges: &challenges,\n                    challenged_sectors: &challenged_sectors,\n                    commitments: &pub_inputs.commitments,\n                };\n\n                if !PoRC::verify(&pub_params_porep, &pub_inputs_porep, &proof.porep_proofs[i])? {\n                    return Ok(false);\n                }\n            }\n\n            // update loop state\n            mix = proof.ys[i];\n            i += 1;\n        }\n        Ok(true)\n    }\n}\n\npub fn extract_vdf_input<H: Hasher>(proof: &porc::Proof<H>) -> H::Domain {\n    let leafs: Vec<u8> = proof.leafs().iter().fold(Vec::new(), |mut acc, leaf| {\n        acc.extend(leaf.as_ref());\n        acc\n    });\n\n    H::Function::hash(&leafs)\n}\n\n/// `derive_partial_challenges` generates `count` hashed 'partial' challenges, using `seed` as a source of randomness.\nfn derive_partial_challenges<H: Hasher>(count: usize, seed: &[u8]) -> Vec<H::Domain> {\n    (0..count)\n        .map(|j| {\n            let mut j_bytes = [0u8; 32];\n            LittleEndian::write_u32(&mut j_bytes[0..4], j as u32);\n\n            H::Function::hash(&[seed, &j_bytes].concat())\n        })\n        .collect()\n}\n\n/// `ChallengeStream` manages incremental challenge derivation.\n/// Consumers require groups of `challenge_count` challenges. 
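One group is consumed per post epoch. 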
Each round of challenge generation\n/// requires a new random input (`mix`).\n/// A `ChallengeStream` mediates between this usage requirement and the implementation details\n/// of the actual challenge generation mechanism.\nstruct ChallengeStream<H: Hasher, V: Vdf<H::Domain>> {\n    partial_challenges: Option<Vec<H::Domain>>,\n    challenge_count: usize,\n    partial_challenge_count: usize,\n    sectors_count: usize,\n    challenge_bits: usize,\n    _v: PhantomData<V>,\n}\n\nimpl<H: Hasher, V: Vdf<H::Domain>> ChallengeStream<H, V> {\n    /// A `ChallengeStream` must derive some shared parameters used in challenge derivation.\n    /// `new` initializes a new, stateful, `ChallengeStream` with these parameters.\n    fn new(pp: &PublicParams<H::Domain, V>) -> ChallengeStream<H, V> {\n        let challenge_count = pp.challenge_count;\n        let sectors_count = pp.sectors_count;\n        let challenge_bits = pp.challenge_bits;\n        let sub_challenges = pp.seed_bits / challenge_bits;\n        let partial_challenge_count =\n            ((pp.post_epochs * challenge_count) as f32 / sub_challenges as f32).ceil() as usize;\n\n        ChallengeStream {\n            partial_challenges: None,\n            challenge_count,\n            partial_challenge_count,\n            sectors_count,\n            challenge_bits,\n            _v: PhantomData,\n        }\n    }\n\n    /// A set of partial challenges must be generated as a one-time initialization.\n    /// These partial challenges are 'mixed' with randomness during challenge finalization.\n    /// Because partial challenge generation requires access to the first `mix` value as a random seed,\n    /// it must be deferred until the first set of challenges is requested.\n    fn ensure_partial_challenges(&mut self, mix: H::Domain) {\n        if self.partial_challenges.is_none() {\n            let partial_challenges = derive_partial_challenges::<H>(\n                self.partial_challenge_count,\n                &fr_into_bytes::<Bls12>(&mix.into()),\n            );\n\n            self.partial_challenges = Some(partial_challenges);\n        }\n    }\n\n    /// `next` takes a random value, `mix`, and returns an appropriate (conforming with `ChallengeStream`'s parameters)\n    /// set of 'final challenges' (and challenged sectors) suitable as input to PoRC.\n    /// This process consumes `partial_challenges`, mutating `ChallengeStream`'s state.\n    ///\n    // FIXME: It's currently possible that a partial_challenge is not completely consumed by production\n    // of all needed final challenges. In this case, the remainder will be needed as a witness to prove\n    // challenge-generation was performed correctly. However, `next` currently only returns the needed\n    // final challenges. 
This will have to be addressed when we implement challenge verification in circuits.\n    fn next(&mut self, mix: H::Domain) -> Option<(Vec<usize>, Vec<usize>)> {\n        self.ensure_partial_challenges(mix);\n\n        let mut partial_challenges = self.partial_challenges.clone().unwrap();\n\n        if partial_challenges.is_empty() {\n            None\n        } else {\n            let partial_challenge = partial_challenges.remove(0);\n            self.partial_challenges = Some(partial_challenges);\n\n            let mut all_challenges = Vec::with_capacity(self.challenge_count);\n            let mut all_challenged_sectors = Vec::with_capacity(self.challenge_count);\n            let mut remaining_challenges = self.challenge_count;\n\n            while all_challenges.len() < self.challenge_count {\n                let (challenges, challenged_sectors) = derive_final_challenges::<H, Bls12>(\n                    partial_challenge,\n                    mix,\n                    self.sectors_count,\n                    self.challenge_bits,\n                );\n\n                for i in 0..cmp::min(challenges.len(), remaining_challenges) {\n                    all_challenges.push(challenges[i]);\n                    all_challenged_sectors.push(challenged_sectors[i]);\n                }\n                remaining_challenges = self.challenge_count - all_challenges.len();\n            }\n            Some((all_challenges, all_challenged_sectors))\n        }\n    }\n}\n\n/// Returns (challenges, challenged_sectors)\n/// Note that if challenge_bits does not evenly divide 256, then the last challenge will be\n/// sampled from a space of only `remainder` bits.\nfn derive_final_challenges<H: Hasher, E: Engine>(\n    partial_challenge: H::Domain,\n    mix: H::Domain,\n    _sectors_count: usize,\n    challenge_bits: usize,\n) -> (Vec<usize>, Vec<usize>)\nwhere\n    <E as Engine>::Fr: std::convert::From<pairing::bls12_381::Fr>,\n{\n    type BV = BitVec<bitvec::LittleEndian, u8>;\n\n    let mut mixed = partial_challenge.into();\n    mixed.sub_assign(&mix.into());\n\n    let mixed_bytes = fr_into_bytes::<E>(&mixed.into());\n    let mut challenges = Vec::new();\n    let mut challenged_sectors = Vec::new();\n\n    for chunk in BV::from(mixed_bytes)\n        .into_iter()\n        .chunks(challenge_bits)\n        .into_iter()\n    {\n        let mut challenge: usize = 0;\n        let mut place = 1;\n\n        for bit in chunk {\n            if bit {\n                challenge += place;\n            }\n            place <<= 1;\n        }\n\n        let challenged_sector = 0; // FIXME: Actually generate challenged_sector.\n\n        challenges.push(challenge);\n        challenged_sectors.push(challenged_sector);\n    }\n\n    challenges.reverse();\n    challenged_sectors.reverse();\n\n    (challenges, challenged_sectors)\n}\n\n/// verify_final_challenge_derivation is used only in a unit test, but it is an important check of\n/// and documentation of both the challenge derivation and the method of verifying it.\n#[allow(dead_code)]\nfn verify_final_challenge_derivation<H: Hasher>(\n    challenges: &[usize],\n    partial_challenge: H::Domain,\n    mix: H::Domain,\n    challenge_bits: usize,\n) -> bool {\n    assert!(challenge_bits > 0 && challenge_bits < 64);\n    // Computing shift_factor will overflow if challenge_bits >= 64. 
No need to work around: 63 bits is plenty.\n    let shift_factor = Fr::from_repr(FrRepr::from(1u64 << challenge_bits)).unwrap();\n    let packed = challenges.iter().fold(Fr::zero(), |mut acc, challenge| {\n        let fr_challenge = Fr::from_repr(FrRepr::from(*challenge as u64)).unwrap();\n\n        acc.mul_assign(&shift_factor);\n        acc.add_assign(&fr_challenge);\n\n        acc\n    });\n\n    let mut fr_mixed: Fr = mix.into();\n    let fr_partial: Fr = partial_challenge.into();\n    fr_mixed.add_assign(&packed);\n\n    fr_partial == fr_mixed\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use pairing::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n\n    use crate::drgraph::{new_seed, BucketGraph, Graph};\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::pedersen::{PedersenDomain, PedersenHasher};\n    use crate::vdf_sloth;\n\n    #[test]\n    fn test_derive_and_verify_final_challenges() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        for challenge_bits in 1..64 {\n            let sectors_count = 1;\n            let partial_challenge: Fr = rng.gen();\n            let mix: Fr = rng.gen();\n\n            let (challenges, _challenged_sectors) = derive_final_challenges::<PedersenHasher, Bls12>(\n                partial_challenge.into(),\n                mix.into(),\n                sectors_count,\n                challenge_bits,\n            );\n\n            assert!(verify_final_challenge_derivation::<PedersenHasher>(\n                &challenges,\n                partial_challenge.into(),\n                mix.into(),\n                challenge_bits,\n            ));\n        }\n    }\n\n    #[test]\n    fn test_vdf_post_basics() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let sp = SetupParams::<PedersenDomain, vdf_sloth::Sloth> {\n            challenge_count: 30,\n            sector_size: 1024 * 32,\n            post_epochs: 3,\n            setup_params_vdf: vdf_sloth::SetupParams {\n                key: rng.gen(),\n                rounds: 1,\n            },\n            sectors_count: 2,\n        };\n\n        let pub_params = VDFPoSt::<PedersenHasher, vdf_sloth::Sloth>::setup(&sp).unwrap();\n\n        let data0: Vec<u8> = (0..1024)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        let data1: Vec<u8> = (0..1024)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n\n        let graph0 = BucketGraph::<PedersenHasher>::new(1024, 5, 0, new_seed());\n        let tree0 = graph0.merkle_tree(data0.as_slice()).unwrap();\n        let graph1 = BucketGraph::<PedersenHasher>::new(1024, 5, 0, new_seed());\n        let tree1 = graph1.merkle_tree(data1.as_slice()).unwrap();\n\n        let pub_inputs = PublicInputs {\n            challenge_seed: rng.gen(),\n            commitments: vec![tree0.root(), tree1.root()],\n        };\n\n        let priv_inputs = PrivateInputs {\n            trees: &[&tree0, &tree1],\n            _h: PhantomData,\n        };\n\n        let proof = VDFPoSt::<PedersenHasher, vdf_sloth::Sloth>::prove(\n            &pub_params,\n            &pub_inputs,\n            &priv_inputs,\n        )\n        .unwrap();\n\n        assert!(VDFPoSt::<PedersenHasher, vdf_sloth::Sloth>::verify(\n            &pub_params,\n            &pub_inputs,\n            &proof\n        )\n        .unwrap());\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/vdf_sloth.rs",
    "content": "use pairing::bls12_381::{Bls12, Fr};\n\nuse crate::crypto::sloth;\nuse crate::error::Result;\nuse crate::hasher::pedersen::PedersenDomain;\nuse crate::parameter_cache::ParameterSetIdentifier;\nuse crate::vdf::Vdf;\n\n/// VDF construction using sloth.\n#[derive(Debug, Clone)]\npub struct Sloth {}\n\nunsafe impl Sync for Sloth {}\nunsafe impl Send for Sloth {}\n\n#[derive(Debug, Clone)]\npub struct SetupParams {\n    pub key: PedersenDomain,\n    pub rounds: usize,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams {\n    pub key: PedersenDomain,\n    pub rounds: usize,\n}\n\nimpl ParameterSetIdentifier for PublicParams {\n    fn parameter_set_identifier(&self) -> String {\n        format!(\n            \"vdf_sloth::PublicParams{{key: {:?}; rounds: {}}}\",\n            self.key, self.rounds\n        )\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Proof {\n    y: PedersenDomain,\n}\n\nimpl Vdf<PedersenDomain> for Sloth {\n    type SetupParams = SetupParams;\n    type PublicParams = PublicParams;\n    type Proof = Proof;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        Ok(PublicParams {\n            key: sp.key,\n            rounds: sp.rounds,\n        })\n    }\n\n    fn eval(pp: &Self::PublicParams, x: &PedersenDomain) -> Result<(PedersenDomain, Self::Proof)> {\n        let key: Fr = pp.key.into();\n        let x: Fr = (*x).into();\n        let y = sloth::encode::<Bls12>(&key, &x, pp.rounds);\n\n        Ok((y.into(), Proof { y: y.into() }))\n    }\n\n    fn verify(pp: &Self::PublicParams, x: &PedersenDomain, proof: &Self::Proof) -> Result<bool> {\n        let y: Fr = Self::extract_output(proof).into();\n        let key: Fr = pp.key.into();\n        let decoded: PedersenDomain = sloth::decode::<Bls12>(&key, &y, pp.rounds).into();\n\n        Ok(&decoded == x)\n    }\n\n    fn key(pp: &self::PublicParams) -> PedersenDomain {\n        pp.key\n    }\n    fn rounds(pp: &self::PublicParams) -> usize {\n        pp.rounds\n    }\n    fn extract_output(proof: &Proof) -> PedersenDomain {\n        proof.y\n    }\n}\n"
  },
  {
    "path": "storage-proofs/src/zigzag_drgporep.rs",
    "content": "use std::marker::PhantomData;\n\nuse crate::drgporep;\nuse crate::drgraph::Graph;\nuse crate::hasher::Hasher;\nuse crate::layered_drgporep::Layers;\nuse crate::parameter_cache::ParameterSetIdentifier;\nuse crate::zigzag_graph::{ZigZag, ZigZagBucketGraph};\n\n/// ZigZagDrgPorep is a layered PoRep which replicates layer by layer.\n/// Between layers, the graph is 'reversed' in such a way that the dependencies expand with each iteration.\n/// This reversal is not a straightforward inversion -- so we coin the term 'zigzag' to describe the transformation.\n/// Each graph can be divided into base and expansion components.\n/// The 'base' component is an ordinary DRG. The expansion component attempts to add a target (expansion_degree) number of connections\n/// between nodes in a reversible way. Expansion connections are therefore simply inverted at each layer.\n/// Because of how DRG-sampled parents are calculated on demand, the base components are not. Instead, a same-degree\n/// DRG with connections in the opposite direction (and using the same random seed) is used when calculating parents on demand.\n/// For the algorithm to have the desired properties, it is important that the expansion components are directly inverted at each layer.\n/// However, it is fortunately not necessary that the base DRG components also have this property.\n\n#[derive(Debug)]\npub struct ZigZagDrgPoRep<'a, H: 'a + Hasher> {\n    _a: PhantomData<&'a H>,\n}\n\nimpl<'a, H: 'static + Hasher> Layers for ZigZagDrgPoRep<'a, H> where {\n    type Hasher = <ZigZagBucketGraph<H> as ZigZag>::BaseHasher;\n    type Graph = ZigZagBucketGraph<Self::Hasher>;\n\n    fn transform(\n        pp: &drgporep::PublicParams<Self::Hasher, Self::Graph>,\n        _layer: usize,\n        _layers: usize,\n    ) -> drgporep::PublicParams<Self::Hasher, Self::Graph> {\n        zigzag::<Self::Hasher, Self::Graph>(pp)\n    }\n\n    fn invert_transform(\n        pp: &drgporep::PublicParams<Self::Hasher, Self::Graph>,\n        _layer: usize,\n        _layers: usize,\n    ) -> drgporep::PublicParams<Self::Hasher, Self::Graph> {\n        zigzag::<Self::Hasher, Self::Graph>(pp)\n    }\n}\n\nfn zigzag<H, Z>(pp: &drgporep::PublicParams<H, Z>) -> drgporep::PublicParams<H, Z>\nwhere\n    H: Hasher,\n    Z: ZigZag + Graph<H> + ParameterSetIdentifier,\n{\n    drgporep::PublicParams::new(pp.graph.zigzag(), pp.sloth_iter)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use pairing::bls12_381::Bls12;\n    use rand::{Rng, SeedableRng, XorShiftRng};\n\n    use crate::drgraph::new_seed;\n    use crate::fr32::fr_into_bytes;\n    use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher};\n    use crate::layered_drgporep::{PrivateInputs, PublicInputs, PublicParams, SetupParams};\n    use crate::porep::PoRep;\n    use crate::proof::ProofScheme;\n\n    const DEFAULT_ZIGZAG_LAYERS: usize = 10;\n\n    #[test]\n    fn extract_all_pedersen() {\n        test_extract_all::<PedersenHasher>();\n    }\n\n    #[test]\n    fn extract_all_sha256() {\n        test_extract_all::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn extract_all_blake2s() {\n        test_extract_all::<Blake2sHasher>();\n    }\n\n    fn test_extract_all<H: 'static + Hasher>() {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n        let sloth_iter = 1;\n        let replica_id: H::Domain = rng.gen();\n        let data = vec![2u8; 32 * 3];\n        let challenge_count = 5;\n\n        // create a copy, so we can compare roundtrips\n  
      let mut data_copy = data.clone();\n\n        let sp = SetupParams {\n            drg_porep_setup_params: drgporep::SetupParams {\n                drg: drgporep::DrgParams {\n                    nodes: data.len() / 32,\n                    degree: 5,\n                    expansion_degree: 5,\n                    seed: new_seed(),\n                },\n                sloth_iter,\n            },\n            layers: DEFAULT_ZIGZAG_LAYERS,\n            challenge_count,\n        };\n\n        let mut pp = ZigZagDrgPoRep::<H>::setup(&sp).unwrap();\n        // Get the public params for the last layer.\n        // In reality, this is a no-op with an even number of layers.\n        for _ in 0..pp.layers {\n            pp.drg_porep_public_params = zigzag(&pp.drg_porep_public_params);\n        }\n\n        ZigZagDrgPoRep::<H>::replicate(&pp, &replica_id, data_copy.as_mut_slice(), None).unwrap();\n\n        let transformed_params = PublicParams {\n            drg_porep_public_params: pp.drg_porep_public_params,\n            layers: pp.layers,\n            challenge_count,\n        };\n\n        assert_ne!(data, data_copy);\n\n        let decoded_data = ZigZagDrgPoRep::<H>::extract_all(\n            &transformed_params,\n            &replica_id,\n            data_copy.as_mut_slice(),\n        )\n        .unwrap();\n\n        assert_eq!(data, decoded_data);\n    }\n\n    fn prove_verify(n: usize, i: usize) {\n        test_prove_verify::<PedersenHasher>(n, i);\n        test_prove_verify::<Sha256Hasher>(n, i);\n        test_prove_verify::<Blake2sHasher>(n, i);\n    }\n\n    fn test_prove_verify<H: 'static + Hasher>(n: usize, i: usize) {\n        let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);\n\n        let degree = 1 + i;\n        let expansion_degree = i;\n        let sloth_iter = 1;\n        let replica_id: H::Domain = rng.gen();\n        let data: Vec<u8> = (0..n)\n            .flat_map(|_| fr_into_bytes::<Bls12>(&rng.gen()))\n            .collect();\n        // create a copy, so we can compare roundtrips\n        let mut data_copy = data.clone();\n        let challenge_count = 5;\n        let partitions = 2;\n\n        let sp = SetupParams {\n            drg_porep_setup_params: drgporep::SetupParams {\n                drg: drgporep::DrgParams {\n                    nodes: n,\n                    degree,\n                    expansion_degree,\n                    seed: new_seed(),\n                },\n                sloth_iter,\n            },\n            layers: DEFAULT_ZIGZAG_LAYERS,\n            challenge_count,\n        };\n\n        let pp = ZigZagDrgPoRep::<H>::setup(&sp).unwrap();\n        let (tau, aux) =\n            ZigZagDrgPoRep::<H>::replicate(&pp, &replica_id, data_copy.as_mut_slice(), None)\n                .unwrap();\n        assert_ne!(data, data_copy);\n\n        let pub_inputs = PublicInputs::<H::Domain> {\n            replica_id,\n            challenge_count,\n            tau: Some(tau.simplify().into()),\n            comm_r_star: tau.comm_r_star,\n            k: None,\n        };\n\n        let priv_inputs = PrivateInputs {\n            replica: data.as_slice(),\n            aux,\n            tau: tau.layer_taus,\n        };\n\n        let all_partition_proofs =\n            &ZigZagDrgPoRep::<H>::prove_all_partitions(&pp, &pub_inputs, &priv_inputs, partitions)\n                .unwrap();\n\n        assert!(\n            ZigZagDrgPoRep::<H>::verify_all_partitions(&pp, &pub_inputs, all_partition_proofs)\n                .unwrap()\n    
    );\n    }\n\n    table_tests! {\n        prove_verify{\n            // TODO: figure out why this was failing\n            // prove_verify_32_2_1(32, 2, 1);\n            // prove_verify_32_2_2(32, 2, 2);\n\n            // TODO: determine why these cases fail\n            // prove_verify_32_3_1(32, 3, 1);\n            // prove_verify_32_3_2(32, 3, 2);\n\n            prove_verify_32_5_1(5, 1);\n            prove_verify_32_5_2(5, 2);\n            prove_verify_32_5_3(5, 3);\n    }}\n}\n"
  },
  {
    "path": "storage-proofs/src/zigzag_graph.rs",
    "content": "use std::marker::PhantomData;\n\nuse crate::crypto::feistel::{self, FeistelPrecomputed};\nuse crate::drgraph::{BucketGraph, Graph};\nuse crate::hasher::Hasher;\nuse crate::layered_drgporep::Layerable;\nuse crate::parameter_cache::ParameterSetIdentifier;\n\npub const DEFAULT_EXPANSION_DEGREE: usize = 8;\n\n#[derive(Debug, Clone, Eq, PartialEq)]\npub struct ZigZagGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + 'static,\n{\n    expansion_degree: usize,\n    base_graph: G,\n    pub reversed: bool,\n    feistel_precomputed: FeistelPrecomputed,\n    _h: PhantomData<H>,\n}\n\npub type ZigZagBucketGraph<H> = ZigZagGraph<H, BucketGraph<H>>;\n\nimpl<'a, H, G> Layerable<H> for ZigZagGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + 'static,\n{\n}\n\nimpl<H, G> ZigZagGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H>,\n{\n    pub fn new(\n        base_graph: Option<G>,\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        seed: [u32; 7],\n    ) -> Self {\n        ZigZagGraph {\n            base_graph: match base_graph {\n                Some(graph) => graph,\n                None => G::new(nodes, base_degree, 0, seed),\n            },\n            expansion_degree,\n            reversed: false,\n            feistel_precomputed: feistel::precompute((expansion_degree * nodes) as u32),\n            _h: PhantomData,\n        }\n    }\n}\n\nimpl<H, G> ParameterSetIdentifier for ZigZagGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetIdentifier,\n{\n    fn parameter_set_identifier(&self) -> String {\n        format!(\n            \"zigzag_graph::ZigZagGraph{{expansion_degree: {} base_graph: {} }}\",\n            self.expansion_degree,\n            self.base_graph.parameter_set_identifier()\n        )\n    }\n}\n\npub trait ZigZag: ::std::fmt::Debug + Clone + PartialEq + Eq {\n    type BaseHasher: Hasher;\n    type BaseGraph: Graph<Self::BaseHasher>;\n\n    /// zigzag returns a new graph with expansion component inverted and a distinct\n    /// base DRG graph -- with the direction of drg connections reversed. (i.e. 
from high-to-low nodes).\n    /// The name is 'weird', but so is the operation -- hence the choice.\n    fn zigzag(&self) -> Self;\n    /// Returns the base graph.\n    fn base_graph(&self) -> Self::BaseGraph;\n    fn expansion_degree(&self) -> usize;\n    fn reversed(&self) -> bool;\n    fn expanded_parents(&self, node: usize) -> Vec<usize>;\n    fn real_index(&self, i: usize) -> usize;\n    fn new_zigzag(\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        seed: [u32; 7],\n    ) -> Self;\n}\n\nimpl<Z: ZigZag> Graph<Z::BaseHasher> for Z {\n    fn size(&self) -> usize {\n        self.base_graph().size()\n    }\n\n    fn degree(&self) -> usize {\n        self.base_graph().degree() + self.expansion_degree()\n    }\n\n    #[inline]\n    fn parents(&self, raw_node: usize) -> Vec<usize> {\n        // If graph is reversed, use real_index to convert index to reversed index.\n        // So we convert a raw reversed node to an unreversed node, calculate its parents,\n        // then convert the parents to reversed.\n\n        let drg_parents = self\n            .base_graph()\n            .parents(self.real_index(raw_node))\n            .iter()\n            .map(|i| self.real_index(*i))\n            .collect::<Vec<_>>();\n\n        let mut parents = drg_parents;\n        // expanded_parents takes raw_node\n        let expanded_parents = self.expanded_parents(raw_node);\n\n        parents.extend(expanded_parents.iter());\n\n        // Pad so all nodes have correct degree.\n        for _ in 0..(self.degree() - parents.len()) {\n            if self.reversed() {\n                parents.push(self.size() - 1);\n            } else {\n                parents.push(0);\n            }\n        }\n        assert!(parents.len() == self.degree());\n        parents.sort();\n\n        assert!(parents.iter().all(|p| if self.forward() {\n            *p <= raw_node\n        } else {\n            *p >= raw_node\n        }));\n\n        parents\n    }\n\n    fn seed(&self) -> [u32; 7] {\n        self.base_graph().seed()\n    }\n\n    fn new(nodes: usize, base_degree: usize, expansion_degree: usize, seed: [u32; 7]) -> Self {\n        Z::new_zigzag(nodes, base_degree, expansion_degree, seed)\n    }\n\n    fn forward(&self) -> bool {\n        !self.reversed()\n    }\n}\n\nimpl<'a, H, G> ZigZagGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H>,\n{\n    fn correspondent(&self, node: usize, i: usize) -> usize {\n        let a = (node * self.expansion_degree) as u32 + i as u32;\n        let feistel_keys = &[1, 2, 3, 4];\n\n        let transformed = if self.reversed {\n            feistel::invert_permute(\n                self.size() as u32 * self.expansion_degree as u32,\n                a,\n                feistel_keys,\n                self.feistel_precomputed,\n            )\n        } else {\n            feistel::permute(\n                self.size() as u32 * self.expansion_degree as u32,\n                a,\n                feistel_keys,\n                self.feistel_precomputed,\n            )\n        };\n        transformed as usize / self.expansion_degree\n    }\n}\n\nimpl<'a, H, G> ZigZag for ZigZagGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H>,\n{\n    type BaseHasher = H;\n    type BaseGraph = G;\n\n    fn new_zigzag(\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        seed: [u32; 7],\n    ) -> Self {\n        Self::new(None, nodes, base_degree, expansion_degree, seed)\n    }\n\n    /// To zigzag a graph, we just toggle its 
reversed field.\n    /// All the real work happens when we calculate node parents on-demand.\n    fn zigzag(&self) -> Self {\n        ZigZagGraph {\n            base_graph: self.base_graph.clone(),\n            expansion_degree: self.expansion_degree,\n            reversed: !self.reversed,\n            feistel_precomputed: feistel::precompute((self.expansion_degree * self.size()) as u32),\n            _h: PhantomData,\n        }\n    }\n\n    fn base_graph(&self) -> Self::BaseGraph {\n        self.base_graph.clone()\n    }\n\n    fn expansion_degree(&self) -> usize {\n        self.expansion_degree\n    }\n\n    fn reversed(&self) -> bool {\n        self.reversed\n    }\n\n    #[inline]\n    fn expanded_parents(&self, node: usize) -> Vec<usize> {\n        (0..self.expansion_degree)\n            .filter_map(|i| {\n                let other = self.correspondent(node, i);\n                if self.reversed {\n                    if other > node {\n                        Some(other)\n                    } else {\n                        None\n                    }\n                } else if other < node {\n                    Some(other)\n                } else {\n                    None\n                }\n            })\n            .collect()\n    }\n\n    #[inline]\n    fn real_index(&self, i: usize) -> usize {\n        if self.reversed {\n            (self.size() - 1) - i\n        } else {\n            i\n        }\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::collections::HashMap;\n\n    use crate::drgraph::new_seed;\n    use crate::hasher::{Blake2sHasher, PedersenHasher, Sha256Hasher};\n\n    fn assert_graph_ascending<H: Hasher, G: Graph<H>>(g: G) {\n        for i in 0..g.size() {\n            for p in g.parents(i) {\n                if i == 0 {\n                    assert!(p == i);\n                } else {\n                    assert!(p < i);\n                }\n            }\n        }\n    }\n\n    fn assert_graph_descending<H: Hasher, G: Graph<H>>(g: G) {\n        for i in 0..g.size() {\n            let parents = g.parents(i);\n            for p in parents {\n                if i == g.size() - 1 {\n                    assert!(p == i);\n                } else {\n                    assert!(p > i);\n                }\n            }\n        }\n    }\n\n    #[test]\n    fn zigzag_graph_zigzags_pedersen() {\n        test_zigzag_graph_zigzags::<PedersenHasher>();\n    }\n\n    #[test]\n    fn zigzag_graph_zigzags_sha256() {\n        test_zigzag_graph_zigzags::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn zigzag_graph_zigzags_blake2s() {\n        test_zigzag_graph_zigzags::<Blake2sHasher>();\n    }\n\n    fn test_zigzag_graph_zigzags<H: 'static + Hasher>() {\n        let g = ZigZagBucketGraph::<H>::new_zigzag(50, 5, DEFAULT_EXPANSION_DEGREE, new_seed());\n        let gz = g.zigzag();\n\n        assert_graph_ascending(g);\n        assert_graph_descending(gz);\n    }\n\n    #[test]\n    fn expansion_pedersen() {\n        test_expansion::<PedersenHasher>();\n    }\n\n    #[test]\n    fn expansion_sha256() {\n        test_expansion::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn expansion_blake2s() {\n        test_expansion::<Blake2sHasher>();\n    }\n\n    fn test_expansion<H: 'static + Hasher>() {\n        // We need a graph.\n        let g = ZigZagBucketGraph::<H>::new_zigzag(25, 5, DEFAULT_EXPANSION_DEGREE, new_seed());\n\n        // We're going to fully realize the expansion-graph component, in a HashMap.\n        let mut gcache: HashMap<usize, Vec<usize>> 
= HashMap::new();\n\n        // Populate the HashMap with each node's 'expanded parents'.\n        for i in 0..g.size() {\n            let parents = g.expanded_parents(i);\n            gcache.insert(i, parents);\n        }\n\n        // Here's the zigzag version of the graph.\n        let gz = g.zigzag();\n\n        // And a HashMap to hold the expanded parents.\n        let mut gzcache: HashMap<usize, Vec<usize>> = HashMap::new();\n\n        for i in 0..gz.size() {\n            let parents = gz.expanded_parents(i);\n\n            // Check to make sure all (expanded) node-parent relationships also exist in reverse,\n            // in the original graph's HashMap.\n            for p in &parents {\n                assert!(gcache[&p].contains(&i));\n            }\n            // And populate the zigzag's HashMap.\n            gzcache.insert(i, parents);\n        }\n\n        // And then do the same check to make sure all (expanded) node-parent relationships from the original\n        // are present in the zigzag, just reversed.\n        for i in 0..g.size() {\n            let parents = g.expanded_parents(i);\n            for p in parents {\n                assert!(gzcache[&p].contains(&i));\n            }\n        }\n        // Having checked both ways, we know the graph and its zigzag counterpart have 'expanded' components\n        // which are each other's inverses. It's important that this be true.\n    }\n}\n"
  },
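The zigzag construction above hinges on `real_index`: reversing a layer maps node `i` to `(size - 1) - i`, and `parents` relies on that map being its own inverse to translate raw indices into the base graph and back. A minimal standalone sketch of that involution and the padding rule (free functions for illustration only, not the crate's API):

```rust
// Index reversal used by ZigZag: on a reversed layer, node i becomes (size - 1) - i.
fn real_index(size: usize, reversed: bool, i: usize) -> usize {
    if reversed {
        (size - 1) - i
    } else {
        i
    }
}

fn main() {
    let size = 8;
    // Applying the reversal twice returns the original index (an involution),
    // which is what lets `parents` convert to the base graph and back safely.
    for i in 0..size {
        assert_eq!(real_index(size, true, real_index(size, true, i)), i);
    }
    // Padding rule from `parents`: a forward layer pads short parent lists
    // with node 0, a reversed layer with size - 1, so the ordering asserts
    // (`p <= node` forward, `p >= node` reversed) still hold after padding.
    println!("reversal round-trips for all {} nodes", size);
}
```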
  {
    "path": "storage-proofs-core/Cargo.toml",
    "content": "[package]\nname = \"storage-proofs-core\"\nversion = \"7.0.1\"\nauthors = [\"dignifiedquire <me@dignifiedquire.com>\"]\ndescription = \"Core parts for proofs of storage\"\nlicense = \"MIT OR Apache-2.0\"\nedition = \"2018\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\nreadme = \"README.md\"\n\n[lib]\nbench = false\n\n[dependencies]\nfilecoin-hashers = { path = \"../filecoin-hashers\", version = \"^2.0.0\", default-features = false, features = [\"sha256\", \"poseidon\"] }\nrand = \"0.7\"\nmerkletree = \"0.21.0\"\nbyteorder = \"1\"\nconfig = { version = \"0.10.1\", default-features = false, features = [\"toml\"] }\nitertools = \"0.9\"\nlazy_static = \"1.2\"\nmemmap = \"0.7\"\naes = \"0.6\"\nblock-modes = \"0.7\"\nsha2 = \"0.9.1\"\ntempfile = \"3\"\nfs2 = \"0.4\"\nrayon = \"1.0.0\"\nserde = { version = \"1.0\", features = [\"derive\"]}\nblake2b_simd = \"0.5\"\nblake2s_simd = \"0.5\"\ntoml = \"0.5\"\nff = { version = \"0.2.3\", package = \"fff\" }\nbellperson = { version = \"0.13\", default-features = false }\nserde_json = \"1.0\"\nlog = \"0.4.7\"\nrand_chacha = \"0.2.1\"\nhex = \"0.4.0\"\ngeneric-array = \"0.14.4\"\nanyhow = \"1.0.23\"\nthiserror = \"1.0.6\"\nneptune = { version = \"2.2.0\", default-features = false }\ncpu-time = { version = \"1.0\", optional = true }\ngperftools = { version = \"0.2\", optional = true }\nnum_cpus = \"1.10.1\"\nsemver = \"0.11.0\"\nfr32 = { path = \"../fr32\", version = \"^0.2.0\", default-features = false }\n\n[dev-dependencies]\nproptest = \"0.10\"\ncriterion = \"0.3\"\nbitvec = \"0.17\"\nrand_xorshift = \"0.2.0\"\npretty_assertions = \"0.6.1\"\nsha2raw = { path = \"../sha2raw\", version = \"^2.0.0\"}\nfilecoin-hashers = { path = \"../filecoin-hashers\", version = \"^2.0.0\", default-features = false, features = [\"blake2s\", \"sha256\", \"poseidon\"] }\n\n[features]\ndefault = [\"gpu\", \"pairing\"]\nsimd = []\nasm = [\"sha2/sha2-asm\"]\nbig-sector-sizes-bench = []\nmeasurements = [\"cpu-time\", \"gperftools\"]\nprofile = [\"measurements\"]\n\ngpu = [\"bellperson/gpu\", \"neptune/opencl\", \"filecoin-hashers/gpu\", \"fr32/gpu\"]\npairing = [\"bellperson/pairing\", \"neptune/pairing\", \"bellperson/pairing-serde\", \"filecoin-hashers/pairing\", \"fr32/pairing\"]\nblst = [\"bellperson/blst\", \"neptune/blst\", \"bellperson/blst-serde\", \"filecoin-hashers/blst\", \"fr32/blst\"]\n\n[[bench]]\nname = \"sha256\"\nharness = false\n\n[[bench]]\nname = \"blake2s\"\nharness = false\n\n[[bench]]\nname = \"drgraph\"\nharness = false\n\n[[bench]]\nname = \"xor\"\nharness = false\n\n[[bench]]\nname = \"merkle\"\nharness = false\n\n[[bench]]\nname = \"misc\"\nharness = false\n"
  },
  {
    "path": "storage-proofs-core/README.md",
    "content": "# Storage Proofs Core\n\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "storage-proofs-core/benches/blake2s.rs",
    "content": "use bellperson::{\n    bls::Bls12,\n    gadgets::{\n        blake2s::blake2s as blake2s_circuit,\n        boolean::{AllocatedBit, Boolean},\n    },\n    groth16::{create_random_proof, generate_random_parameters},\n    util_cs::bench_cs::BenchCS,\n    Circuit, ConstraintSystem, SynthesisError,\n};\nuse blake2s_simd::blake2s;\nuse criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse rand::{thread_rng, Rng};\n\nstruct Blake2sExample<'a> {\n    data: &'a [Option<bool>],\n}\n\nimpl<'a> Circuit<Bls12> for Blake2sExample<'a> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let data: Vec<Boolean> = self\n            .data\n            .iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        let cs = cs.namespace(|| \"blake2s\");\n        let personalization = vec![0u8; 8];\n        let _res = blake2s_circuit(cs, &data, &personalization)?;\n        Ok(())\n    }\n}\n\nfn blake2s_benchmark(c: &mut Criterion) {\n    let params = vec![32, 64, 10 * 32];\n\n    c.bench(\n        \"hash-blake2s\",\n        ParameterizedBenchmark::new(\n            \"non-circuit\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                b.iter(|| black_box(blake2s(&data)))\n            },\n            params,\n        ),\n    );\n}\n\nfn blake2s_circuit_benchmark(c: &mut Criterion) {\n    let mut rng1 = thread_rng();\n    let groth_params =\n        generate_random_parameters::<Bls12, _, _>(Blake2sExample { data: &[None; 256] }, &mut rng1)\n            .unwrap();\n\n    let params = vec![32];\n\n    c.bench(\n        \"hash-blake2s-circuit\",\n        ParameterizedBenchmark::new(\n            \"create-proof\",\n            move |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                b.iter(|| {\n                    let proof = create_random_proof(\n                        Blake2sExample {\n                            data: data.as_slice(),\n                        },\n                        &groth_params,\n                        &mut rng,\n                    )\n                    .unwrap();\n\n                    black_box(proof)\n                });\n            },\n            params,\n        )\n        .with_function(\"synthesize\", move |b, bytes| {\n            let mut rng = thread_rng();\n            let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n\n                Blake2sExample {\n                    data: data.as_slice(),\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(benches, blake2s_benchmark, blake2s_circuit_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs-core/benches/drgraph.rs",
    "content": "use criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse filecoin_hashers::poseidon::PoseidonHasher;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    drgraph::{BucketGraph, Graph, BASE_DEGREE},\n};\n\n#[allow(clippy::unit_arg)]\nfn drgraph(c: &mut Criterion) {\n    let params = vec![12, 24, 128, 1024];\n\n    c.bench(\n        \"sample\",\n        ParameterizedBenchmark::new(\n            \"bucket/m=6\",\n            |b, n| {\n                let graph = BucketGraph::<PoseidonHasher>::new(\n                    *n,\n                    BASE_DEGREE,\n                    0,\n                    [32; 32],\n                    ApiVersion::V1_1_0,\n                )\n                .unwrap();\n\n                b.iter(|| {\n                    let mut parents = vec![0; 6];\n                    black_box(graph.parents(2, &mut parents).unwrap());\n                })\n            },\n            params,\n        ),\n    );\n}\n\ncriterion_group!(benches, drgraph);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs-core/benches/merkle.rs",
    "content": "use anyhow::Result;\nuse criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse filecoin_hashers::{\n    poseidon::PoseidonDomain, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain,\n};\nuse rand::{thread_rng, Rng};\nuse storage_proofs_core::merkle::{create_base_merkle_tree, BinaryMerkleTree};\n\nfn merkle_benchmark_sha256(c: &mut Criterion) {\n    let params = if cfg!(feature = \"big-sector-sizes-bench\") {\n        vec![128, 1024, 1_048_576]\n    } else {\n        vec![128, 1024]\n    };\n\n    c.bench(\n        \"merkletree-binary\",\n        ParameterizedBenchmark::new(\n            \"sha256\",\n            move |b, n_nodes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..32 * *n_nodes).map(|_| rng.gen()).collect();\n                b.iter(|| {\n                    black_box(\n                        create_base_merkle_tree::<BinaryMerkleTree<Sha256Hasher>>(\n                            None, *n_nodes, &data,\n                        )\n                        .unwrap(),\n                    )\n                })\n            },\n            params,\n        ),\n    );\n}\n\nfn merkle_benchmark_poseidon(c: &mut Criterion) {\n    let params = if cfg!(feature = \"big-sector-sizes-bench\") {\n        vec![64, 128, 1024, 1_048_576]\n    } else {\n        vec![64, 128, 1024]\n    };\n\n    c.bench(\n        \"merkletree-binary\",\n        ParameterizedBenchmark::new(\n            \"poseidon\",\n            move |b, n_nodes| {\n                let mut rng = thread_rng();\n                let mut data: Vec<u8> = Vec::with_capacity(32 * *n_nodes);\n                (0..*n_nodes)\n                    .into_iter()\n                    .try_for_each(|_| -> Result<()> {\n                        let node = PoseidonDomain::random(&mut rng);\n                        Ok(data.extend(node.into_bytes()))\n                    })\n                    .expect(\"failed to generate data\");\n\n                b.iter(|| {\n                    black_box(\n                        create_base_merkle_tree::<BinaryMerkleTree<PoseidonHasher>>(\n                            None, *n_nodes, &data,\n                        )\n                        .unwrap(),\n                    )\n                })\n            },\n            params,\n        ),\n    );\n}\n\ncriterion_group!(benches, merkle_benchmark_sha256, merkle_benchmark_poseidon);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs-core/benches/misc.rs",
    "content": "use std::io::{Read, Seek, SeekFrom, Write};\n\nuse criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse rand::{thread_rng, Rng};\nuse tempfile::tempfile;\n\nfn read_bytes_benchmark(c: &mut Criterion) {\n    let params = vec![32, 64, 512, 1024, 64 * 1024];\n\n    c.bench(\n        \"read\",\n        ParameterizedBenchmark::new(\n            \"from_disk\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                let mut f = tempfile().unwrap();\n                f.write_all(&data).unwrap();\n                f.sync_all().unwrap();\n\n                b.iter(|| {\n                    let mut res = vec![0u8; *bytes];\n                    f.seek(SeekFrom::Start(0)).unwrap();\n                    f.read_exact(&mut res).unwrap();\n\n                    black_box(res)\n                })\n            },\n            params,\n        ),\n    );\n}\n\ncriterion_group!(benches, read_bytes_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs-core/benches/sha256.rs",
    "content": "use bellperson::{\n    bls::Bls12,\n    gadgets::{\n        boolean::{AllocatedBit, Boolean},\n        sha256::sha256 as sha256_circuit,\n    },\n    groth16::{create_random_proof, generate_random_parameters},\n    util_cs::bench_cs::BenchCS,\n    Circuit, ConstraintSystem, SynthesisError,\n};\nuse criterion::{\n    black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark, Throughput,\n};\nuse rand::{thread_rng, Rng};\nuse sha2::Digest;\n\nstruct Sha256Example<'a> {\n    data: &'a [Option<bool>],\n}\n\nimpl<'a> Circuit<Bls12> for Sha256Example<'a> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let data: Vec<Boolean> = self\n            .data\n            .iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        let cs = cs.namespace(|| \"sha256\");\n\n        let _res = sha256_circuit(cs, &data)?;\n        Ok(())\n    }\n}\n\nfn sha256_benchmark(c: &mut Criterion) {\n    let params = vec![32, 64, 10 * 32, 37 * 32];\n\n    c.bench(\n        \"hash-sha256-base\",\n        ParameterizedBenchmark::new(\n            \"non-circuit\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                b.iter(|| black_box(sha2::Sha256::digest(&data)))\n            },\n            params,\n        )\n        .throughput(|bytes| Throughput::Bytes(*bytes as u64)),\n    );\n}\n\nfn sha256_raw_benchmark(c: &mut Criterion) {\n    let params = vec![64, 10 * 32, 38 * 32];\n\n    c.bench(\n        \"hash-sha256-raw\",\n        ParameterizedBenchmark::new(\n            \"non-circuit\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n                let chunks = data.chunks(32).collect::<Vec<_>>();\n\n                b.iter(|| black_box(sha2raw::Sha256::digest(&chunks)))\n            },\n            params,\n        )\n        .throughput(|bytes| Throughput::Bytes(*bytes as u64)),\n    );\n}\n\nfn sha256_circuit_benchmark(c: &mut Criterion) {\n    let mut rng1 = thread_rng();\n\n    let params = vec![32, 64];\n\n    c.bench(\n        \"hash-sha256-circuit\",\n        ParameterizedBenchmark::new(\n            \"create-proof\",\n            move |b, bytes| {\n                let groth_params = generate_random_parameters::<Bls12, _, _>(\n                    Sha256Example {\n                        data: &vec![None; *bytes as usize * 8],\n                    },\n                    &mut rng1,\n                )\n                .unwrap();\n\n                let mut rng = thread_rng();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                b.iter(|| {\n                    let proof = create_random_proof(\n                        Sha256Example {\n                            data: data.as_slice(),\n                        },\n                        &groth_params,\n                        &mut rng,\n                    )\n                    .unwrap();\n\n                    black_box(proof)\n                });\n            },\n            params,\n        )\n        .with_function(\"synthesize\", move 
|b, bytes| {\n            let mut rng = thread_rng();\n            let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n                Sha256Example {\n                    data: data.as_slice(),\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(\n    benches,\n    sha256_benchmark,\n    sha256_raw_benchmark,\n    sha256_circuit_benchmark\n);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs-core/benches/xor.rs",
    "content": "use bellperson::{\n    bls::Bls12,\n    gadgets::boolean::{AllocatedBit, Boolean},\n    groth16::{create_random_proof, generate_random_parameters},\n    util_cs::bench_cs::BenchCS,\n    Circuit, ConstraintSystem, SynthesisError,\n};\nuse criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse rand::{thread_rng, Rng};\nuse storage_proofs_core::{crypto::xor, gadgets::xor::xor as xor_circuit};\n\nstruct XorExample<'a> {\n    key: &'a [Option<bool>],\n    data: &'a [Option<bool>],\n}\n\nimpl<'a> Circuit<Bls12> for XorExample<'a> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let key: Vec<Boolean> = self\n            .key\n            .iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"key_bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n        let data: Vec<Boolean> = self\n            .data\n            .iter()\n            .enumerate()\n            .map(|(i, b)| {\n                Ok(Boolean::from(AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"data_bit {}\", i)),\n                    *b,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        let mut cs = cs.namespace(|| \"xor\");\n        let _res = xor_circuit(&mut cs, &key, &data)?;\n\n        Ok(())\n    }\n}\n\nfn xor_benchmark(c: &mut Criterion) {\n    let params = vec![32, 64, 10 * 32];\n\n    c.bench(\n        \"xor\",\n        ParameterizedBenchmark::new(\n            \"non-circuit\",\n            |b, bytes| {\n                let mut rng = thread_rng();\n                let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect();\n                let data: Vec<u8> = (0..*bytes).map(|_| rng.gen()).collect();\n\n                b.iter(|| black_box(xor::encode(&key, &data)))\n            },\n            params,\n        ),\n    );\n}\n\nfn xor_circuit_benchmark(c: &mut Criterion) {\n    let mut rng1 = thread_rng();\n    let groth_params = generate_random_parameters::<Bls12, _, _>(\n        XorExample {\n            key: &[None; 8 * 32],\n            data: &[None; 256],\n        },\n        &mut rng1,\n    )\n    .unwrap();\n\n    let params = vec![32];\n\n    c.bench(\n        \"xor-circuit\",\n        ParameterizedBenchmark::new(\n            \"create-proof\",\n            move |b, bytes| {\n                let mut rng = thread_rng();\n                let key: Vec<Option<bool>> = (0..32 * 8).map(|_| Some(rng.gen())).collect();\n                let data: Vec<Option<bool>> = (0..bytes * 8).map(|_| Some(rng.gen())).collect();\n\n                b.iter(|| {\n                    let proof = create_random_proof(\n                        XorExample {\n                            key: key.as_slice(),\n                            data: data.as_slice(),\n                        },\n                        &groth_params,\n                        &mut rng,\n                    )\n                    .unwrap();\n\n                    black_box(proof)\n                });\n            },\n            params,\n        )\n        .with_function(\"synthesize\", move |b, bytes| {\n            let mut rng = thread_rng();\n            let key: Vec<Option<bool>> = (0..32 * 8).map(|_| Some(rng.gen())).collect();\n            let data: Vec<Option<bool>> = (0..bytes * 
8).map(|_| Some(rng.gen())).collect();\n\n            b.iter(|| {\n                let mut cs = BenchCS::<Bls12>::new();\n                XorExample {\n                    key: key.as_slice(),\n                    data: data.as_slice(),\n                }\n                .synthesize(&mut cs)\n                .unwrap();\n\n                black_box(cs)\n            });\n        })\n        .sample_size(20),\n    );\n}\n\ncriterion_group!(benches, xor_benchmark, xor_circuit_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs-core/src/api_version.rs",
    "content": "use std::fmt::{self, Debug, Display, Formatter};\nuse std::str::FromStr;\n\nuse anyhow::{format_err, Error, Result};\nuse semver::Version;\n\n#[derive(Copy, Clone, Eq, PartialEq)]\npub enum ApiVersion {\n    V1_0_0,\n    V1_1_0,\n}\n\nimpl ApiVersion {\n    pub fn as_semver(&self) -> Version {\n        match self {\n            ApiVersion::V1_0_0 => Version::new(1, 0, 0),\n            ApiVersion::V1_1_0 => Version::new(1, 1, 0),\n        }\n    }\n}\n\nimpl Debug for ApiVersion {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let semver = self.as_semver();\n        write!(f, \"{}.{}.{}\", semver.major, semver.minor, semver.patch)\n    }\n}\n\nimpl Display for ApiVersion {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        let semver = self.as_semver();\n        write!(f, \"{}.{}.{}\", semver.major, semver.minor, semver.patch)\n    }\n}\n\nimpl FromStr for ApiVersion {\n    type Err = Error;\n    fn from_str(api_version_str: &str) -> Result<Self> {\n        let api_version = Version::parse(api_version_str)?;\n        match (api_version.major, api_version.minor, api_version.patch) {\n            (1, 0, 0) => Ok(ApiVersion::V1_0_0),\n            (1, 1, 0) => Ok(ApiVersion::V1_1_0),\n            (1, 1, _) | (1, 0, _) => Err(format_err!(\n                \"Could not parse API Version from string (patch)\"\n            )),\n            (1, _, _) => Err(format_err!(\n                \"Could not parse API Version from string (minor)\"\n            )),\n            _ => Err(format_err!(\n                \"Could not parse API Version from string (major)\"\n            )),\n        }\n    }\n}\n\n#[test]\nfn test_fmt() {\n    assert_eq!(format!(\"{}\", ApiVersion::V1_0_0), \"1.0.0\");\n    assert_eq!(format!(\"{}\", ApiVersion::V1_1_0), \"1.1.0\");\n}\n\n#[test]\nfn test_as_semver() {\n    assert_eq!(ApiVersion::V1_0_0.as_semver().major, 1);\n    assert_eq!(ApiVersion::V1_1_0.as_semver().major, 1);\n}\n"
  },
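A usage sketch for the parser above; the import path mirrors how the crate's benches reference `api_version`, and only the two exact versions are accepted:

```rust
use std::str::FromStr;

use storage_proofs_core::api_version::ApiVersion;

fn main() {
    // Exact known versions parse, and Display round-trips them.
    let v = ApiVersion::from_str("1.1.0").expect("known version");
    assert_eq!(v, ApiVersion::V1_1_0);
    assert_eq!(v.to_string(), "1.1.0");

    // A patch bump of a known minor is rejected rather than rounded down.
    assert!(ApiVersion::from_str("1.0.1").is_err());
}
```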
  {
    "path": "storage-proofs-core/src/cache_key.rs",
    "content": "use std::fmt::{self, Display, Formatter};\n\n#[derive(Debug, Copy, Clone)]\npub enum CacheKey {\n    PAux,\n    TAux,\n    CommDTree,\n    CommCTree,\n    CommRLastTree,\n}\n\nimpl Display for CacheKey {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match *self {\n            CacheKey::PAux => write!(f, \"p_aux\"),\n            CacheKey::TAux => write!(f, \"t_aux\"),\n            CacheKey::CommDTree => write!(f, \"tree-d\"),\n            CacheKey::CommCTree => write!(f, \"tree-c\"),\n            CacheKey::CommRLastTree => write!(f, \"tree-r-last\"),\n        }\n    }\n}\n\nimpl CacheKey {\n    pub fn label_layer(layer: usize) -> String {\n        format!(\"layer-{}\", layer)\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/compound_proof.rs",
    "content": "use anyhow::{ensure, Context};\nuse bellperson::{\n    bls::{Bls12, Fr},\n    groth16::{\n        self, create_random_proof_batch, create_random_proof_batch_in_priority, verify_proofs_batch,\n    },\n    Circuit,\n};\nuse log::info;\nuse rand::{rngs::OsRng, RngCore};\nuse rayon::prelude::{\n    IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator,\n};\n\nuse crate::{\n    error::Result,\n    multi_proof::MultiProof,\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    partitions::partition_count,\n    proof::ProofScheme,\n};\n\n#[derive(Clone)]\npub struct SetupParams<'a, S: ProofScheme<'a>> {\n    pub vanilla_params: <S as ProofScheme<'a>>::SetupParams,\n    pub partitions: Option<usize>,\n    /// High priority (always runs on GPU) == true\n    pub priority: bool,\n}\n\n#[derive(Clone)]\npub struct PublicParams<'a, S: ProofScheme<'a>> {\n    pub vanilla_params: S::PublicParams,\n    pub partitions: Option<usize>,\n    pub priority: bool,\n}\n\n/// CircuitComponent exists so parent components can pass private inputs to their subcomponents\n/// when calling CompoundProof::circuit directly. In general, there are no internal private inputs,\n/// and a default value will be passed. CompoundProof::circuit implementations should exhibit\n/// default behavior when passed a default ComponentPrivateinputs.\npub trait CircuitComponent {\n    type ComponentPrivateInputs: Default + Clone;\n}\n\n/// The CompoundProof trait bundles a proof::ProofScheme and a bellperson::Circuit together.\n/// It provides methods equivalent to those provided by proof::ProofScheme (setup, prove, verify).\n/// See documentation at proof::ProofScheme for details.\n/// Implementations should generally only need to supply circuit and generate_public_inputs.\n/// The remaining trait methods are used internally and implement the necessary plumbing.\npub trait CompoundProof<'a, S: ProofScheme<'a>, C: Circuit<Bls12> + CircuitComponent + Send>\nwhere\n    S::Proof: Sync + Send,\n    S::PublicParams: ParameterSetMetadata + Sync + Send,\n    S::PublicInputs: Clone + Sync,\n    Self: CacheableParameters<C, S::PublicParams>,\n{\n    // setup is equivalent to ProofScheme::setup.\n    fn setup(sp: &SetupParams<'a, S>) -> Result<PublicParams<'a, S>> {\n        Ok(PublicParams {\n            vanilla_params: S::setup(&sp.vanilla_params)?,\n            partitions: sp.partitions,\n            priority: sp.priority,\n        })\n    }\n\n    fn partition_count(public_params: &PublicParams<'a, S>) -> usize {\n        match public_params.partitions {\n            None => 1,\n            Some(0) => panic!(\"cannot specify zero partitions\"),\n            Some(k) => k,\n        }\n    }\n\n    /// prove is equivalent to ProofScheme::prove.\n    fn prove<'b>(\n        pub_params: &PublicParams<'a, S>,\n        pub_in: &S::PublicInputs,\n        priv_in: &S::PrivateInputs,\n        groth_params: &'b groth16::MappedParameters<Bls12>,\n    ) -> Result<MultiProof<'b>> {\n        let partition_count = Self::partition_count(pub_params);\n\n        // This will always run at least once, since there cannot be zero partitions.\n        ensure!(partition_count > 0, \"There must be partitions\");\n\n        info!(\"vanilla_proofs:start\");\n        let vanilla_proofs = S::prove_all_partitions(\n            &pub_params.vanilla_params,\n            &pub_in,\n            priv_in,\n            partition_count,\n        )?;\n\n        info!(\"vanilla_proofs:finish\");\n\n        let 
sanity_check =\n            S::verify_all_partitions(&pub_params.vanilla_params, &pub_in, &vanilla_proofs)?;\n        ensure!(sanity_check, \"sanity check failed\");\n\n        info!(\"snark_proof:start\");\n        let groth_proofs = Self::circuit_proofs(\n            pub_in,\n            vanilla_proofs,\n            &pub_params.vanilla_params,\n            groth_params,\n            pub_params.priority,\n        )?;\n        info!(\"snark_proof:finish\");\n\n        Ok(MultiProof::new(groth_proofs, &groth_params.pvk))\n    }\n\n    fn prove_with_vanilla<'b>(\n        pub_params: &PublicParams<'a, S>,\n        pub_in: &S::PublicInputs,\n        vanilla_proofs: Vec<S::Proof>,\n        groth_params: &'b groth16::MappedParameters<Bls12>,\n    ) -> Result<MultiProof<'b>> {\n        let partition_count = Self::partition_count(pub_params);\n\n        // This will always run at least once, since there cannot be zero partitions.\n        ensure!(partition_count > 0, \"There must be partitions\");\n\n        info!(\"snark_proof:start\");\n        let groth_proofs = Self::circuit_proofs(\n            pub_in,\n            vanilla_proofs,\n            &pub_params.vanilla_params,\n            groth_params,\n            pub_params.priority,\n        )?;\n        info!(\"snark_proof:finish\");\n\n        Ok(MultiProof::new(groth_proofs, &groth_params.pvk))\n    }\n\n    // verify is equivalent to ProofScheme::verify.\n    fn verify<'b>(\n        public_params: &PublicParams<'a, S>,\n        public_inputs: &S::PublicInputs,\n        multi_proof: &MultiProof<'b>,\n        requirements: &S::Requirements,\n    ) -> Result<bool> {\n        ensure!(\n            multi_proof.circuit_proofs.len() == Self::partition_count(public_params),\n            \"Inconsistent inputs\"\n        );\n\n        let vanilla_public_params = &public_params.vanilla_params;\n        let pvk = &multi_proof.verifying_key;\n\n        if !<S as ProofScheme>::satisfies_requirements(\n            &public_params.vanilla_params,\n            requirements,\n            multi_proof.circuit_proofs.len(),\n        ) {\n            return Ok(false);\n        }\n\n        let inputs: Vec<_> = (0..multi_proof.circuit_proofs.len())\n            .into_par_iter()\n            .map(|k| Self::generate_public_inputs(public_inputs, vanilla_public_params, Some(k)))\n            .collect::<Result<_>>()?;\n\n        let proofs: Vec<_> = multi_proof.circuit_proofs.iter().collect();\n        let res = verify_proofs_batch(&pvk, &mut OsRng, &proofs, &inputs)?;\n        Ok(res)\n    }\n\n    /// Efficiently verify multiple proofs.\n    fn batch_verify<'b>(\n        public_params: &PublicParams<'a, S>,\n        public_inputs: &[S::PublicInputs],\n        multi_proofs: &[MultiProof<'b>],\n        requirements: &S::Requirements,\n    ) -> Result<bool> {\n        ensure!(\n            public_inputs.len() == multi_proofs.len(),\n            \"Inconsistent inputs\"\n        );\n        for proof in multi_proofs {\n            ensure!(\n                proof.circuit_proofs.len() == Self::partition_count(public_params),\n                \"Inconsistent inputs\"\n            );\n        }\n        ensure!(!public_inputs.is_empty(), \"Cannot verify empty proofs\");\n\n        let vanilla_public_params = &public_params.vanilla_params;\n        // Just use the first verifying key; they must all be equal anyway.\n        let pvk = &multi_proofs[0].verifying_key;\n\n        for multi_proof in multi_proofs.iter() {\n            if !<S as ProofScheme>::satisfies_requirements(\n                
&public_params.vanilla_params,\n                requirements,\n                multi_proof.circuit_proofs.len(),\n            ) {\n                return Ok(false);\n            }\n        }\n\n        let inputs: Vec<_> = multi_proofs\n            .par_iter()\n            .zip(public_inputs.par_iter())\n            .flat_map(|(multi_proof, pub_inputs)| {\n                (0..multi_proof.circuit_proofs.len())\n                    .into_par_iter()\n                    .map(|k| {\n                        Self::generate_public_inputs(pub_inputs, vanilla_public_params, Some(k))\n                    })\n                    .collect::<Result<Vec<_>>>()\n                    .expect(\"Invalid public inputs\") // TODO: improve error handling\n            })\n            .collect::<Vec<_>>();\n        let circuit_proofs: Vec<_> = multi_proofs\n            .iter()\n            .flat_map(|m| m.circuit_proofs.iter())\n            .collect();\n\n        let res = verify_proofs_batch(&pvk, &mut OsRng, &circuit_proofs[..], &inputs)?;\n\n        Ok(res)\n    }\n\n    /// circuit_proofs creates and synthesizes circuits from concrete params/inputs, then generates\n    /// Groth proofs from them, returning one proof per partition.\n    /// circuit_proofs is used internally and should neither be called nor implemented outside of\n    /// default trait methods.\n    fn circuit_proofs(\n        pub_in: &S::PublicInputs,\n        vanilla_proofs: Vec<S::Proof>,\n        pub_params: &S::PublicParams,\n        groth_params: &groth16::MappedParameters<Bls12>,\n        priority: bool,\n    ) -> Result<Vec<groth16::Proof<Bls12>>> {\n        let mut rng = OsRng;\n        ensure!(\n            !vanilla_proofs.is_empty(),\n            \"cannot create a circuit proof over missing vanilla proofs\"\n        );\n\n        let circuits = vanilla_proofs\n            .into_par_iter()\n            .enumerate()\n            .map(|(k, vanilla_proof)| {\n                Self::circuit(\n                    &pub_in,\n                    C::ComponentPrivateInputs::default(),\n                    &vanilla_proof,\n                    &pub_params,\n                    Some(k),\n                )\n            })\n            .collect::<Result<Vec<_>>>()?;\n\n        let groth_proofs = if priority {\n            create_random_proof_batch_in_priority(circuits, groth_params, &mut rng)?\n        } else {\n            create_random_proof_batch(circuits, groth_params, &mut rng)?\n        };\n\n        groth_proofs\n            .into_iter()\n            .map(|groth_proof| {\n                let mut proof_vec = Vec::new();\n                groth_proof.write(&mut proof_vec)?;\n                let gp = groth16::Proof::<Bls12>::read(&proof_vec[..])?;\n                Ok(gp)\n            })\n            .collect()\n    }\n\n    /// generate_public_inputs generates public inputs suitable for use as input during verification\n    /// of a proof generated from this CompoundProof's bellperson::Circuit (C). 
These inputs correspond\n    /// to those allocated when C is synthesized.\n    fn generate_public_inputs(\n        pub_in: &S::PublicInputs,\n        pub_params: &S::PublicParams,\n        partition_k: Option<usize>,\n    ) -> Result<Vec<Fr>>;\n\n    /// circuit constructs an instance of this CompoundProof's bellperson::Circuit.\n    /// circuit takes PublicInputs, PublicParams, and Proof from this CompoundProof's proof::ProofScheme (S)\n    /// and uses them to initialize Circuit fields which will be used to construct public and private\n    /// inputs during circuit synthesis.\n    fn circuit(\n        public_inputs: &S::PublicInputs,\n        component_private_inputs: C::ComponentPrivateInputs,\n        vanilla_proof: &S::Proof,\n        public_param: &S::PublicParams,\n        partition_k: Option<usize>,\n    ) -> Result<C>;\n\n    fn blank_circuit(public_params: &S::PublicParams) -> C;\n\n    /// If the rng option argument is set, parameters will be\n    /// generated using it.  This is used for testing only, or where\n    /// parameters are otherwise unavailable (e.g. benches).  If rng\n    /// is not set, an error will result if parameters are not\n    /// present.\n    fn groth_params<R: RngCore>(\n        rng: Option<&mut R>,\n        public_params: &S::PublicParams,\n    ) -> Result<groth16::MappedParameters<Bls12>> {\n        Self::get_groth_params(rng, Self::blank_circuit(public_params), public_params)\n    }\n\n    /// If the rng option argument is set, parameters will be\n    /// generated using it.  This is used for testing only, or where\n    /// parameters are otherwise unavailable (e.g. benches).  If rng\n    /// is not set, an error will result if parameters are not\n    /// present.\n    fn verifying_key<R: RngCore>(\n        rng: Option<&mut R>,\n        public_params: &S::PublicParams,\n    ) -> Result<groth16::VerifyingKey<Bls12>> {\n        Self::get_verifying_key(rng, Self::blank_circuit(public_params), public_params)\n    }\n\n    fn circuit_for_test(\n        public_parameters: &PublicParams<'a, S>,\n        public_inputs: &S::PublicInputs,\n        private_inputs: &S::PrivateInputs,\n    ) -> Result<(C, Vec<Fr>)> {\n        let vanilla_params = &public_parameters.vanilla_params;\n        let partition_count = partition_count(public_parameters.partitions);\n        let vanilla_proofs = S::prove_all_partitions(\n            vanilla_params,\n            public_inputs,\n            private_inputs,\n            partition_count,\n        )\n        .context(\"failed to generate partition proofs\")?;\n\n        ensure!(\n            vanilla_proofs.len() == partition_count,\n            \"Vanilla proofs didn't match number of partitions.\"\n        );\n\n        let partitions_are_verified =\n            S::verify_all_partitions(vanilla_params, &public_inputs, &vanilla_proofs)\n                .context(\"failed to verify partition proofs\")?;\n\n        ensure!(partitions_are_verified, \"Vanilla proof didn't verify.\");\n\n        // Some(0) because we only return a circuit and inputs for the first partition.\n        // It would be more thorough to return all, though just checking one is probably\n        // fine for verifying circuit construction.\n        let partition_pub_in = S::with_partition(public_inputs.clone(), Some(0));\n        let inputs = Self::generate_public_inputs(&partition_pub_in, vanilla_params, Some(0))?;\n\n        let circuit = Self::circuit(\n            &partition_pub_in,\n            C::ComponentPrivateInputs::default(),\n            
&vanilla_proofs[0],\n            vanilla_params,\n            Some(0),\n        )?;\n\n        Ok((circuit, inputs))\n    }\n\n    /// Like circuit_for_test but returns values for all partitions.\n    fn circuit_for_test_all(\n        public_parameters: &PublicParams<'a, S>,\n        public_inputs: &S::PublicInputs,\n        private_inputs: &S::PrivateInputs,\n    ) -> Result<Vec<(C, Vec<Fr>)>> {\n        let vanilla_params = &public_parameters.vanilla_params;\n        let partition_count = partition_count(public_parameters.partitions);\n        let vanilla_proofs = S::prove_all_partitions(\n            vanilla_params,\n            public_inputs,\n            private_inputs,\n            partition_count,\n        )\n        .context(\"failed to generate partition proofs\")?;\n\n        ensure!(\n            vanilla_proofs.len() == partition_count,\n            \"Vanilla proofs didn't match number of partitions.\"\n        );\n\n        let partitions_are_verified =\n            S::verify_all_partitions(vanilla_params, &public_inputs, &vanilla_proofs)\n                .context(\"failed to verify partition proofs\")?;\n\n        ensure!(partitions_are_verified, \"Vanilla proof didn't verify.\");\n\n        let mut res = Vec::with_capacity(partition_count);\n        for (partition, vanilla_proof) in vanilla_proofs.iter().enumerate() {\n            let partition_pub_in = S::with_partition(public_inputs.clone(), Some(partition));\n            let inputs =\n                Self::generate_public_inputs(&partition_pub_in, vanilla_params, Some(partition))?;\n\n            let circuit = Self::circuit(\n                &partition_pub_in,\n                C::ComponentPrivateInputs::default(),\n                vanilla_proof,\n                vanilla_params,\n                Some(partition),\n            )?;\n            res.push((circuit, inputs));\n        }\n        Ok(res)\n    }\n}\n"
  },
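`batch_verify` above flattens its inputs so the k-th partition of the j-th multi-proof lines up positionally with the corresponding public-input vector handed to `verify_proofs_batch`. A toy sketch of that ordering (illustrative types only, not the trait's signatures):

```rust
// For each multi-proof j with `partitions` partitions, emit (j, k) slots in
// the same order the circuit proofs are flattened: all of proof 0's
// partitions, then all of proof 1's, and so on.
fn flatten_input_slots(partitions: usize, num_multi_proofs: usize) -> Vec<(usize, usize)> {
    (0..num_multi_proofs)
        .flat_map(|j| (0..partitions).map(move |k| (j, k)))
        .collect()
}

fn main() {
    // Two multi-proofs of three partitions each yield six slots whose order
    // must match the flattened proof list exactly.
    assert_eq!(
        flatten_input_slots(3, 2),
        vec![(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]
    );
}
```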
  {
    "path": "storage-proofs-core/src/crypto/aes.rs",
    "content": "use aes::Aes256;\nuse anyhow::{ensure, Context};\nuse block_modes::{block_padding::ZeroPadding, BlockMode, Cbc};\n\nuse crate::error::Result;\n\nconst IV: [u8; 16] = [0u8; 16];\n\npub fn encode(key: &[u8], plaintext: &[u8]) -> Result<Vec<u8>> {\n    ensure!(key.len() == 32, \"invalid key length\");\n\n    let mode = Cbc::<Aes256, ZeroPadding>::new_var(key, &IV).context(\"invalid key\")?;\n\n    Ok(mode.encrypt_vec(plaintext))\n}\n\npub fn decode(key: &[u8], ciphertext: &[u8]) -> Result<Vec<u8>> {\n    ensure!(key.len() == 32, \"invalid key length\");\n\n    let mode = Cbc::<Aes256, ZeroPadding>::new_var(key, &IV).context(\"invalid key\")?;\n\n    let res = mode.decrypt_vec(ciphertext).context(\"failed to decrypt\")?;\n    Ok(res)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    use crate::TEST_SEED;\n\n    #[test]\n    fn test_aes() {\n        let mut rng = XorShiftRng::from_seed(TEST_SEED);\n\n        for i in 0..10 {\n            let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect();\n            let plaintext: Vec<u8> = (0..(i + 1) * 32).map(|_| rng.gen()).collect();\n\n            let ciphertext = encode(key.as_slice(), plaintext.as_slice()).unwrap();\n\n            assert_ne!(\n                plaintext, ciphertext,\n                \"plaintext and ciphertext are identical\"\n            );\n            assert_eq!(plaintext.len(), ciphertext.len());\n\n            let roundtrip = decode(key.as_slice(), ciphertext.as_slice()).unwrap();\n            assert_eq!(plaintext, roundtrip, \"failed to roundtrip\");\n        }\n    }\n}\n"
  },
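A round-trip sketch for the helpers above. It assumes the crate's `error::Result` interoperates with `anyhow` (the module itself constructs its errors via `anyhow`); note that with `ZeroPadding`, trailing zero bytes of a plaintext would be stripped on decode, so the demo avoids them:

```rust
use storage_proofs_core::crypto::aes;

fn main() -> anyhow::Result<()> {
    // AES-256-CBC with an all-zero IV and zero padding, as defined above.
    // The key must be exactly 32 bytes or encode/decode bail out.
    let key = [7u8; 32];
    let plaintext = vec![1u8; 64]; // block-aligned, no trailing zeros

    let ciphertext = aes::encode(&key, &plaintext)?;
    assert_ne!(ciphertext, plaintext);
    assert_eq!(aes::decode(&key, &ciphertext)?, plaintext);
    Ok(())
}
```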
  {
    "path": "storage-proofs-core/src/crypto/feistel.rs",
    "content": "use std::mem::size_of;\n\nuse blake2b_simd::blake2b;\n\npub const FEISTEL_ROUNDS: usize = 3;\n// 3 rounds is an acceptable value for a pseudo-random permutation,\n// see https://github.com/filecoin-project/rust-proofs/issues/425\n// (and also https://en.wikipedia.org/wiki/Feistel_cipher#Theoretical_work).\n\npub type Index = u64;\n\npub type FeistelPrecomputed = (Index, Index, Index);\n\n// Find the minimum number of even bits to represent `num_elements`\n// within a `u32` maximum. Returns the left and right masks evenly\n// distributed that together add up to that minimum number of bits.\npub fn precompute(num_elements: Index) -> FeistelPrecomputed {\n    let mut next_pow4: Index = 4;\n    let mut log4 = 1;\n    while next_pow4 < num_elements {\n        next_pow4 *= 4;\n        log4 += 1;\n    }\n\n    let left_mask = ((1 << log4) - 1) << log4;\n    let right_mask = (1 << log4) - 1;\n    let half_bits = log4;\n\n    (left_mask, right_mask, half_bits)\n}\n\n// Pseudo-randomly shuffle an input from a starting position to another\n// one within the `[0, num_elements)` range using a `key` that will allow\n// the reverse operation to take place.\npub fn permute(\n    num_elements: Index,\n    index: Index,\n    keys: &[Index],\n    precomputed: FeistelPrecomputed,\n) -> Index {\n    let mut u = encode(index, keys, precomputed);\n\n    while u >= num_elements {\n        u = encode(u, keys, precomputed)\n    }\n    // Since we are representing `num_elements` using an even number of bits,\n    // that can encode many values above it, so keep repeating the operation\n    // until we land in the permitted range.\n\n    u\n}\n\n// Inverts the `permute` result to its starting value for the same `key`.\npub fn invert_permute(\n    num_elements: Index,\n    index: Index,\n    keys: &[Index],\n    precomputed: FeistelPrecomputed,\n) -> Index {\n    let mut u = decode(index, keys, precomputed);\n\n    while u >= num_elements {\n        u = decode(u, keys, precomputed);\n    }\n    u\n}\n\n/// common_setup performs common calculations on inputs shared by encode and decode.\n/// Decompress the `precomputed` part of the algorithm into the initial `left` and\n/// `right` pieces `(L_0, R_0)` with the `right_mask` and `half_bits` to manipulate\n/// them.\nfn common_setup(index: Index, precomputed: FeistelPrecomputed) -> (Index, Index, Index, Index) {\n    let (left_mask, right_mask, half_bits) = precomputed;\n\n    let left = (index & left_mask) >> half_bits;\n    let right = index & right_mask;\n\n    (left, right, right_mask, half_bits)\n}\n\nfn encode(index: Index, keys: &[Index], precomputed: FeistelPrecomputed) -> Index {\n    let (mut left, mut right, right_mask, half_bits) = common_setup(index, precomputed);\n\n    for key in keys.iter().take(FEISTEL_ROUNDS) {\n        let (l, r) = (right, left ^ feistel(right, *key, right_mask));\n        left = l;\n        right = r;\n    }\n\n    (left << half_bits) | right\n}\n\nfn decode(index: Index, keys: &[Index], precomputed: FeistelPrecomputed) -> Index {\n    let (mut left, mut right, right_mask, half_bits) = common_setup(index, precomputed);\n\n    for i in (0..FEISTEL_ROUNDS).rev() {\n        let (l, r) = ((right ^ feistel(left, keys[i], right_mask)), left);\n        left = l;\n        right = r;\n    }\n\n    (left << half_bits) | right\n}\n\nconst HALF_FEISTEL_BYTES: usize = size_of::<Index>();\nconst FEISTEL_BYTES: usize = 2 * HALF_FEISTEL_BYTES;\n\n// Round function of the Feistel network: `F(Ri, Ki)`. 
Joins the `right`\n// piece and the `key`, hashes them, and returns the lower bytes of\n// the hash filtered through the `right_mask`.\nfn feistel(right: Index, key: Index, right_mask: Index) -> Index {\n    let mut data: [u8; FEISTEL_BYTES] = [0; FEISTEL_BYTES];\n\n    // So ugly, but the price of (relative) speed.\n    let r = if FEISTEL_BYTES <= 8 {\n        data[0] = (right >> 24) as u8;\n        data[1] = (right >> 16) as u8;\n        data[2] = (right >> 8) as u8;\n        data[3] = right as u8;\n\n        data[4] = (key >> 24) as u8;\n        data[5] = (key >> 16) as u8;\n        data[6] = (key >> 8) as u8;\n        data[7] = key as u8;\n\n        let raw = blake2b(&data);\n        let hash = raw.as_bytes();\n\n        Index::from(hash[0]) << 24\n            | Index::from(hash[1]) << 16\n            | Index::from(hash[2]) << 8\n            | Index::from(hash[3])\n    } else {\n        data[0] = (right >> 56) as u8;\n        data[1] = (right >> 48) as u8;\n        data[2] = (right >> 40) as u8;\n        data[3] = (right >> 32) as u8;\n        data[4] = (right >> 24) as u8;\n        data[5] = (right >> 16) as u8;\n        data[6] = (right >> 8) as u8;\n        data[7] = right as u8;\n\n        data[8] = (key >> 56) as u8;\n        data[9] = (key >> 48) as u8;\n        data[10] = (key >> 40) as u8;\n        data[11] = (key >> 32) as u8;\n        data[12] = (key >> 24) as u8;\n        data[13] = (key >> 16) as u8;\n        data[14] = (key >> 8) as u8;\n        data[15] = key as u8;\n\n        let raw = blake2b(&data);\n        let hash = raw.as_bytes();\n\n        Index::from(hash[0]) << 56\n            | Index::from(hash[1]) << 48\n            | Index::from(hash[2]) << 40\n            | Index::from(hash[3]) << 32\n            | Index::from(hash[4]) << 24\n            | Index::from(hash[5]) << 16\n            | Index::from(hash[6]) << 8\n            | Index::from(hash[7])\n    };\n\n    r & right_mask\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use rayon::prelude::{IntoParallelIterator, ParallelIterator};\n\n    // Some sample n-values which are not powers of four and also don't coincidentally happen to\n    // encode/decode correctly.\n    const BAD_NS: &[Index] = &[5, 6, 8, 12, 17];\n\n    fn encode_decode(n: Index, expect_success: bool) {\n        let mut failed = false;\n        let precomputed = precompute(n);\n        for i in 0..n {\n            let p = encode(i, &[1, 2, 3, 4], precomputed);\n            let v = decode(p, &[1, 2, 3, 4], precomputed);\n            let equal = i == v;\n            let in_range = p < n;\n            if expect_success {\n                assert!(equal, \"failed to permute (n = {})\", n);\n                assert!(in_range, \"output number is too big (n = {})\", n);\n            } else if !equal || !in_range {\n                failed = true;\n            }\n        }\n        if !expect_success {\n            assert!(failed, \"expected failure (n = {})\", n);\n        }\n    }\n\n    #[test]\n    fn test_feistel_power_of_4() {\n        // Our implementation is guaranteed to produce a permutation when input size (number of elements)\n        // is a power of four.\n        let mut n = 1;\n\n        // Powers of 4 always succeed.\n        for _ in 0..4 {\n            n *= 4;\n            encode_decode(n, true);\n        }\n\n        // Some non-power-of-4 sizes also succeed, but here is a selection of example values showing\n        // that this is not guaranteed.\n        for i in 
BAD_NS.iter() {\n            encode_decode(*i, false);\n        }\n    }\n\n    #[test]\n    fn test_feistel_on_arbitrary_set() {\n        for n in BAD_NS.iter() {\n            let precomputed = precompute(*n as Index);\n            for i in 0..*n {\n                let p = permute(*n, i, &[1, 2, 3, 4], precomputed);\n                let v = invert_permute(*n, p, &[1, 2, 3, 4], precomputed);\n                // Since every element in the set is reversibly mapped to another element also in the set,\n                // this is indeed a permutation.\n                assert_eq!(i, v, \"failed to permute\");\n                assert!(p < *n, \"output number is too big\");\n            }\n        }\n    }\n\n    #[test]\n    #[ignore]\n    fn test_feistel_valid_permutation() {\n        let n = (1u64 << 30) as Index;\n        let mut flags = vec![false; n as usize];\n        let precomputed = precompute(n);\n        let perm: Vec<Index> = (0..n)\n            .into_par_iter()\n            .map(|i| permute(n, i, &[1, 2, 3, 4], precomputed))\n            .collect();\n        for i in perm {\n            assert!(i < n, \"output number is too big\");\n            flags[i as usize] = true;\n        }\n        assert!(flags.iter().all(|f| *f), \"output isn't a permutation\");\n    }\n}\n"
  },
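A round-trip sketch of the cycle-walking behaviour described above, mirroring `test_feistel_on_arbitrary_set`: even when the domain size is not a power of four, `permute` and `invert_permute` remain inverse bijections on `[0, n)`:

```rust
use storage_proofs_core::crypto::feistel::{self, Index};

fn main() {
    let n: Index = 6; // deliberately not a power of 4
    let keys = [1, 2, 3, 4];
    let precomputed = feistel::precompute(n);

    for i in 0..n {
        // permute re-encodes until the output lands inside [0, n)...
        let p = feistel::permute(n, i, &keys, precomputed);
        assert!(p < n);
        // ...and invert_permute walks the same cycle backwards.
        assert_eq!(feistel::invert_permute(n, p, &keys, precomputed), i);
    }
    println!("feistel permutation verified for n = {}", n);
}
```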
  {
    "path": "storage-proofs-core/src/crypto/mod.rs",
    "content": "use sha2::{Digest, Sha256};\n\npub mod aes;\npub mod feistel;\npub mod sloth;\npub mod xor;\n\npub struct DomainSeparationTag(&'static str);\n\npub const DRSAMPLE_DST: DomainSeparationTag = DomainSeparationTag(\"Filecoin_DRSample\");\npub const FEISTEL_DST: DomainSeparationTag = DomainSeparationTag(\"Filecoin_Feistel\");\n\npub fn derive_porep_domain_seed(\n    domain_separation_tag: DomainSeparationTag,\n    porep_id: [u8; 32],\n) -> [u8; 32] {\n    Sha256::new()\n        .chain(domain_separation_tag.0)\n        .chain(porep_id)\n        .finalize()\n        .into()\n}\n"
  },
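A small sketch of the domain-separation helper above: the seed is SHA-256 over the tag string followed by the 32-byte `porep_id`, so the same id under different tags yields independent seeds:

```rust
use storage_proofs_core::crypto::{derive_porep_domain_seed, DRSAMPLE_DST, FEISTEL_DST};

fn main() {
    let porep_id = [0u8; 32];

    // Same porep_id, different tags: the derived 32-byte seeds differ.
    let drsample_seed = derive_porep_domain_seed(DRSAMPLE_DST, porep_id);
    let feistel_seed = derive_porep_domain_seed(FEISTEL_DST, porep_id);
    assert_ne!(drsample_seed, feistel_seed);
}
```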
  {
    "path": "storage-proofs-core/src/crypto/sloth.rs",
    "content": "use bellperson::bls::Fr;\nuse ff::Field;\n\n/// Sloth based encoding.\n#[inline]\npub fn encode(key: &Fr, plaintext: &Fr) -> Fr {\n    let mut ciphertext = *plaintext;\n\n    ciphertext.add_assign(key); // c + k\n    ciphertext\n}\n\n/// Sloth based decoding.\n#[inline]\npub fn decode(key: &Fr, ciphertext: &Fr) -> Fr {\n    let mut plaintext = *ciphertext;\n\n    plaintext.sub_assign(key); // c - k\n\n    plaintext\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::bls::FrRepr;\n    use ff::PrimeField;\n    use proptest::{prop_compose, proptest};\n\n    // the modulus from `bls12_381::Fr`\n    // The definition of MODULUS and comment defining r come from paired/src/bls_12_381/fr.rs.\n    // r = 52435875175126190479447740508185965837690552500527637822603658699938581184513\n    const MODULUS: [u64; 4] = [\n        0xffffffff00000001,\n        0x53bda402fffe5bfe,\n        0x3339d80809a1d805,\n        0x73eda753299d7d48,\n    ];\n\n    #[test]\n    fn sloth_bls_12() {\n        let key = Fr::from_str(\"11111111\").unwrap();\n        let plaintext = Fr::from_str(\"123456789\").unwrap();\n        let ciphertext = encode(&key, &plaintext);\n        let decrypted = decode(&key, &ciphertext);\n        assert_eq!(plaintext, decrypted);\n        assert_ne!(plaintext, ciphertext);\n    }\n\n    #[test]\n    fn sloth_bls_12_fake() {\n        let key = Fr::from_str(\"11111111\").unwrap();\n        let key_fake = Fr::from_str(\"11111112\").unwrap();\n        let plaintext = Fr::from_str(\"123456789\").unwrap();\n        let ciphertext = encode(&key, &plaintext);\n        let decrypted = decode(&key_fake, &ciphertext);\n        assert_ne!(plaintext, decrypted);\n    }\n\n    prop_compose! {\n        fn arb_fr()(a in 0..MODULUS[0], b in 0..MODULUS[1], c in 0..MODULUS[2], d in 0..MODULUS[3]) -> Fr {\n            Fr::from_repr(FrRepr([a, b, c, d])).unwrap()\n        }\n    }\n    proptest! {\n        #[test]\n        fn sloth_bls_roundtrip(key in arb_fr(), plaintext in arb_fr()) {\n            let ciphertext = encode(&key, &plaintext);\n            assert_eq!(decode(&key, &ciphertext), plaintext);\n        }\n    }\n}\n"
  },
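A round-trip sketch of the pair above: encoding is field addition of the key in `Fr`, so decoding with the same key is exact, and (as `sloth_bls_12_fake` shows) a wrong key yields a wrong plaintext:

```rust
use bellperson::bls::Fr;
use ff::PrimeField;
use storage_proofs_core::crypto::sloth;

fn main() {
    let key = Fr::from_str("42").unwrap();
    let plaintext = Fr::from_str("123456789").unwrap();

    // encode adds the key; decode subtracts it back out.
    let ciphertext = sloth::encode(&key, &plaintext);
    assert_eq!(sloth::decode(&key, &ciphertext), plaintext);
}
```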
  {
    "path": "storage-proofs-core/src/crypto/xor.rs",
    "content": "use anyhow::ensure;\n\nuse crate::error::Result;\n\n/// Encodes plaintext by elementwise xoring with the passed in key.\npub fn encode(key: &[u8], plaintext: &[u8]) -> Result<Vec<u8>> {\n    xor(key, plaintext)\n}\n\n/// Decodes ciphertext by elementwise xoring with the passed in key.\npub fn decode(key: &[u8], ciphertext: &[u8]) -> Result<Vec<u8>> {\n    xor(key, ciphertext)\n}\n\nfn xor(key: &[u8], input: &[u8]) -> Result<Vec<u8>> {\n    let key_len = key.len();\n    ensure!(key_len == 32, \"Key must be 32 bytes.\");\n\n    Ok(input\n        .iter()\n        .enumerate()\n        .map(|(i, byte)| byte ^ key[i % key_len])\n        .collect())\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    use crate::TEST_SEED;\n\n    #[test]\n    fn test_xor() {\n        let mut rng = XorShiftRng::from_seed(TEST_SEED);\n\n        for i in 0..10 {\n            let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect();\n            let plaintext: Vec<u8> = (0..(i + 1) * 32).map(|_| rng.gen()).collect();\n\n            let ciphertext = encode(key.as_slice(), plaintext.as_slice()).unwrap();\n\n            assert_ne!(\n                plaintext, ciphertext,\n                \"plaintext and ciphertext are identical\"\n            );\n            assert_eq!(plaintext.len(), ciphertext.len());\n\n            let roundtrip = decode(key.as_slice(), ciphertext.as_slice()).unwrap();\n            assert_eq!(plaintext, roundtrip, \"failed to roundtrip\");\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/data.rs",
    "content": "use std::fs::OpenOptions;\nuse std::ops::{Deref, DerefMut};\nuse std::path::PathBuf;\n\nuse anyhow::{ensure, Context, Result};\nuse log::info;\nuse memmap::{MmapMut, MmapOptions};\n\n/// A wrapper around data either on disk or a slice in memory, that can be dropped and read back into memory,\n/// to allow for better control of memory consumption.\n#[derive(Debug)]\npub struct Data<'a> {\n    raw: Option<RawData<'a>>,\n    path: Option<PathBuf>,\n    len: usize,\n}\n\n#[derive(Debug)]\nenum RawData<'a> {\n    Slice(&'a mut [u8]),\n    Mmap(MmapMut),\n}\n\nimpl<'a> Deref for RawData<'a> {\n    type Target = [u8];\n\n    fn deref(&self) -> &Self::Target {\n        match self {\n            RawData::Slice(ref raw) => raw,\n            RawData::Mmap(ref raw) => raw,\n        }\n    }\n}\n\nimpl<'a> DerefMut for RawData<'a> {\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        match self {\n            RawData::Slice(ref mut raw) => raw,\n            RawData::Mmap(ref mut raw) => raw,\n        }\n    }\n}\n\nimpl<'a> From<&'a mut [u8]> for Data<'a> {\n    fn from(raw: &'a mut [u8]) -> Self {\n        let len = raw.len();\n        Data {\n            raw: Some(RawData::Slice(raw)),\n            path: None,\n            len,\n        }\n    }\n}\n\nimpl<'a> From<(MmapMut, PathBuf)> for Data<'a> {\n    fn from(raw: (MmapMut, PathBuf)) -> Self {\n        let len = raw.0.len();\n        Data {\n            raw: Some(RawData::Mmap(raw.0)),\n            path: Some(raw.1),\n            len,\n        }\n    }\n}\n\nimpl<'a> AsRef<[u8]> for Data<'a> {\n    fn as_ref(&self) -> &[u8] {\n        match self.raw {\n            Some(ref raw) => raw,\n            None => panic!(\"figure it out\"),\n        }\n    }\n}\n\nimpl<'a> AsMut<[u8]> for Data<'a> {\n    fn as_mut(&mut self) -> &mut [u8] {\n        match self.raw {\n            Some(ref mut raw) => raw,\n            None => panic!(\"figure it out\"),\n        }\n    }\n}\n\nimpl<'a> Data<'a> {\n    pub fn from_path(path: PathBuf) -> Self {\n        Data {\n            raw: None,\n            path: Some(path),\n            len: 0,\n        }\n    }\n\n    pub fn new(raw: &'a mut [u8], path: PathBuf) -> Self {\n        let len = raw.len();\n\n        Data {\n            raw: Some(RawData::Slice(raw)),\n            path: Some(path),\n            len,\n        }\n    }\n\n    pub fn len(&self) -> usize {\n        self.len\n    }\n\n    pub fn is_empty(&self) -> bool {\n        self.len == 0\n    }\n\n    /// Recover the data.\n    pub fn ensure_data(&mut self) -> Result<()> {\n        match self.raw {\n            Some(..) 
=> {}\n            None => {\n                ensure!(self.path.is_some(), \"Missing path\");\n                let path = self.path.as_ref().expect(\"path as_ref failure\");\n\n                info!(\"restoring {}\", path.display());\n\n                let f_data = OpenOptions::new()\n                    .read(true)\n                    .write(true)\n                    .open(path)\n                    .with_context(|| format!(\"could not open path={:?}\", path))?;\n                let data = unsafe {\n                    MmapOptions::new()\n                        .map_mut(&f_data)\n                        .with_context(|| format!(\"could not mmap path={:?}\", path))?\n                };\n\n                self.len = data.len();\n                self.raw = Some(RawData::Mmap(data));\n            }\n        }\n\n        Ok(())\n    }\n\n    /// Drops the actual data, if we can recover it.\n    pub fn drop_data(&mut self) {\n        if let Some(ref p) = self.path {\n            info!(\"dropping data {}\", p.display());\n            self.raw.take();\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/drgraph.rs",
    "content": "use std::cmp::{max, min};\nuse std::fmt::Debug;\nuse std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse filecoin_hashers::{Hasher, PoseidonArity};\nuse fr32::bytes_into_fr_repr_safe;\nuse generic_array::typenum::Unsigned;\nuse merkletree::merkle::get_merkle_tree_row_count;\nuse rand::{Rng, SeedableRng};\nuse rand_chacha::ChaCha8Rng;\nuse sha2::{Digest, Sha256};\n\nuse crate::{\n    api_version::ApiVersion,\n    crypto::{derive_porep_domain_seed, DRSAMPLE_DST},\n    error::Result,\n    parameter_cache::ParameterSetMetadata,\n    util::{data_at_node_offset, NODE_SIZE},\n    PoRepID,\n};\n\npub const PARALLEL_MERKLE: bool = true;\n\n/// The base degree used for all DRG graphs. One degree from this value is used to ensure that a\n/// given node always has its immediate predecessor as a parent, thus ensuring unique topological\n/// ordering of the graph nodes.\npub const BASE_DEGREE: usize = 6;\n\n/// A depth robust graph.\npub trait Graph<H: Hasher>: Debug + Clone + PartialEq + Eq {\n    type Key: Debug;\n\n    /// Returns the expected size of all nodes in the graph.\n    fn expected_size(&self) -> usize {\n        self.size() * NODE_SIZE\n    }\n\n    /// Returns the merkle tree depth.\n    fn merkle_tree_depth<U: 'static + PoseidonArity>(&self) -> u64 {\n        graph_height::<U>(self.size()) as u64\n    }\n\n    /// Returns a sorted list of all parents of this node. The parents may be repeated.\n    ///\n    /// If a node doesn't have any parents, then this vector needs to return a vector where\n    /// the first element is the requested node. This will be used as indicator for nodes\n    /// without parents.\n    ///\n    /// The `parents` parameter is used to store the result. This is done fore performance\n    /// reasons, so that the vector can be allocated outside this call.\n    fn parents(&self, node: usize, parents: &mut [u32]) -> Result<()>;\n\n    /// Returns the size of the graph (number of nodes).\n    fn size(&self) -> usize;\n\n    /// Returns the number of parents of each node in the graph.\n    fn degree(&self) -> usize;\n\n    fn new(\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        porep_id: PoRepID,\n        api_version: ApiVersion,\n    ) -> Result<Self>;\n    fn seed(&self) -> [u8; 28];\n\n    /// Creates the encoding key.\n    /// The algorithm for that is `Sha256(id | encodedParentNode1 | encodedParentNode1 | ...)`.\n    fn create_key(\n        &self,\n        id: &H::Domain,\n        node: usize,\n        parents: &[u32],\n        parents_data: &[u8],\n        exp_parents_data: Option<&[u8]>,\n    ) -> Result<Self::Key>;\n}\n\npub fn graph_height<U: Unsigned>(number_of_leafs: usize) -> usize {\n    get_merkle_tree_row_count(number_of_leafs, U::to_usize())\n}\n\n/// Bucket sampling algorithm.\n#[derive(Clone, Debug, PartialEq, Eq, Copy)]\npub struct BucketGraph<H: Hasher> {\n    nodes: usize,\n    base_degree: usize,\n    seed: [u8; 28],\n    api_version: ApiVersion,\n    _h: PhantomData<H>,\n}\n\nimpl<H: Hasher> ParameterSetMetadata for BucketGraph<H> {\n    fn identifier(&self) -> String {\n        // NOTE: Seed is not included because it does not influence parameter generation.\n        format!(\n            \"drgraph::BucketGraph{{size: {}; degree: {}; hasher: {}}}\",\n            self.nodes,\n            self.degree(),\n            H::name(),\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        (self.nodes * NODE_SIZE) as u64\n    }\n}\n\nimpl<H: Hasher> Graph<H> for BucketGraph<H> {\n   
 type Key = H::Domain;\n\n    fn create_key(\n        &self,\n        id: &H::Domain,\n        node: usize,\n        parents: &[u32],\n        base_parents_data: &[u8],\n        _exp_parents_data: Option<&[u8]>,\n    ) -> Result<Self::Key> {\n        let mut hasher = Sha256::new();\n        hasher.update(AsRef::<[u8]>::as_ref(id));\n\n        // The hash is about the parents, hence skip if a node doesn't have any parents\n        if node != parents[0] as usize {\n            for parent in parents.iter() {\n                let offset = data_at_node_offset(*parent as usize);\n                hasher.update(&base_parents_data[offset..offset + NODE_SIZE]);\n            }\n        }\n\n        let hash = hasher.finalize();\n        Ok(bytes_into_fr_repr_safe(hash.as_ref()).into())\n    }\n\n    #[inline]\n    fn parents(&self, node: usize, parents: &mut [u32]) -> Result<()> {\n        let m = self.degree();\n\n        match node {\n            // There are special cases for the first and second node: the first node self\n            // references, the second node only references the first node.\n            0 | 1 => {\n                // Use the degree of the current graph (`m`) as `parents.len()` might be bigger than\n                // that (that's the case for Stacked Graph).\n                for parent in parents.iter_mut().take(m) {\n                    *parent = 0;\n                }\n                Ok(())\n            }\n            _ => {\n                // DRG node indexes are guaranteed to fit within a `u32`.\n                let node = node as u32;\n\n                let mut seed = [0u8; 32];\n                seed[..28].copy_from_slice(&self.seed);\n                seed[28..].copy_from_slice(&node.to_le_bytes());\n                let mut rng = ChaCha8Rng::from_seed(seed);\n\n                let m_prime = m - 1;\n                // Large sector sizes require that metagraph node indexes are `u64`.\n                let metagraph_node = node as u64 * m_prime as u64;\n                let n_buckets = (metagraph_node as f64).log2().ceil() as u64;\n\n                let (predecessor_index, other_drg_parents) = match self.api_version {\n                    ApiVersion::V1_0_0 => (m_prime, &mut parents[..]),\n                    ApiVersion::V1_1_0 => (0, &mut parents[1..]),\n                };\n\n                for parent in other_drg_parents.iter_mut().take(m_prime) {\n                    let bucket_index = (rng.gen::<u64>() % n_buckets) + 1;\n                    let largest_distance_in_bucket = min(metagraph_node, 1 << bucket_index);\n                    let smallest_distance_in_bucket = max(2, largest_distance_in_bucket >> 1);\n\n                    // Add 1 becuase the number of distances in the bucket is inclusive.\n                    let n_distances_in_bucket =\n                        largest_distance_in_bucket - smallest_distance_in_bucket + 1;\n\n                    let distance =\n                        smallest_distance_in_bucket + (rng.gen::<u64>() % n_distances_in_bucket);\n\n                    let metagraph_parent = metagraph_node - distance;\n\n                    // Any metagraph node mapped onto the DRG can be safely cast back to `u32`.\n                    let mapped_parent = (metagraph_parent / m_prime as u64) as u32;\n\n                    *parent = if mapped_parent == node {\n                        node - 1\n                    } else {\n                        mapped_parent\n                    };\n                }\n\n                // Immediate predecessor must be 
the first parent, so hashing cannot begin early.\n                parents[predecessor_index] = node - 1;\n                Ok(())\n            }\n        }\n    }\n\n    #[inline]\n    fn size(&self) -> usize {\n        self.nodes\n    }\n\n    /// Returns the degree of the graph.\n    #[inline]\n    fn degree(&self) -> usize {\n        self.base_degree\n    }\n\n    fn seed(&self) -> [u8; 28] {\n        self.seed\n    }\n\n    fn new(\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        porep_id: PoRepID,\n        api_version: ApiVersion,\n    ) -> Result<Self> {\n        ensure!(expansion_degree == 0, \"Expension degree must be zero.\");\n\n        // The number of metagraph nodes must be less than `2u64^54` as to not incur rounding errors\n        // when casting metagraph node indexes from `u64` to `f64` during parent generation.\n        let m_prime = base_degree - 1;\n        let n_metagraph_nodes = nodes as u64 * m_prime as u64;\n        ensure!(\n            n_metagraph_nodes <= 1u64 << 54,\n            \"The number of metagraph nodes must be precisely castable to `f64`\"\n        );\n\n        let drg_seed = derive_drg_seed(porep_id);\n\n        Ok(BucketGraph {\n            nodes,\n            base_degree,\n            seed: drg_seed,\n            api_version,\n            _h: PhantomData,\n        })\n    }\n}\n\npub fn derive_drg_seed(porep_id: PoRepID) -> [u8; 28] {\n    let mut drg_seed = [0; 28];\n    let raw_seed = derive_porep_domain_seed(DRSAMPLE_DST, porep_id);\n    drg_seed.copy_from_slice(&raw_seed[..28]);\n    drg_seed\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use filecoin_hashers::{\n        blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher,\n    };\n    use generic_array::typenum::{U0, U2, U4, U8};\n    use memmap::{MmapMut, MmapOptions};\n    use merkletree::store::StoreConfig;\n\n    use crate::merkle::{\n        create_base_merkle_tree, DiskStore, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper,\n    };\n\n    // Create and return an object of MmapMut backed by in-memory copy of data.\n    pub fn mmap_from(data: &[u8]) -> MmapMut {\n        let mut mm = MmapOptions::new()\n            .len(data.len())\n            .map_anon()\n            .expect(\"Failed to create memory map\");\n        mm.copy_from_slice(data);\n        mm\n    }\n\n    fn graph_bucket<H: Hasher>() {\n        // These PoRepIDs do not correspond to the small-sized graphs used in\n        // the tests. 
However, they are sufficient to distinguish legacy vs new\n        // behavior of parent ordering.\n        let porep_id = |id: u8| {\n            let mut porep_id = [0u8; 32];\n            porep_id[0] = id;\n\n            porep_id\n        };\n\n        let legacy_porep_id = porep_id(0);\n        let new_porep_id = porep_id(5);\n\n        graph_bucket_aux::<H>(legacy_porep_id, ApiVersion::V1_0_0);\n        graph_bucket_aux::<H>(new_porep_id, ApiVersion::V1_1_0);\n    }\n\n    fn graph_bucket_aux<H: Hasher>(porep_id: PoRepID, api_version: ApiVersion) {\n        let degree = BASE_DEGREE;\n\n        for &size in &[4, 16, 256, 2048] {\n            let g = BucketGraph::<H>::new(size, degree, 0, porep_id, api_version).unwrap();\n\n            assert_eq!(g.size(), size, \"wrong nodes count\");\n\n            let mut parents = vec![0; degree];\n            g.parents(0, &mut parents).unwrap();\n            assert_eq!(parents, vec![0; degree as usize]);\n            parents = vec![0; degree];\n            g.parents(1, &mut parents).unwrap();\n            assert_eq!(parents, vec![0; degree as usize]);\n\n            for i in 1..size {\n                let mut pa1 = vec![0; degree];\n                g.parents(i, &mut pa1).unwrap();\n                let mut pa2 = vec![0; degree];\n                g.parents(i, &mut pa2).unwrap();\n\n                assert_eq!(pa1.len(), degree);\n                assert_eq!(pa1, pa2, \"different parents on the same node\");\n\n                let mut p1 = vec![0; degree];\n                g.parents(i, &mut p1).unwrap();\n                let mut p2 = vec![0; degree];\n                g.parents(i, &mut p2).unwrap();\n\n                for parent in p1 {\n                    // TODO: fix me\n                    assert_ne!(i, parent as usize, \"self reference found\");\n                }\n\n                match api_version {\n                    ApiVersion::V1_0_0 => {\n                        assert_eq!(\n                            i - 1,\n                            pa1[degree - 1] as usize,\n                            \"immediate predecessor was not last DRG parent\"\n                        );\n                    }\n                    ApiVersion::V1_1_0 => {\n                        assert_eq!(\n                            i - 1,\n                            pa1[0] as usize,\n                            \"immediate predecessor was not first parent\"\n                        );\n                    }\n                }\n            }\n        }\n    }\n\n    #[test]\n    fn graph_bucket_sha256() {\n        graph_bucket::<Sha256Hasher>();\n    }\n\n    #[test]\n    fn graph_bucket_blake2s() {\n        graph_bucket::<Blake2sHasher>();\n    }\n\n    fn gen_proof<H: 'static + Hasher, U: 'static + PoseidonArity>(config: Option<StoreConfig>) {\n        let leafs = 64;\n        let porep_id = [1; 32];\n        let g = BucketGraph::<H>::new(leafs, BASE_DEGREE, 0, porep_id, ApiVersion::V1_1_0).unwrap();\n        let data = vec![2u8; NODE_SIZE * leafs];\n\n        let mmapped = &mmap_from(&data);\n        let tree =\n            create_base_merkle_tree::<MerkleTreeWrapper<H, DiskStore<H::Domain>, U, U0, U0>>(\n                config,\n                g.size(),\n                mmapped,\n            )\n            .unwrap();\n        let proof = tree.gen_proof(2).unwrap();\n\n        assert!(proof.verify());\n    }\n\n    #[test]\n    fn gen_proof_poseidon_binary() {\n        gen_proof::<PoseidonHasher, U2>(None);\n    }\n\n    #[test]\n    fn gen_proof_sha256_binary() {\n     
   gen_proof::<Sha256Hasher, U2>(None);\n    }\n\n    #[test]\n    fn gen_proof_blake2s_binary() {\n        gen_proof::<Blake2sHasher, U2>(None);\n    }\n\n    #[test]\n    fn gen_proof_poseidon_quad() {\n        gen_proof::<PoseidonHasher, U4>(None);\n    }\n\n    #[test]\n    fn gen_proof_sha256_quad() {\n        gen_proof::<Sha256Hasher, U4>(None);\n    }\n\n    #[test]\n    fn gen_proof_blake2s_quad() {\n        gen_proof::<Blake2sHasher, U4>(None);\n    }\n\n    #[test]\n    fn gen_proof_poseidon_oct() {\n        gen_proof::<PoseidonHasher, U8>(None);\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/error.rs",
    "content": "use std::any::Any;\n\npub use anyhow::Result;\n\nuse bellperson::SynthesisError;\n\nuse crate::sector::SectorId;\n\n/// Custom error types\n#[derive(Debug, thiserror::Error)]\npub enum Error {\n    #[error(\"Could not create PieceInclusionProof (probably bad piece commitment: comm_p)\")]\n    BadPieceCommitment,\n    #[error(\"Out of bounds access {} > {}\", _0, _1)]\n    OutOfBounds(usize, usize),\n    #[error(\"mismatch of data, node_size and nodes {} != {} * {}\", _0, _1, _2)]\n    InvalidMerkleTreeArgs(usize, usize, usize),\n    #[error(\"{}\", _0)]\n    Synthesis(#[from] SynthesisError),\n    #[error(\"{}\", _0)]\n    Io(#[from] std::io::Error),\n    #[error(\"tree root and commitment do not match\")]\n    InvalidCommitment,\n    #[error(\"malformed input\")]\n    MalformedInput,\n    #[error(\"malformed merkle tree\")]\n    MalformedMerkleTree,\n    #[error(\"invalid input size\")]\n    InvalidInputSize,\n    #[error(\"merkle tree generation error: {}\", _0)]\n    MerkleTreeGenerationError(String),\n    #[error(\"Cannot (yet) generate inclusion proof for unaligned piece.\")]\n    UnalignedPiece,\n    #[error(\"{}\", _0)]\n    Serde(#[from] serde_json::Error),\n    #[error(\"unclassified error: {}\", _0)]\n    Unclassified(String),\n    #[error(\"Missing Private Input {0} for sector {1}\")]\n    MissingPrivateInput(&'static str, u64),\n    #[error(\"faulty sectors {:?}\", _0)]\n    FaultySectors(Vec<SectorId>),\n    #[error(\"Invalid parameters file: {}\", _0)]\n    InvalidParameters(String),\n}\n\nimpl From<Box<dyn Any + Send>> for Error {\n    fn from(inner: Box<dyn Any + Send>) -> Error {\n        Error::Unclassified(format!(\"{:?}\", dbg!(inner)))\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/gadgets/constraint.rs",
    "content": "use bellperson::{bls::Engine, gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError};\nuse ff::Field;\n\n/// Adds a constraint to CS, enforcing an equality relationship between the allocated numbers a and b.\n///\n/// a == b\npub fn equal<E: Engine, A, AR, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    annotation: A,\n    a: &AllocatedNum<E>,\n    b: &AllocatedNum<E>,\n) where\n    A: FnOnce() -> AR,\n    AR: Into<String>,\n{\n    // a * 1 = b\n    cs.enforce(\n        annotation,\n        |lc| lc + a.get_variable(),\n        |lc| lc + CS::one(),\n        |lc| lc + b.get_variable(),\n    );\n}\n\n/// Adds a constraint to CS, enforcing a add relationship between the allocated numbers a, b, and sum.\n///\n/// a + b = sum\npub fn sum<E: Engine, A, AR, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    annotation: A,\n    a: &AllocatedNum<E>,\n    b: &AllocatedNum<E>,\n    sum: &AllocatedNum<E>,\n) where\n    A: FnOnce() -> AR,\n    AR: Into<String>,\n{\n    // (a + b) * 1 = sum\n    cs.enforce(\n        annotation,\n        |lc| lc + a.get_variable() + b.get_variable(),\n        |lc| lc + CS::one(),\n        |lc| lc + sum.get_variable(),\n    );\n}\n\npub fn add<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    a: &AllocatedNum<E>,\n    b: &AllocatedNum<E>,\n) -> Result<AllocatedNum<E>, SynthesisError> {\n    let res = AllocatedNum::alloc(cs.namespace(|| \"add_num\"), || {\n        let mut tmp = a\n            .get_value()\n            .ok_or_else(|| SynthesisError::AssignmentMissing)?;\n        tmp.add_assign(\n            &b.get_value()\n                .ok_or_else(|| SynthesisError::AssignmentMissing)?,\n        );\n\n        Ok(tmp)\n    })?;\n\n    // a + b = res\n    sum(&mut cs, || \"sum constraint\", &a, &b, &res);\n\n    Ok(res)\n}\n\npub fn sub<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    a: &AllocatedNum<E>,\n    b: &AllocatedNum<E>,\n) -> Result<AllocatedNum<E>, SynthesisError> {\n    let res = AllocatedNum::alloc(cs.namespace(|| \"sub_num\"), || {\n        let mut tmp = a\n            .get_value()\n            .ok_or_else(|| SynthesisError::AssignmentMissing)?;\n        tmp.sub_assign(\n            &b.get_value()\n                .ok_or_else(|| SynthesisError::AssignmentMissing)?,\n        );\n\n        Ok(tmp)\n    })?;\n\n    // a - b = res\n    difference(&mut cs, || \"subtraction constraint\", &a, &b, &res);\n\n    Ok(res)\n}\n\n/// Adds a constraint to CS, enforcing a difference relationship between the allocated numbers a, b, and difference.\n///\n/// a - b = difference\npub fn difference<E: Engine, A, AR, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    annotation: A,\n    a: &AllocatedNum<E>,\n    b: &AllocatedNum<E>,\n    difference: &AllocatedNum<E>,\n) where\n    A: FnOnce() -> AR,\n    AR: Into<String>,\n{\n    //    difference = a-b\n    // => difference + b = a\n    // => (difference + b) * 1 = a\n    cs.enforce(\n        annotation,\n        |lc| lc + difference.get_variable() + b.get_variable(),\n        |lc| lc + CS::one(),\n        |lc| lc + a.get_variable(),\n    );\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::{\n        bls::{Bls12, Fr},\n        util_cs::test_cs::TestConstraintSystem,\n    };\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n\n    use crate::TEST_SEED;\n\n    #[test]\n    fn add_constraint() {\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        for _ in 0..100 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        
    let a = AllocatedNum::alloc(cs.namespace(|| \"a\"), || Ok(Fr::random(rng))).unwrap();\n            let b = AllocatedNum::alloc(cs.namespace(|| \"b\"), || Ok(Fr::random(rng))).unwrap();\n\n            let res = add(cs.namespace(|| \"a+b\"), &a, &b).expect(\"add failed\");\n\n            let mut tmp = a.get_value().unwrap();\n            tmp.add_assign(&b.get_value().unwrap());\n\n            assert_eq!(res.get_value().unwrap(), tmp);\n            assert!(cs.is_satisfied());\n        }\n    }\n\n    #[test]\n    fn sub_constraint() {\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        for _ in 0..100 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let a = AllocatedNum::alloc(cs.namespace(|| \"a\"), || Ok(Fr::random(rng))).unwrap();\n            let b = AllocatedNum::alloc(cs.namespace(|| \"b\"), || Ok(Fr::random(rng))).unwrap();\n\n            let res = sub(cs.namespace(|| \"a-b\"), &a, &b).expect(\"subtraction failed\");\n\n            let mut tmp = a.get_value().unwrap();\n            tmp.sub_assign(&b.get_value().unwrap());\n\n            assert_eq!(res.get_value().unwrap(), tmp);\n            assert!(cs.is_satisfied());\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/gadgets/encode.rs",
    "content": "use bellperson::{bls::Engine, gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError};\n\nuse crate::gadgets::constraint;\n\npub fn encode<E, CS>(\n    mut cs: CS,\n    key: &AllocatedNum<E>,\n    value: &AllocatedNum<E>,\n) -> Result<AllocatedNum<E>, SynthesisError>\nwhere\n    E: Engine,\n    CS: ConstraintSystem<E>,\n{\n    constraint::add(cs.namespace(|| \"encode_add\"), key, value)\n}\n\npub fn decode<E, CS>(\n    mut cs: CS,\n    key: &AllocatedNum<E>,\n    value: &AllocatedNum<E>,\n) -> Result<AllocatedNum<E>, SynthesisError>\nwhere\n    E: Engine,\n    CS: ConstraintSystem<E>,\n{\n    constraint::sub(cs.namespace(|| \"decode_sub\"), value, key)\n}\n"
  },
  {
    "path": "storage-proofs-core/src/gadgets/insertion.rs",
    "content": "//! Insertion Permutation\n//!\n//! Insert an `AllocatedNum` into a sequence of `AllocatedNums` at an arbitrary position.\n//! This can be thought of as a generalization of `AllocatedNum::conditionally_reverse` and reduces to it in the binary case.\n\nuse bellperson::{\n    bls::Engine,\n    gadgets::{\n        boolean::{AllocatedBit, Boolean},\n        num::AllocatedNum,\n    },\n    ConstraintSystem, SynthesisError,\n};\nuse ff::Field;\n\n/// Insert `element` after the nth 1-indexed element of `elements`, where `path_bits` represents n, least-significant bit first.\n/// The returned result contains a new vector of `AllocatedNum`s with `element` inserted, and constraints are enforced.\n/// `elements.len() + 1` must be a power of two.\npub fn insert<E: Engine, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    element: &AllocatedNum<E>,\n    bits: &[Boolean],\n    elements: &[AllocatedNum<E>],\n) -> Result<Vec<AllocatedNum<E>>, SynthesisError> {\n    let size = elements.len() + 1;\n    assert_eq!(1 << bits.len(), size);\n\n    // For the sizes we know we need, we can take advantage of redundancy in the candidate selection at each position.\n    // This allows us to accomplish insertion with fewer constraints, if we hand-optimize.\n    // We don't need a special case for size 2 because the general algorithm\n    // collapses to `conditionally_reverse` when size = 2.\n    //\n    // If no special cases have been hand-coded, use the general algorithm.\n    // This costs size * (size - 1) constraints.\n    //\n    // Future work: In theory, we could compile arbitrary lookup tables to minimize constraints and avoid\n    // the most general case except when actually required — which it never is for simple insertion.\n    if size == 2 {\n        return insert_2(cs, element, bits, elements);\n    } else if size == 4 {\n        return insert_4(cs, element, bits, elements);\n    } else if size == 8 {\n        return insert_8(cs, element, bits, elements);\n    };\n\n    // Running example choices, represent inserting x into [1, 2, 3].\n\n    // An indexed sequence of correct results, one of which (the indexed one) will be selected.\n    let mut potential_results = Vec::new();\n    for index in 0..size {\n        // These are the results when bits corresponds to index.\n        //\n        // index | result\n        //-------+-------\n        // 0     | x 1 2 3\n        // 1     | 1 x 2 3\n        // 2     | 1 2 x 3\n        // 3     | 1 2 3 x\n        let mut result = Vec::new();\n        (0..index).for_each(|i| result.push(elements[i].clone()));\n        result.push(element.clone());\n        (index..elements.len()).for_each(|i| result.push(elements[i].clone()));\n\n        potential_results.push(result);\n    }\n\n    let mut result = Vec::new();\n    for pos in 0..size {\n        // These are the choices needed such that for each position in the selected result,\n        // the value is column-for-pos[index].\n        //\n        // This table is constructed by reading columns from the index-result table above.\n        // Reading columns from this table yields the result table.\n\n        // pos   column\n        // 0     x 1 1 1\n        // 1     1 x 2 2\n        // 2     2 2 x 3\n        // 3     3 3 3 x\n        let choices = (0..size)\n            .map(|index| potential_results[index][pos].clone())\n            .collect::<Vec<_>>();\n\n        result.push(select(\n            cs.namespace(|| format!(\"choice at {}\", pos)),\n            &choices,\n            bits,\n        
)?);\n    }\n\n    Ok(result)\n}\n\npub fn insert_2<E: Engine, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    element: &AllocatedNum<E>,\n    bits: &[Boolean],\n    elements: &[AllocatedNum<E>],\n) -> Result<Vec<AllocatedNum<E>>, SynthesisError> {\n    assert_eq!(elements.len() + 1, 2);\n    assert_eq!(bits.len(), 1);\n\n    Ok(vec![\n        pick(\n            cs.namespace(|| \"binary insert 0\"),\n            &bits[0],\n            &elements[0],\n            &element,\n        )?,\n        pick(\n            cs.namespace(|| \"binary insert 1\"),\n            &bits[0],\n            &element,\n            &elements[0],\n        )?,\n    ])\n}\n\npub fn insert_4<E: Engine, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    element: &AllocatedNum<E>,\n    bits: &[Boolean],\n    elements: &[AllocatedNum<E>],\n) -> Result<Vec<AllocatedNum<E>>, SynthesisError> {\n    assert_eq!(elements.len() + 1, 4);\n    assert_eq!(bits.len(), 2);\n\n    /*\n    To insert A into [b, c, d] at position n of bits, represented by booleans [b0, b1].\n    n [b0, b1] pos 0 1 2 3\n    0 [0, 0]       A b c d\n    1 [1, 0]       b A c d\n    2 [0, 1]       b c A d\n    3 [1, 1]       b c d A\n\n    A = element\n    b = elements[0]\n    c = elements[1]\n    d = elements[2]\n     */\n    let (b0, b1) = (&bits[0], &bits[1]);\n    let (a, b, c, d) = (&element, &elements[0], &elements[1], &elements[2]);\n\n    /// Define witness macro to allow legible definition of positional constraints.\n    /// See example expansions in comment to first usages below.\n    macro_rules! witness {\n        ( $var:ident <== if $cond:ident { $a:expr } else { $b:expr }) => {\n            let $var = pick(cs.namespace(|| stringify!($var)), $cond, $a, $b)?;\n        };\n    }\n\n    // Witness naming convention:\n    // `p0_x0` means \"Output position 0 when b0 is unknown (x) and b1 is 0.\"\n\n    // Declaration:\n    witness!(p0_x0 <== if b0 { b } else { a });\n    witness!(p0 <== if b1 { b } else { &p0_x0 });\n    // Expansion:\n    // let p0_x0 = pick(cs.namespace(|| \"p0_x0\"), b0, b, a)?;\n    // let p0 = pick(cs.namespace(|| \"p0\"), b1, b, &p0_x0)?;\n\n    witness!(p1_x0 <== if b0 { a } else { b });\n    witness!(p1 <== if b1 { c } else { &p1_x0 });\n\n    witness!(p2_x1 <== if b0 { d } else { a });\n    witness!(p2 <== if b1 { &p2_x1 } else { c });\n\n    witness!(p3_x1 <== if b0 { a } else { d });\n    witness!(p3 <== if b1 { &p3_x1 } else { d });\n\n    Ok(vec![p0, p1, p2, p3])\n}\n\n#[allow(clippy::many_single_char_names)]\npub fn insert_8<E: Engine, CS: ConstraintSystem<E>>(\n    cs: &mut CS,\n    element: &AllocatedNum<E>,\n    bits: &[Boolean],\n    elements: &[AllocatedNum<E>],\n) -> Result<Vec<AllocatedNum<E>>, SynthesisError> {\n    assert_eq!(elements.len() + 1, 8);\n    assert_eq!(bits.len(), 3);\n    /*\n    To insert A into [b, c, d, e, f, g, h] at position n of bits, represented by booleans [b0, b1, b2].\n    n [b0, b1, b2] pos 0 1 2 3 4 5 6 7\n    0 [0, 0, 0]        A b c d e f g h\n    1 [1, 0, 0]        b A c d e f g h\n    2 [0, 1, 0]        b c A d e f g h\n    3 [1, 1, 0]        b c d A e f g h\n    4 [0, 0, 1]        b c d e A f g h\n    5 [1, 0, 1]        b c d e f A g h\n    6 [0, 1, 1]        b c d e f g A h\n    7 [1, 1, 1]        b c d e f g h A\n\n\n    A = element\n    b = elements[0]\n    c = elements[1]\n    d = elements[2]\n    e = elements[3]\n    f = elements[4]\n    g = elements[5]\n    h = elements[6]\n     */\n\n    let (b0, b1, b2) = (&bits[0], &bits[1], &bits[2]);\n    let (a, b, c, d, e, f, g, h) = (\n        &element,\n        &elements[0],\n        &elements[1],\n        &elements[2],\n        &elements[3],\n        &elements[4],\n        &elements[5],\n        &elements[6],\n    );\n\n    // true if booleans b0 and b1 are both false: `(not b0) and (not b1)`\n    // (1 - b0) * (1 - b1) = 1\n    let b0_nor_b1 = match (b0, b1) {\n        (Boolean::Is(ref b0), Boolean::Is(ref b1)) => {\n            Boolean::Is(AllocatedBit::nor(cs.namespace(|| \"b0 nor b1\"), b0, b1)?)\n        }\n        _ => panic!(\"bits must be allocated and unnegated\"),\n    };\n\n    // true if booleans b0 and b1 are both true: `b0 and b1`\n    // b0 * b1 = 1\n    let b0_and_b1 = match (&bits[0], &bits[1]) {\n        (Boolean::Is(ref b0), Boolean::Is(ref b1)) => {\n            Boolean::Is(AllocatedBit::and(cs.namespace(|| \"b0 and b1\"), b0, b1)?)\n        }\n        _ => panic!(\"bits must be allocated and unnegated\"),\n    };\n\n    /// Define witness macro to allow legible definition of positional constraints.\n    /// See example expansions in comment to first usages below.\n    macro_rules! witness {\n        ( $var:ident <== if $cond:ident { $a:expr } else { $b:expr }) => {\n            let $var = pick(cs.namespace(|| stringify!($var)), $cond, $a, $b)?;\n        };\n\n        // Match condition terms which are explicit syntactic references.\n        ( $var:ident <== if &$cond:ident { $a:expr } else { $b:expr }) => {\n            let $var = pick(cs.namespace(|| stringify!($var)), &$cond, $a, $b)?;\n        };\n    }\n\n    // Declaration:\n    witness!(p0_xx0 <== if &b0_nor_b1 { a } else { b });\n    witness!(p0 <== if b2 { b } else { &p0_xx0 });\n    // Expansion:\n    // let p0_xx0 = pick(cs.namespace(|| \"p0_xx0\"), &b0_nor_b1, a, b)?;\n    // let p0 = pick(cs.namespace(|| \"p0\"), b2, b, &p0_xx0)?;\n\n    witness!(p1_x00 <== if b0 { a } else { b });\n    witness!(p1_xx0 <== if b1 { c } else { &p1_x00 });\n    witness!(p1 <== if b2 { c } else { &p1_xx0 });\n\n    witness!(p2_x10 <== if b0 { d } else { a });\n    witness!(p2_xx0 <== if b1 { &p2_x10 } else { c });\n    witness!(p2 <== if b2 { d } else { &p2_xx0 });\n\n    witness!(p3_xx0 <== if &b0_and_b1 { a } else { d });\n    witness!(p3 <== if b2 { e } else { &p3_xx0 });\n\n    witness!(p4_xx1 <== if &b0_nor_b1 { a } else { f });\n    witness!(p4 <== if b2 { &p4_xx1 } else { e });\n\n    witness!(p5_x01 <== if b0 { a } else { f });\n    witness!(p5_xx1 <== if b1 { g } else { &p5_x01 });\n    witness!(p5 <== if b2 { &p5_xx1 } else { f });\n\n    witness!(p6_x11 <== if b0 { h } else { a });\n    witness!(p6_xx1 <== if b1 { &p6_x11 } else { g });\n    witness!(p6 <== if b2 { &p6_xx1 } else { g });\n\n    witness!(p7_xx1 <== if &b0_and_b1 { a } else { h });\n    witness!(p7 <== if b2 { &p7_xx1 } else { h });\n\n    Ok(vec![p0, p1, p2, p3, p4, p5, p6, p7])\n}\n\n/// Select the nth element of `from`, where `path_bits` represents n, least-significant bit first.\n/// The returned result contains the selected element, and constraints are enforced.\n/// `from.len()` must be a power of two.\npub fn select<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    from: &[AllocatedNum<E>],\n    path_bits: &[Boolean],\n) -> Result<AllocatedNum<E>, SynthesisError> {\n    let pathlen = path_bits.len();\n    assert_eq!(1 << pathlen, from.len());\n\n    let mut state = Vec::new();\n    for elt in from {\n        state.push(elt.clone())\n    }\n    let mut half_size = from.len() / 2;\n\n    // We reverse the path bits because the contained algorithm consumes most 
significant bit first.\n    for (i, bit) in path_bits.iter().rev().enumerate() {\n        let mut new_state = Vec::new();\n        for j in 0..half_size {\n            new_state.push(pick(\n                cs.namespace(|| format!(\"pick {}, {}\", i, j)),\n                bit,\n                &state[half_size + j],\n                &state[j],\n            )?);\n        }\n        state = new_state;\n        half_size /= 2;\n    }\n\n    Ok(state.remove(0))\n}\n\n/// Takes two allocated numbers (`a`, `b`) and returns `a` if the condition is true, and `b` otherwise.\npub fn pick<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    condition: &Boolean,\n    a: &AllocatedNum<E>,\n    b: &AllocatedNum<E>,\n) -> Result<AllocatedNum<E>, SynthesisError>\nwhere\n    CS: ConstraintSystem<E>,\n{\n    let c = AllocatedNum::alloc(cs.namespace(|| \"pick result\"), || {\n        if condition\n            .get_value()\n            .ok_or(SynthesisError::AssignmentMissing)?\n        {\n            Ok(a.get_value().ok_or(SynthesisError::AssignmentMissing)?)\n        } else {\n            Ok(b.get_value().ok_or(SynthesisError::AssignmentMissing)?)\n        }\n    })?;\n\n    // Constrain (b - a) * condition = (b - c), ensuring c = a iff\n    // condition is true, otherwise c = b.\n    cs.enforce(\n        || \"pick\",\n        |lc| lc + b.get_variable() - a.get_variable(),\n        |_| condition.lc(CS::one(), E::Fr::one()),\n        |lc| lc + b.get_variable() - c.get_variable(),\n    );\n\n    Ok(c)\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::{\n        bls::{Bls12, Fr},\n        util_cs::test_cs::TestConstraintSystem,\n    };\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n\n    use crate::TEST_SEED;\n\n    #[test]\n    fn test_select() {\n        for log_size in 1..5 {\n            let size = 1 << log_size;\n            for index in 0..size {\n                // Initialize rng in loop to simplify debugging with consistent elements.\n                let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n                let mut cs = TestConstraintSystem::new();\n\n                let elements: Vec<_> = (0..size)\n                    .map(|i| {\n                        AllocatedNum::<Bls12>::alloc(\n                            &mut cs.namespace(|| format!(\"element {}\", i)),\n                            || {\n                                let elt = <Fr as Field>::random(rng);\n                                Ok(elt)\n                            },\n                        )\n                        .unwrap()\n                    })\n                    .collect();\n\n                let path_bits = (0..log_size)\n                    .map(|i| {\n                        <Boolean as From<AllocatedBit>>::from(\n                            AllocatedBit::alloc(cs.namespace(|| format!(\"index bit {}\", i)), {\n                                let bit = ((index >> i) & 1) == 1;\n                                Some(bit)\n                            })\n                            .unwrap(),\n                        )\n                    })\n                    .collect::<Vec<_>>();\n\n                let test_constraints = cs.num_constraints();\n                assert_eq!(log_size, test_constraints);\n\n                let selected = select(cs.namespace(|| \"select\"), &elements, &path_bits).unwrap();\n\n                assert!(cs.is_satisfied());\n                assert_eq!(elements[index].get_value(), selected.get_value());\n\n                // One constraint 
per non-leaf node of a binary tree with `size` leaves.\n                let expected_constraints = size - 1;\n\n                let actual_constraints = cs.num_constraints() - test_constraints;\n                assert_eq!(expected_constraints, actual_constraints);\n            }\n        }\n    }\n\n    #[test]\n    fn test_insert() {\n        for log_size in 1..=4 {\n            let size = 1 << log_size;\n            for index in 0..size {\n                // Initialize rng in loop to simplify debugging with consistent elements.\n                let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n                let mut cs = TestConstraintSystem::new();\n\n                let elements: Vec<_> = (0..size - 1)\n                    .map(|i| {\n                        AllocatedNum::<Bls12>::alloc(\n                            &mut cs.namespace(|| format!(\"element {}\", i)),\n                            || {\n                                let elt = <Fr as Field>::random(rng);\n                                Ok(elt)\n                            },\n                        )\n                        .unwrap()\n                    })\n                    .collect();\n\n                let to_insert =\n                    AllocatedNum::<Bls12>::alloc(&mut cs.namespace(|| \"insert\"), || {\n                        let elt_to_insert = <Fr as Field>::random(rng);\n                        Ok(elt_to_insert)\n                    })\n                    .unwrap();\n\n                let index_bits = (0..log_size)\n                    .map(|i| {\n                        <Boolean as From<AllocatedBit>>::from(\n                            AllocatedBit::alloc(cs.namespace(|| format!(\"index bit {}\", i)), {\n                                let bit = ((index >> i) & 1) == 1;\n                                Some(bit)\n                            })\n                            .unwrap(),\n                        )\n                    })\n                    .collect::<Vec<_>>();\n\n                let test_constraints = cs.num_constraints();\n                assert_eq!(log_size, test_constraints);\n\n                let mut inserted = insert(\n                    &mut cs,\n                    &to_insert,\n                    index_bits.as_slice(),\n                    elements.as_slice(),\n                )\n                .unwrap();\n\n                assert!(cs.is_satisfied());\n\n                let extracted = inserted.remove(index);\n                assert_eq!(to_insert.get_value(), extracted.get_value());\n\n                for i in 0..size - 1 {\n                    let a = elements[i].get_value();\n                    let b = inserted[i].get_value();\n                    assert_eq!(a, b);\n                }\n\n                // One selection for each element of the result.\n                let expected_constraints = match size {\n                    8 => 22, // unoptimized, would be 56\n                    4 => 8,  // unoptimized, would be 12\n                    _ => size * (size - 1),\n                };\n\n                let actual_constraints = cs.num_constraints() - test_constraints;\n                assert_eq!(expected_constraints, actual_constraints);\n            }\n        }\n    }\n}\n
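\n// Editor's sketch (not part of the original tests): `pick` is the primitive\n// behind `select` and `insert`; it returns `a` when the condition bit is true\n// and `b` otherwise, and costs exactly one constraint.\n#[cfg(test)]\nmod pick_sketch {\n    use super::*;\n\n    use bellperson::{\n        bls::{Bls12, Fr},\n        util_cs::test_cs::TestConstraintSystem,\n    };\n    use ff::PrimeField;\n\n    #[test]\n    fn pick_selects_by_condition() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        let a = AllocatedNum::alloc(cs.namespace(|| \"a\"), || {\n            Ok(Fr::from_str(\"1\").expect(\"from_str failure\"))\n        })\n        .expect(\"alloc failure\");\n        let b = AllocatedNum::alloc(cs.namespace(|| \"b\"), || {\n            Ok(Fr::from_str(\"2\").expect(\"from_str failure\"))\n        })\n        .expect(\"alloc failure\");\n        let cond = Boolean::from(\n            AllocatedBit::alloc(cs.namespace(|| \"cond\"), Some(true)).expect(\"alloc failure\"),\n        );\n\n        let constraints_before = cs.num_constraints();\n        let picked = pick(cs.namespace(|| \"pick\"), &cond, &a, &b).expect(\"pick failure\");\n\n        assert_eq!(picked.get_value(), a.get_value());\n        assert_eq!(cs.num_constraints() - constraints_before, 1);\n        assert!(cs.is_satisfied());\n    }\n}\n"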
  },
  {
    "path": "storage-proofs-core/src/gadgets/mod.rs",
    "content": "pub mod constraint;\npub mod encode;\npub mod insertion;\npub mod por;\npub mod uint64;\npub mod variables;\npub mod xor;\n"
  },
  {
    "path": "storage-proofs-core/src/gadgets/multipack.rs",
    "content": "use bellperson::gadgets::{\n    boolean::Boolean,\n    num::{AllocatedNum, Num},\n    ConstraintSystem, SynthesisError,\n};\nuse ff::{Field, PrimeField, ScalarEngine};\n\n/// Takes a sequence of booleans and exposes them as a single compact Num.\npub fn pack_bits<E, CS>(mut cs: CS, bits: &[Boolean]) -> Result<AllocatedNum<E>, SynthesisError>\nwhere\n    E: ScalarEngine,\n    CS: ConstraintSystem<E>,\n{\n    let mut num = Num::<E>::zero();\n    let mut coeff = E::Fr::one();\n    for bit in bits.iter().take(E::Fr::CAPACITY as usize) {\n        num = num.add_bool_with_coeff(CS::one(), &bit, coeff);\n\n        coeff.double();\n    }\n\n    let alloc_num = AllocatedNum::alloc(cs.namespace(|| \"input\"), || {\n        num.get_value()\n            .ok_or_else(|| SynthesisError::AssignmentMissing)\n    })?;\n\n    // num * 1 = input\n    cs.enforce(\n        || \"packing constraint\",\n        |_| num.lc(E::Fr::one()),\n        |lc| lc + CS::one(),\n        |lc| lc + alloc_num.get_variable(),\n    );\n\n    Ok(alloc_num)\n}\n"
  },
  {
    "path": "storage-proofs-core/src/gadgets/por.rs",
    "content": "use std::convert::TryFrom;\nuse std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse bellperson::{\n    bls::{Bls12, Fr, FrRepr},\n    gadgets::{\n        boolean::{AllocatedBit, Boolean},\n        multipack,\n        num::AllocatedNum,\n    },\n    Circuit, ConstraintSystem, SynthesisError,\n};\nuse ff::PrimeField;\nuse filecoin_hashers::{HashFunction, Hasher, PoseidonArity};\nuse generic_array::typenum::Unsigned;\n\nuse crate::{\n    compound_proof::{CircuitComponent, CompoundProof},\n    error::Result,\n    gadgets::{constraint, insertion::insert, variables::Root},\n    merkle::{base_path_length, MerkleProofTrait, MerkleTreeTrait},\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    por::PoR,\n    proof::ProofScheme,\n};\n\n/// Proof of retrievability.\n///\n/// # Fields\n///\n/// * `params` - The params for the bls curve.\n/// * `value` - The value of the leaf.\n/// * `auth_path` - The authentication path of the leaf in the tree.\n/// * `root` - The merkle root of the tree.\n///\npub struct PoRCircuit<Tree: MerkleTreeTrait> {\n    value: Root<Bls12>,\n    auth_path: AuthPath<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n    root: Root<Bls12>,\n    private: bool,\n    _tree: PhantomData<Tree>,\n}\n\n#[derive(Debug, Clone)]\npub struct AuthPath<\n    H: Hasher,\n    U: 'static + PoseidonArity,\n    V: 'static + PoseidonArity,\n    W: 'static + PoseidonArity,\n> {\n    base: SubPath<H, U>,\n    sub: SubPath<H, V>,\n    top: SubPath<H, W>,\n}\n\nimpl<\n        H: Hasher,\n        U: 'static + PoseidonArity,\n        V: 'static + PoseidonArity,\n        W: 'static + PoseidonArity,\n    > From<Vec<(Vec<Option<Fr>>, Option<usize>)>> for AuthPath<H, U, V, W>\n{\n    fn from(mut base_opts: Vec<(Vec<Option<Fr>>, Option<usize>)>) -> Self {\n        let has_top = W::to_usize() > 0;\n        let has_sub = V::to_usize() > 0;\n        let len = base_opts.len();\n\n        let x = if has_top {\n            2\n        } else if has_sub {\n            1\n        } else {\n            0\n        };\n        let mut opts = base_opts.split_off(len - x);\n\n        let base = base_opts\n            .into_iter()\n            .map(|(hashes, index)| PathElement {\n                hashes,\n                index,\n                _a: Default::default(),\n                _h: Default::default(),\n            })\n            .collect();\n\n        let top = if has_top {\n            let (hashes, index) = opts.pop().expect(\"pop failure\");\n            vec![PathElement {\n                hashes,\n                index,\n                _a: Default::default(),\n                _h: Default::default(),\n            }]\n        } else {\n            Vec::new()\n        };\n\n        let sub = if has_sub {\n            let (hashes, index) = opts.pop().expect(\"pop failure\");\n            vec![PathElement {\n                hashes,\n                index,\n                _a: Default::default(),\n                _h: Default::default(),\n            }]\n        } else {\n            Vec::new()\n        };\n\n        assert!(opts.is_empty());\n\n        AuthPath {\n            base: SubPath { path: base },\n            sub: SubPath { path: sub },\n            top: SubPath { path: top },\n        }\n    }\n}\n\n#[derive(Debug, Clone)]\nstruct SubPath<H: Hasher, Arity: 'static + PoseidonArity> {\n    path: Vec<PathElement<H, Arity>>,\n}\n\n#[derive(Debug, Clone)]\nstruct PathElement<H: Hasher, Arity: 'static + PoseidonArity> {\n    hashes: Vec<Option<Fr>>,\n    
index: Option<usize>,\n    _a: PhantomData<Arity>,\n    _h: PhantomData<H>,\n}\n\nimpl<H: Hasher, Arity: 'static + PoseidonArity> SubPath<H, Arity> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(\n        self,\n        mut cs: CS,\n        mut cur: AllocatedNum<Bls12>,\n    ) -> Result<(AllocatedNum<Bls12>, Vec<Boolean>), SynthesisError> {\n        let arity = Arity::to_usize();\n\n        if arity == 0 {\n            // Nothing to do here.\n            assert!(self.path.is_empty());\n            return Ok((cur, vec![]));\n        }\n\n        assert_eq!(1, arity.count_ones(), \"arity must be a power of two\");\n        let index_bit_count = arity.trailing_zeros() as usize;\n\n        let mut auth_path_bits = Vec::with_capacity(self.path.len());\n\n        for (i, path_element) in self.path.into_iter().enumerate() {\n            let path_hashes = path_element.hashes;\n            let optional_index = path_element.index; // Optional because of Bellman blank-circuit construction mechanics.\n\n            let cs = &mut cs.namespace(|| format!(\"merkle tree hash {}\", i));\n\n            let mut index_bits = Vec::with_capacity(index_bit_count);\n\n            for i in 0..index_bit_count {\n                let bit = AllocatedBit::alloc(cs.namespace(|| format!(\"index bit {}\", i)), {\n                    optional_index.map(|index| ((index >> i) & 1) == 1)\n                })?;\n\n                index_bits.push(Boolean::from(bit));\n            }\n\n            auth_path_bits.extend_from_slice(&index_bits);\n\n            // Witness the authentication path elements adjacent at this depth.\n            let path_hash_nums = path_hashes\n                .iter()\n                .enumerate()\n                .map(|(i, elt)| {\n                    AllocatedNum::alloc(cs.namespace(|| format!(\"path element {}\", i)), || {\n                        elt.ok_or_else(|| SynthesisError::AssignmentMissing)\n                    })\n                })\n                .collect::<Result<Vec<_>, _>>()?;\n\n            let inserted = insert(cs, &cur, &index_bits, &path_hash_nums)?;\n\n            // Compute the new subtree value\n            cur = H::Function::hash_multi_leaf_circuit::<Arity, _>(\n                cs.namespace(|| \"computation of commitment hash\"),\n                &inserted,\n                i,\n            )?;\n        }\n\n        Ok((cur, auth_path_bits))\n    }\n}\n\nimpl<H: Hasher, U: PoseidonArity, V: PoseidonArity, W: PoseidonArity> AuthPath<H, U, V, W> {\n    pub fn blank(leaves: usize) -> Self {\n        let has_sub = V::to_usize() > 0;\n        let has_top = W::to_usize() > 0;\n        let base_elements = base_path_length::<U, V, W>(leaves);\n\n        let base = vec![\n            PathElement::<H, U> {\n                hashes: vec![None; U::to_usize() - 1],\n                index: None,\n                _a: Default::default(),\n                _h: Default::default(),\n            };\n            base_elements\n        ];\n\n        let sub = if has_sub {\n            vec![PathElement::<H, V> {\n                hashes: vec![None; V::to_usize() - 1],\n                index: None,\n                _a: Default::default(),\n                _h: Default::default(),\n            }]\n        } else {\n            Vec::new()\n        };\n\n        let top = if has_top {\n            vec![PathElement::<H, W> {\n                hashes: vec![None; W::to_usize() - 1],\n                index: None,\n                _a: Default::default(),\n                _h: Default::default(),\n            
}]\n        } else {\n            Vec::new()\n        };\n\n        AuthPath {\n            base: SubPath { path: base },\n            sub: SubPath { path: sub },\n            top: SubPath { path: top },\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> CircuitComponent for PoRCircuit<Tree> {\n    type ComponentPrivateInputs = Option<Root<Bls12>>;\n}\n\npub struct PoRCompound<Tree: MerkleTreeTrait> {\n    _tree: PhantomData<Tree>,\n}\n\nfn to_bits(bit_count: u32, n: usize) -> Vec<bool> {\n    (0..bit_count).map(|i| (n >> i) & 1 == 1).collect()\n}\n\npub fn challenge_into_auth_path_bits(challenge: usize, leaves: usize) -> Vec<bool> {\n    assert_eq!(1, leaves.count_ones());\n\n    to_bits(leaves.trailing_zeros(), challenge)\n}\n\nimpl<C: Circuit<Bls12>, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters<C, P>\n    for PoRCompound<Tree>\n{\n    fn cache_prefix() -> String {\n        format!(\"proof-of-retrievability-{}\", Tree::display())\n    }\n}\n\n// Can only implement for Bls12 because PoR is not generic over the engine.\nimpl<'a, Tree: 'static + MerkleTreeTrait> CompoundProof<'a, PoR<Tree>, PoRCircuit<Tree>>\n    for PoRCompound<Tree>\n{\n    fn circuit<'b>(\n        public_inputs: &<PoR<Tree> as ProofScheme<'a>>::PublicInputs,\n        _component_private_inputs: <PoRCircuit<Tree> as CircuitComponent>::ComponentPrivateInputs,\n        proof: &'b <PoR<Tree> as ProofScheme<'a>>::Proof,\n        public_params: &'b <PoR<Tree> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<PoRCircuit<Tree>> {\n        let (root, private) = match (*public_inputs).commitment {\n            None => (Root::Val(Some(proof.proof.root().into())), true),\n            Some(commitment) => (Root::Val(Some(commitment.into())), false),\n        };\n\n        ensure!(\n            private == public_params.private,\n            \"Inputs must be consistent with public params\"\n        );\n\n        Ok(PoRCircuit::<Tree> {\n            value: Root::Val(Some(proof.data.into())),\n            auth_path: proof.proof.as_options().into(),\n            root,\n            private,\n            _tree: PhantomData,\n        })\n    }\n\n    fn blank_circuit(\n        public_params: &<PoR<Tree> as ProofScheme<'a>>::PublicParams,\n    ) -> PoRCircuit<Tree> {\n        PoRCircuit::<Tree> {\n            value: Root::Val(None),\n            auth_path: AuthPath::blank(public_params.leaves),\n            root: Root::Val(None),\n            private: public_params.private,\n            _tree: PhantomData,\n        }\n    }\n\n    fn generate_public_inputs(\n        pub_inputs: &<PoR<Tree> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<PoR<Tree> as ProofScheme<'a>>::PublicParams,\n        _k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        ensure!(\n            pub_inputs.challenge < pub_params.leaves,\n            \"Challenge out of range\"\n        );\n        let mut inputs = Vec::new();\n\n        // Inputs are (currently, inefficiently) packed with one `Fr` per challenge.\n        // Boolean/bit auth paths trivially correspond to the challenged node's index within a sector.\n        // Defensively convert the challenge with `try_from` as a reminder that we must not truncate.\n        let input_fr = Fr::from_repr(FrRepr::from(\n            u64::try_from(pub_inputs.challenge).expect(\"challenge type too wide\"),\n        ))?;\n        inputs.push(input_fr);\n\n        if let Some(commitment) = pub_inputs.commitment {\n            ensure!(!pub_params.private, 
\"Params must be public\");\n            inputs.push(commitment.into());\n        } else {\n            ensure!(pub_params.private, \"Params must be private\");\n        }\n\n        Ok(inputs)\n    }\n}\n\nimpl<'a, Tree: MerkleTreeTrait> Circuit<Bls12> for PoRCircuit<Tree> {\n    /// # Public Inputs\n    ///\n    /// This circuit expects the following public inputs.\n    ///\n    /// * [0] - packed version of the `is_right` components of the auth_path.\n    /// * [1] - the merkle root of the tree.\n    ///\n    /// This circuit derives the following private inputs from its fields:\n    /// * value_num - packed version of `value` as bits. (might be more than one Fr)\n    ///\n    /// Note: All public inputs must be provided as `E::Fr`.\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let value = self.value;\n        let auth_path = self.auth_path;\n        let root = self.root;\n\n        let base_arity = Tree::Arity::to_usize();\n        let sub_arity = Tree::SubTreeArity::to_usize();\n        let top_arity = Tree::TopTreeArity::to_usize();\n\n        // All arities must be powers of two or circuits cannot be generated.\n        assert_eq!(\n            1,\n            base_arity.count_ones(),\n            \"base arity must be power of two\"\n        );\n        if sub_arity > 0 {\n            assert_eq!(\n                1,\n                sub_arity.count_ones(),\n                \"subtree arity must be power of two\"\n            );\n        }\n        if top_arity > 0 {\n            assert_eq!(\n                1,\n                top_arity.count_ones(),\n                \"top tree arity must be power of two\"\n            );\n        }\n\n        {\n            let value_num = value.allocated(cs.namespace(|| \"value\"))?;\n            let cur = value_num;\n\n            // Ascend the merkle tree authentication path\n\n            // base tree\n            let (cur, base_auth_path_bits) =\n                auth_path.base.synthesize(cs.namespace(|| \"base\"), cur)?;\n\n            // sub\n            let (cur, sub_auth_path_bits) =\n                auth_path.sub.synthesize(cs.namespace(|| \"sub\"), cur)?;\n\n            // top\n            let (computed_root, top_auth_path_bits) =\n                auth_path.top.synthesize(cs.namespace(|| \"top\"), cur)?;\n\n            let mut auth_path_bits = Vec::new();\n            auth_path_bits.extend(base_auth_path_bits);\n            auth_path_bits.extend(sub_auth_path_bits);\n            auth_path_bits.extend(top_auth_path_bits);\n\n            multipack::pack_into_inputs(cs.namespace(|| \"path\"), &auth_path_bits)?;\n            {\n                // Validate that the root of the merkle tree that we calculated is the same as the input.\n                let rt = root.allocated(cs.namespace(|| \"root_value\"))?;\n                constraint::equal(cs, || \"enforce root is correct\", &computed_root, &rt);\n\n                if !self.private {\n                    // Expose the root\n                    rt.inputize(cs.namespace(|| \"root\"))?;\n                }\n            }\n\n            Ok(())\n        }\n    }\n}\n\nimpl<'a, Tree: MerkleTreeTrait> PoRCircuit<Tree> {\n    pub fn new(proof: Tree::Proof, private: bool) -> Self {\n        PoRCircuit::<Tree> {\n            value: Root::Val(Some(proof.leaf().into())),\n            auth_path: proof.as_options().into(),\n            root: Root::Val(Some(proof.root().into())),\n            private,\n            _tree: PhantomData,\n        }\n  
  }\n\n    #[allow(clippy::type_complexity)]\n    pub fn synthesize<CS>(\n        mut cs: CS,\n        value: Root<Bls12>,\n        auth_path: AuthPath<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n        root: Root<Bls12>,\n        private: bool,\n    ) -> Result<(), SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        let por = Self {\n            value,\n            auth_path,\n            root,\n            private,\n            _tree: PhantomData,\n        };\n\n        por.synthesize(&mut cs)\n    }\n}\n\n/// Synthesizes a non-compound arity PoR without adding a public input for the challenge (whereas\n/// `PoRCircuit` adds one public input for the challenge). This PoR gadget allows the caller to pack\n/// multiple PoR challenges into a single public input when the challenge bit length is less than\n/// `Fr::Capacity`.\npub fn por_no_challenge_input<Tree, CS>(\n    cs: &mut CS,\n    // Least significant bit first, most significant bit last.\n    challenge_bits: Vec<AllocatedBit>,\n    leaf: AllocatedNum<Bls12>,\n    path_values: Vec<Vec<AllocatedNum<Bls12>>>,\n    root: AllocatedNum<Bls12>,\n) -> Result<(), SynthesisError>\nwhere\n    Tree: MerkleTreeTrait,\n    CS: ConstraintSystem<Bls12>,\n{\n    let arity = Tree::Arity::to_usize();\n    let arity_bit_len = arity.trailing_zeros() as usize;\n    let challenge_bit_len = challenge_bits.len();\n    let height = path_values.len();\n\n    // Check that all path elements are consistent with the arity.\n    assert!(path_values\n        .iter()\n        .all(|siblings| siblings.len() == arity - 1));\n\n    // Check that the challenge bit length is consistent with the height and arity.\n    assert_eq!(challenge_bit_len, arity_bit_len * height);\n\n    let challenge_bits: Vec<Boolean> = challenge_bits.into_iter().map(Boolean::from).collect();\n\n    // Compute a root from the provided path and check equality with the provided root.\n    let mut cur = leaf;\n    for (height, (siblings, insert_index)) in path_values\n        .iter()\n        .zip(challenge_bits.chunks(arity_bit_len))\n        .enumerate()\n    {\n        let inputs = insert(\n            &mut cs.namespace(|| format!(\"merkle insert, height {}\", height)),\n            &cur,\n            &insert_index,\n            &siblings,\n        )?;\n        cur = <<Tree::Hasher as Hasher>::Function as HashFunction<\n            <Tree::Hasher as Hasher>::Domain,\n        >>::hash_multi_leaf_circuit::<Tree::Arity, _>(\n            cs.namespace(|| format!(\"merkle hash, height {}\", height)),\n            &inputs,\n            height,\n        )?;\n    }\n    let computed_root = cur;\n    constraint::equal(cs, || \"merkle root equality\", &computed_root, &root);\n\n    Ok(())\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::util_cs::{metric_cs::MetricCS, test_cs::TestConstraintSystem};\n    use ff::Field;\n    use filecoin_hashers::{\n        blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain,\n    };\n    use fr32::{bytes_into_fr, fr_into_bytes};\n    use generic_array::typenum::{U0, U2, U4, U8};\n    use merkletree::store::VecStore;\n    use pretty_assertions::assert_eq;\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n\n    use crate::{\n        compound_proof,\n        merkle::{\n            create_base_merkle_tree, generate_tree, get_base_tree_count, MerkleTreeWrapper, ResTree,\n        },\n        por,\n        proof::NoRequirements,\n        util::data_at_node,\n   
     TEST_SEED,\n    };\n\n    type TestTree<H, A> = MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, U0, U0>;\n\n    type TestTree2<H, A, B> = MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, B, U0>;\n\n    type TestTree3<H, A, B, C> = MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, B, C>;\n\n    #[test]\n    #[ignore] // Slow test – run only when compiled for release.\n    fn por_test_compound_poseidon_base_8() {\n        por_compound::<TestTree<PoseidonHasher, U8>>();\n    }\n\n    fn por_compound<Tree: 'static + MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n\n        let data: Vec<u8> = (0..leaves)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n        let tree = create_base_merkle_tree::<Tree>(None, leaves, data.as_slice())\n            .expect(\"create_base_merkle_tree failure\");\n\n        let public_inputs = por::PublicInputs {\n            challenge: 2,\n            commitment: Some(tree.root()),\n        };\n\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: por::SetupParams {\n                leaves,\n                private: false,\n            },\n            partitions: None,\n            priority: false,\n        };\n        let public_params = PoRCompound::<Tree>::setup(&setup_params).expect(\"setup failed\");\n\n        let private_inputs = por::PrivateInputs::<Tree>::new(\n            bytes_into_fr(\n                data_at_node(data.as_slice(), public_inputs.challenge)\n                    .expect(\"data_at_node failure\"),\n            )\n            .expect(\"failed to create Fr from node data\")\n            .into(),\n            &tree,\n        );\n\n        let gparams = PoRCompound::<Tree>::groth_params(Some(rng), &public_params.vanilla_params)\n            .expect(\"failed to generate groth params\");\n\n        let proof =\n            PoRCompound::<Tree>::prove(&public_params, &public_inputs, &private_inputs, &gparams)\n                .expect(\"failed while proving\");\n\n        let verified =\n            PoRCompound::<Tree>::verify(&public_params, &public_inputs, &proof, &NoRequirements)\n                .expect(\"failed while verifying\");\n        assert!(verified);\n\n        let (circuit, inputs) =\n            PoRCompound::<Tree>::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                .expect(\"circuit_for_test failure\");\n\n        let mut cs = TestConstraintSystem::new();\n\n        circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n        assert!(cs.is_satisfied());\n        assert!(cs.verify(&inputs));\n    }\n\n    #[test]\n    fn test_por_circuit_blake2s_base_2() {\n        test_por_circuit::<TestTree<Blake2sHasher, U2>>(3, 129_135);\n    }\n\n    #[test]\n    fn test_por_circuit_sha256_base_2() {\n        test_por_circuit::<TestTree<Sha256Hasher, U2>>(3, 272_295);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_base_2() {\n        test_por_circuit::<TestTree<PoseidonHasher, U2>>(3, 1_887);\n    }\n\n    #[test]\n    fn test_por_circuit_blake2s_base_4() {\n        test_por_circuit::<TestTree<Blake2sHasher, U4>>(3, 130_296);\n    }\n\n    #[test]\n    fn test_por_circuit_sha256_base_4() {\n        test_por_circuit::<TestTree<Sha256Hasher, U4>>(3, 216_258);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_base_4() {\n        test_por_circuit::<TestTree<PoseidonHasher, U4>>(3, 1_164);\n    }\n\n    
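// In each `test_por_circuit` call, the first argument is the expected number\n    // of public inputs and the second the expected constraint count for that\n    // tree shape.\n    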
#[test]\n    fn test_por_circuit_blake2s_base_8() {\n        test_por_circuit::<TestTree<Blake2sHasher, U8>>(3, 174_503);\n    }\n\n    #[test]\n    fn test_por_circuit_sha256_base_8() {\n        test_por_circuit::<TestTree<Sha256Hasher, U8>>(3, 250_987);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_base_8() {\n        test_por_circuit::<TestTree<PoseidonHasher, U8>>(3, 1_063);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_sub_8_2() {\n        test_por_circuit::<TestTree2<PoseidonHasher, U8, U2>>(3, 1_377);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_top_8_4_2() {\n        test_por_circuit::<TestTree3<PoseidonHasher, U8, U4, U2>>(3, 1_764);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_top_8_8() {\n        // This is the shape we want for 32GiB sectors.\n        test_por_circuit::<TestTree2<PoseidonHasher, U8, U8>>(3, 1_593);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_top_8_8_2() {\n        // This is the shape we want for 64GiB sectors.\n        test_por_circuit::<TestTree3<PoseidonHasher, U8, U8, U2>>(3, 1_907);\n    }\n\n    #[test]\n    fn test_por_circuit_poseidon_top_8_2_4() {\n        // We can handle top-heavy trees with a non-zero subtree arity.\n        // These should never be produced, though.\n        test_por_circuit::<TestTree3<PoseidonHasher, U8, U2, U4>>(3, 1_764);\n    }\n\n    fn test_por_circuit<Tree: 'static + MerkleTreeTrait>(\n        num_inputs: usize,\n        num_constraints: usize,\n    ) {\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        // Ensure arity will evenly fill tree.\n        let leaves = 64 * get_base_tree_count::<Tree>();\n\n        // -- Basic Setup\n        let (data, tree) = generate_tree::<Tree, _>(rng, leaves, None);\n\n        for i in 0..leaves {\n            //println!(\"challenge: {}, ({})\", i, leaves);\n\n            // -- PoR\n            let pub_params = por::PublicParams {\n                leaves,\n                private: false,\n            };\n            let pub_inputs = por::PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n                challenge: i,\n                commitment: Some(tree.root()),\n            };\n            let leaf =\n                data_at_node(data.as_slice(), pub_inputs.challenge).expect(\"data_at_node failure\");\n            let leaf_element = <Tree::Hasher as Hasher>::Domain::try_from_bytes(leaf)\n                .expect(\"try_from_bytes failure\");\n            let priv_inputs = por::PrivateInputs::<ResTree<Tree>>::new(leaf_element, &tree);\n            let p = tree.gen_proof(i).expect(\"gen_proof failure\");\n            assert!(p.verify());\n\n            // create a non circuit proof\n            let proof = PoR::<ResTree<Tree>>::prove(&pub_params, &pub_inputs, &priv_inputs)\n                .expect(\"proving failed\");\n\n            // make sure it verifies\n            let is_valid = PoR::<ResTree<Tree>>::verify(&pub_params, &pub_inputs, &proof)\n                .expect(\"verification failed\");\n            assert!(is_valid, \"failed to verify por proof\");\n\n            // -- Circuit\n\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n            let por = PoRCircuit::<ResTree<Tree>> {\n                value: Root::Val(Some(proof.data.into())),\n                auth_path: proof.proof.as_options().into(),\n                root: Root::Val(Some(\n                    pub_inputs\n                        .commitment\n                        .expect(\"pub_inputs.commitment failure\")\n                        
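// Convert the commitment from the hasher's Domain type into an Fr for the circuit.\n                        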
.into(),\n                )),\n                private: false,\n                _tree: PhantomData,\n            };\n\n            por.synthesize(&mut cs).expect(\"circuit synthesis failed\");\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n            assert_eq!(cs.num_inputs(), num_inputs, \"wrong number of inputs\");\n            assert_eq!(\n                cs.num_constraints(),\n                num_constraints,\n                \"wrong number of constraints\"\n            );\n\n            let generated_inputs = PoRCompound::<ResTree<Tree>>::generate_public_inputs(\n                &pub_inputs,\n                &pub_params,\n                None,\n            )\n            .expect(\"generate_public_inputs failure\");\n\n            let expected_inputs = cs.get_inputs();\n\n            for ((input, label), generated_input) in\n                expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n            {\n                assert_eq!(input, generated_input, \"{}\", label);\n            }\n\n            assert_eq!(\n                generated_inputs.len(),\n                expected_inputs.len() - 1,\n                \"inputs are not the same length\"\n            );\n\n            assert!(cs.verify(&generated_inputs), \"failed to verify inputs\");\n        }\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_base_2() {\n        private_por_test_compound::<TestTree<PoseidonHasher, U2>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_base_4() {\n        private_por_test_compound::<TestTree<PoseidonHasher, U4>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_sub_8_2() {\n        private_por_test_compound::<TestTree2<PoseidonHasher, U8, U2>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_top_8_4_2() {\n        private_por_test_compound::<TestTree3<PoseidonHasher, U8, U4, U2>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_top_8_8() {\n        private_por_test_compound::<TestTree2<PoseidonHasher, U8, U8>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_top_8_8_2() {\n        private_por_test_compound::<TestTree3<PoseidonHasher, U8, U8, U2>>();\n    }\n\n    #[ignore] // Slow test – run only when compiled for release.\n    #[test]\n    fn test_private_por_compound_poseidon_top_8_2_4() {\n        private_por_test_compound::<TestTree3<PoseidonHasher, U8, U2, U4>>();\n    }\n\n    fn private_por_test_compound<Tree: 'static + MerkleTreeTrait>() {\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        // Ensure arity will evenly fill tree.\n        let leaves = 64 * get_base_tree_count::<Tree>();\n\n        // -- Basic Setup\n        let (data, tree) = generate_tree::<Tree, _>(rng, leaves, None);\n\n        for i in 0..3 {\n            let public_inputs = por::PublicInputs {\n                challenge: i,\n                commitment: None,\n            };\n\n            let setup_params = compound_proof::SetupParams {\n                vanilla_params: por::SetupParams {\n                    leaves,\n                    private: true,\n           
     },\n                partitions: None,\n                priority: false,\n            };\n            let public_params =\n                PoRCompound::<ResTree<Tree>>::setup(&setup_params).expect(\"setup failed\");\n\n            let private_inputs = por::PrivateInputs::<ResTree<Tree>>::new(\n                bytes_into_fr(\n                    data_at_node(data.as_slice(), public_inputs.challenge)\n                        .expect(\"data_at_node failure\"),\n                )\n                .expect(\"failed to create Fr from node data\")\n                .into(),\n                &tree,\n            );\n\n            {\n                let (circuit, inputs) =\n                    PoRCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                        .expect(\"circuit_for_test failure\");\n\n                let mut cs = TestConstraintSystem::new();\n\n                circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n\n                if !cs.is_satisfied() {\n                    panic!(\n                        \"failed to satisfy: {:?}\",\n                        cs.which_is_unsatisfied().expect(\"cs is_satisfied failure\")\n                    );\n                }\n                assert!(\n                    cs.verify(&inputs),\n                    \"verification failed with TestConstraintSystem and generated inputs\"\n                );\n            }\n            // NOTE: This diagnostic code currently fails, even though the proof generated from the blank circuit verifies.\n            // Use this to debug differences between blank and regular circuit generation.\n            {\n                let (circuit1, _inputs) =\n                    PoRCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                        .expect(\"circuit_for_test failure\");\n                let blank_circuit =\n                    PoRCompound::<ResTree<Tree>>::blank_circuit(&public_params.vanilla_params);\n\n                let mut cs_blank = MetricCS::new();\n                blank_circuit\n                    .synthesize(&mut cs_blank)\n                    .expect(\"failed to synthesize\");\n\n                let a = cs_blank.pretty_print_list();\n\n                let mut cs1 = TestConstraintSystem::new();\n                circuit1.synthesize(&mut cs1).expect(\"failed to synthesize\");\n                let b = cs1.pretty_print_list();\n\n                for (i, (a, b)) in a.chunks(100).zip(b.chunks(100)).enumerate() {\n                    assert_eq!(a, b, \"failed at chunk {}\", i);\n                }\n            }\n\n            let blank_groth_params = PoRCompound::<ResTree<Tree>>::groth_params(\n                Some(rng),\n                &public_params.vanilla_params,\n            )\n            .expect(\"failed to generate groth params\");\n\n            let proof = PoRCompound::prove(\n                &public_params,\n                &public_inputs,\n                &private_inputs,\n                &blank_groth_params,\n            )\n            .expect(\"failed while proving\");\n\n            let verified =\n                PoRCompound::verify(&public_params, &public_inputs, &proof, &NoRequirements)\n                    .expect(\"failed while verifying\");\n\n            assert!(verified);\n        }\n    }\n\n    #[test]\n    fn test_private_por_input_circuit_poseidon_binary() {\n        test_private_por_input_circuit::<TestTree<PoseidonHasher, U2>>(1_886);\n    }\n\n    #[test]\n    fn 
test_private_por_input_circuit_poseidon_quad() {\n        test_private_por_input_circuit::<TestTree<PoseidonHasher, U4>>(1_163);\n    }\n\n    #[test]\n    fn test_private_por_input_circuit_poseidon_oct() {\n        test_private_por_input_circuit::<TestTree<PoseidonHasher, U8>>(1_062);\n    }\n\n    fn test_private_por_input_circuit<Tree: MerkleTreeTrait>(num_constraints: usize) {\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        let leaves = 64 * get_base_tree_count::<Tree>();\n        for i in 0..leaves {\n            // -- Basic Setup\n\n            let data: Vec<u8> = (0..leaves)\n                .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n                .collect();\n\n            let tree = create_base_merkle_tree::<Tree>(None, leaves, data.as_slice())\n                .expect(\"create_base_merkle_tree failure\");\n\n            // -- PoR\n\n            let pub_params = por::PublicParams {\n                leaves,\n                private: true,\n            };\n            let pub_inputs = por::PublicInputs {\n                challenge: i,\n                commitment: None,\n            };\n\n            let priv_inputs = por::PrivateInputs::<Tree>::new(\n                bytes_into_fr(\n                    data_at_node(data.as_slice(), pub_inputs.challenge)\n                        .expect(\"data_at_node failure\"),\n                )\n                .expect(\"bytes_into_fr failure\")\n                .into(),\n                &tree,\n            );\n\n            // create a non circuit proof\n            let proof =\n                PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs).expect(\"proving failed\");\n\n            // make sure it verifies\n            let is_valid =\n                PoR::<Tree>::verify(&pub_params, &pub_inputs, &proof).expect(\"verification failed\");\n            assert!(is_valid, \"failed to verify por proof\");\n\n            // -- Circuit\n\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let por = PoRCircuit::<Tree> {\n                value: Root::Val(Some(proof.data.into())),\n                auth_path: proof.proof.as_options().into(),\n                root: Root::Val(Some(tree.root().into())),\n                private: true,\n                _tree: PhantomData,\n            };\n\n            por.synthesize(&mut cs).expect(\"circuit synthesis failed\");\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n            assert_eq!(cs.num_inputs(), 2, \"wrong number of inputs\");\n            assert_eq!(\n                cs.num_constraints(),\n                num_constraints,\n                \"wrong number of constraints\"\n            );\n\n            let auth_path_bits =\n                challenge_into_auth_path_bits(pub_inputs.challenge, pub_params.leaves);\n            let packed_auth_path = multipack::compute_multipacking::<Bls12>(&auth_path_bits);\n\n            let mut expected_inputs = Vec::new();\n            expected_inputs.extend(packed_auth_path);\n\n            assert_eq!(cs.get_input(0, \"ONE\"), Fr::one(), \"wrong input 0\");\n\n            assert_eq!(\n                cs.get_input(1, \"path/input 0\"),\n                expected_inputs[0],\n                \"wrong packed_auth_path\"\n            );\n\n            assert!(cs.is_satisfied(), \"constraints are not all satisfied\");\n            assert!(cs.verify(&expected_inputs), \"failed to verify inputs\");\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/gadgets/uint64.rs",
    "content": "use bellperson::{\n    bls::Engine,\n    gadgets::{\n        boolean::{AllocatedBit, Boolean},\n        multipack::pack_into_inputs,\n    },\n    ConstraintSystem, SynthesisError,\n};\n\n/// Represents an interpretation of 64 `Boolean` objects as an unsigned integer.\n#[derive(Clone)]\npub struct UInt64 {\n    // Least significant bit first\n    bits: Vec<Boolean>,\n    value: Option<u64>,\n}\n\nimpl UInt64 {\n    /// Construct a constant `UInt64` from a `u64`\n    pub fn constant(value: u64) -> Self {\n        let mut bits = Vec::with_capacity(64);\n\n        let mut tmp = value;\n        for _ in 0..64 {\n            if tmp & 1 == 1 {\n                bits.push(Boolean::constant(true))\n            } else {\n                bits.push(Boolean::constant(false))\n            }\n\n            tmp >>= 1;\n        }\n\n        UInt64 {\n            bits,\n            value: Some(value),\n        }\n    }\n\n    pub fn get_value(&self) -> Option<u64> {\n        self.value\n    }\n\n    pub fn pack_into_input<E, CS>(&self, cs: CS) -> Result<(), SynthesisError>\n    where\n        E: Engine,\n        CS: ConstraintSystem<E>,\n    {\n        pack_into_inputs(cs, &self.bits)\n    }\n\n    /// Allocate a `UInt64` in the constraint system\n    pub fn alloc<E, CS>(mut cs: CS, value: Option<u64>) -> Result<Self, SynthesisError>\n    where\n        E: Engine,\n        CS: ConstraintSystem<E>,\n    {\n        let values = match value {\n            Some(mut val) => {\n                let mut v = Vec::with_capacity(64);\n\n                for _ in 0..64 {\n                    v.push(Some(val & 1 == 1));\n                    val >>= 1;\n                }\n\n                v\n            }\n            None => vec![None; 64],\n        };\n\n        let bits = values\n            .into_iter()\n            .enumerate()\n            .map(|(i, v)| {\n                Ok(Boolean::from(AllocatedBit::alloc(\n                    cs.namespace(|| format!(\"allocated bit {}\", i)),\n                    v,\n                )?))\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        Ok(UInt64 { bits, value })\n    }\n\n    pub fn to_bits_be(&self) -> Vec<Boolean> {\n        self.bits.iter().rev().cloned().collect()\n    }\n\n    pub fn from_bits_be(bits: &[Boolean]) -> Self {\n        assert_eq!(bits.len(), 64);\n\n        let mut value = Some(0u64);\n        for b in bits {\n            if let Some(v) = value.as_mut() {\n                *v <<= 1;\n            }\n\n            match b.get_value() {\n                Some(true) => {\n                    if let Some(v) = value.as_mut() {\n                        *v |= 1;\n                    }\n                }\n                Some(false) => {}\n                None => {\n                    value = None;\n                }\n            }\n        }\n\n        UInt64 {\n            value,\n            bits: bits.iter().rev().cloned().collect(),\n        }\n    }\n\n    /// Turns this `UInt64` into its little-endian byte order representation.\n    pub fn to_bits_le(&self) -> Vec<Boolean> {\n        self.bits.clone()\n    }\n\n    /// Converts a little-endian byte order representation of bits into a\n    /// `UInt64`.\n    pub fn from_bits(bits: &[Boolean]) -> Self {\n        assert_eq!(bits.len(), 64);\n\n        let new_bits = bits.to_vec();\n\n        let mut value = Some(0u64);\n        for b in new_bits.iter().rev() {\n            if let Some(v) = value.as_mut() {\n                *v <<= 1;\n            }\n\n            
match *b {\n                Boolean::Constant(b) => {\n                    if b {\n                        if let Some(v) = value.as_mut() {\n                            *v |= 1\n                        }\n                    }\n                }\n                Boolean::Is(ref b) => match b.get_value() {\n                    Some(true) => {\n                        if let Some(v) = value.as_mut() {\n                            *v |= 1;\n                        }\n                    }\n                    Some(false) => {}\n                    None => value = None,\n                },\n                Boolean::Not(ref b) => match b.get_value() {\n                    Some(false) => {\n                        if let Some(v) = value.as_mut() {\n                            *v |= 1;\n                        }\n                    }\n                    Some(true) => {}\n                    None => value = None,\n                },\n            }\n        }\n\n        UInt64 {\n            value,\n            bits: new_bits,\n        }\n    }\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    use crate::TEST_SEED;\n\n    #[test]\n    fn test_uint64_from_bits_be() {\n        let mut rng = XorShiftRng::from_seed(TEST_SEED);\n\n        for _ in 0..1000 {\n            let v = (0..64)\n                .map(|_| Boolean::constant(rng.gen()))\n                .collect::<Vec<_>>();\n\n            let b = UInt64::from_bits_be(&v);\n\n            for (i, bit) in b.bits.iter().enumerate() {\n                match *bit {\n                    Boolean::Constant(bit) => {\n                        assert!(bit == ((b.value.unwrap() >> i) & 1 == 1));\n                    }\n                    _ => unreachable!(),\n                }\n            }\n\n            let expected_to_be_same = b.to_bits_be();\n\n            for x in v.iter().zip(expected_to_be_same.iter()) {\n                match x {\n                    (&Boolean::Constant(true), &Boolean::Constant(true)) => {}\n                    (&Boolean::Constant(false), &Boolean::Constant(false)) => {}\n                    _ => unreachable!(),\n                }\n            }\n        }\n    }\n\n    #[test]\n    fn test_uint64_from_bits() {\n        let mut rng = XorShiftRng::from_seed(TEST_SEED);\n\n        for _ in 0..1000 {\n            let v = (0..64)\n                .map(|_| Boolean::constant(rng.gen()))\n                .collect::<Vec<_>>();\n\n            let b = UInt64::from_bits(&v);\n\n            for (i, bit) in b.bits.iter().enumerate() {\n                match *bit {\n                    Boolean::Constant(bit) => {\n                        assert!(bit == ((b.value.unwrap() >> i) & 1 == 1));\n                    }\n                    _ => unreachable!(),\n                }\n            }\n\n            let expected_to_be_same = b.to_bits_le();\n\n            for x in v.iter().zip(expected_to_be_same.iter()) {\n                match x {\n                    (&Boolean::Constant(true), &Boolean::Constant(true)) => {}\n                    (&Boolean::Constant(false), &Boolean::Constant(false)) => {}\n                    _ => unreachable!(),\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/gadgets/variables.rs",
    "content": "use std::fmt::{self, Debug, Formatter};\n\nuse bellperson::{bls::Engine, gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError};\n\nuse crate::error::Result;\n\n/// Root represents a root commitment which may be either a raw value or an already-allocated number.\n/// This allows subcomponents to depend on roots which may optionally be shared with their parent\n/// or sibling components.\n#[derive(Clone)]\npub enum Root<E: Engine> {\n    Var(AllocatedNum<E>),\n    Val(Option<E::Fr>),\n}\n\nimpl<E: Engine> Debug for Root<E> {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        match self {\n            Root::Var(num) => write!(f, \"Root::Var({:?})\", num.get_value()),\n            Root::Val(val) => write!(f, \"Root::Val({:?})\", val),\n        }\n    }\n}\n\nimpl<E: Engine> Root<E> {\n    pub fn from_allocated<CS: ConstraintSystem<E>>(allocated: AllocatedNum<E>) -> Self {\n        Root::Var(allocated)\n    }\n\n    pub fn allocated<CS: ConstraintSystem<E>>(\n        &self,\n        cs: CS,\n    ) -> Result<AllocatedNum<E>, SynthesisError> {\n        match self {\n            Root::Var(allocated) => Ok(allocated.clone()),\n            Root::Val(fr) => {\n                AllocatedNum::alloc(cs, || fr.ok_or_else(|| SynthesisError::AssignmentMissing))\n            }\n        }\n    }\n\n    pub fn var<CS: ConstraintSystem<E>>(cs: CS, fr: E::Fr) -> Result<Self> {\n        Ok(Root::Var(AllocatedNum::alloc(cs, || Ok(fr))?))\n    }\n\n    pub fn is_some(&self) -> bool {\n        match self {\n            Root::Var(_) => true,\n            Root::Val(Some(_)) => true,\n            Root::Val(None) => false,\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/gadgets/xor.rs",
    "content": "use bellperson::{bls::Engine, gadgets::boolean::Boolean, ConstraintSystem, SynthesisError};\n\npub fn xor<E, CS>(\n    cs: &mut CS,\n    key: &[Boolean],\n    input: &[Boolean],\n) -> Result<Vec<Boolean>, SynthesisError>\nwhere\n    E: Engine,\n    CS: ConstraintSystem<E>,\n{\n    let key_len = key.len();\n    assert_eq!(key_len, 32 * 8);\n\n    input\n        .iter()\n        .enumerate()\n        .map(|(i, byte)| {\n            Boolean::xor(\n                cs.namespace(|| format!(\"xor bit: {}\", i)),\n                byte,\n                &key[i % key_len],\n            )\n        })\n        .collect::<Result<Vec<_>, SynthesisError>>()\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::{bls::Bls12, util_cs::test_cs::TestConstraintSystem};\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    use crate::{\n        crypto::xor,\n        util::{bits_to_bytes, bytes_into_boolean_vec},\n        TEST_SEED,\n    };\n\n    #[test]\n    fn test_xor_input_circuit() {\n        let mut rng = XorShiftRng::from_seed(TEST_SEED);\n\n        for i in 0..10 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let key: Vec<u8> = (0..32).map(|_| rng.gen()).collect();\n            let data: Vec<u8> = (0..(i + 1) * 32).map(|_| rng.gen()).collect();\n\n            let key_bits: Vec<Boolean> = {\n                let mut cs = cs.namespace(|| \"key\");\n                bytes_into_boolean_vec(&mut cs, Some(key.as_slice()), key.len()).unwrap()\n            };\n\n            let data_bits: Vec<Boolean> = {\n                let mut cs = cs.namespace(|| \"data bits\");\n                bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), data.len()).unwrap()\n            };\n\n            let out_bits =\n                xor(&mut cs, key_bits.as_slice(), data_bits.as_slice()).expect(\"xor failed\");\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n            assert_eq!(out_bits.len(), data_bits.len(), \"invalid output length\");\n\n            // convert Vec<Boolean> to Vec<u8>\n            let actual = bits_to_bytes(\n                out_bits\n                    .iter()\n                    .map(|v| v.get_value().unwrap())\n                    .collect::<Vec<bool>>()\n                    .as_slice(),\n            );\n\n            let expected = xor::encode(key.as_slice(), data.as_slice()).unwrap();\n\n            assert_eq!(expected, actual, \"circuit and non circuit do not match\");\n\n            // -- roundtrip\n            let roundtrip_bits = {\n                let mut cs = cs.namespace(|| \"roundtrip\");\n                xor(&mut cs, key_bits.as_slice(), out_bits.as_slice()).expect(\"xor faield\")\n            };\n\n            let roundtrip = bits_to_bytes(\n                roundtrip_bits\n                    .iter()\n                    .map(|v| v.get_value().unwrap())\n                    .collect::<Vec<bool>>()\n                    .as_slice(),\n            );\n\n            assert_eq!(data, roundtrip, \"failed to roundtrip\");\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/lib.rs",
    "content": "#![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)]\n#![allow(clippy::many_single_char_names)]\n#![allow(clippy::unreadable_literal)]\n#![allow(clippy::type_repetition_in_bounds)]\n#![allow(clippy::upper_case_acronyms)]\n#![warn(clippy::unwrap_used)]\n#![warn(clippy::unnecessary_wraps)]\n#![warn(clippy::ptr_arg)]\n#![warn(clippy::unnecessary_lazy_evaluations)]\n#![warn(clippy::redundant_slicing)]\n\nuse std::convert::TryInto;\n\npub mod api_version;\npub mod cache_key;\npub mod compound_proof;\npub mod crypto;\npub mod data;\npub mod drgraph;\npub mod error;\npub mod gadgets;\npub mod measurements;\npub mod merkle;\npub mod multi_proof;\npub mod parameter_cache;\npub mod partitions;\npub mod pieces;\npub mod por;\npub mod proof;\npub mod sector;\npub mod settings;\npub mod test_helper;\npub mod util;\n\npub use data::Data;\n\npub const TEST_SEED: [u8; 16] = [\n    0x59, 0x62, 0xbe, 0x5d, 0x76, 0x3d, 0x31, 0x8d, 0x17, 0xdb, 0x37, 0x32, 0x54, 0x06, 0xbc, 0xe5,\n];\n\npub const MAX_LEGACY_POREP_REGISTERED_PROOF_ID: u64 = 4;\n\npub type PoRepID = [u8; 32];\n\npub fn is_legacy_porep_id(porep_id: PoRepID) -> bool {\n    // NOTE: Because we take only the first 8 bytes, we are actually examining the registered proof type id,\n    // not the porep_id. The latter requires the full 32 bytes and includes the nonce.\n    // We are, to some extent depending explictly on the strucuture of the `porep_id`.\n    // Of course, it happens to be the case that only the 'legacy' ids in question can ever satisfy\n    // this predicate, so the distinction is somewhat moot. However, for the sake of clarity in any future\n    // use of `porep_id`, we should pay close attention to this.\n    let id = u64::from_le_bytes(\n        porep_id[..8]\n            .try_into()\n            .expect(\"8 bytes is always a valid u64\"),\n    );\n    id <= MAX_LEGACY_POREP_REGISTERED_PROOF_ID\n}\n"
  },
  {
    "path": "storage-proofs-core/src/measurements.rs",
    "content": "#[cfg(feature = \"measurements\")]\nuse std::sync::{\n    mpsc::{channel, Receiver, Sender},\n    Mutex,\n};\nuse std::time::Duration;\n\n#[cfg(feature = \"measurements\")]\nuse lazy_static::lazy_static;\nuse serde::Serialize;\n\n#[cfg(feature = \"measurements\")]\nlazy_static! {\n    pub static ref OP_MEASUREMENTS: (\n        Mutex<Option<Sender<OpMeasurement>>>,\n        Mutex<Receiver<OpMeasurement>>\n    ) = {\n        // create asynchronous channel with unlimited buffer\n        let (tx, rx) = channel();\n        (Mutex::new(Some(tx)), Mutex::new(rx))\n    };\n}\n\n#[derive(Debug, Serialize)]\n#[serde(rename_all = \"kebab-case\")]\npub struct OpMeasurement {\n    pub op: Operation,\n    pub cpu_time: Duration,\n    pub wall_time: Duration,\n}\n\n#[derive(Debug, Serialize)]\n#[serde(rename_all = \"kebab-case\")]\npub enum Operation {\n    AddPiece,\n    GeneratePieceCommitment,\n    GenerateTreeC,\n    GenerateTreeRLast,\n    CommD,\n    EncodeWindowTimeAll,\n    WindowCommLeavesTime,\n    PorepCommitTime,\n    PostInclusionProofs,\n    PostFinalizeTicket,\n    PostReadChallengedRange,\n    PostPartialTicketHash,\n}\n\n#[cfg(feature = \"measurements\")]\npub fn measure_op<T, F>(op: Operation, f: F) -> T\nwhere\n    F: FnOnce() -> T,\n{\n    use std::time::Instant;\n\n    let cpu_time_start = cpu_time::ProcessTime::now();\n    let wall_start_time = Instant::now();\n\n    #[cfg(feature = \"profile\")]\n    gperftools::profiler::PROFILER\n        .lock()\n        .unwrap()\n        .start(format!(\"./{:?}.profile\", op))\n        .unwrap();\n    let x = f();\n    #[cfg(feature = \"profile\")]\n    gperftools::profiler::PROFILER\n        .lock()\n        .unwrap()\n        .stop()\n        .unwrap();\n\n    let opt_tx = OP_MEASUREMENTS\n        .0\n        .lock()\n        .expect(\"acquire lock on tx side of perf channel\");\n\n    if let Some(tx) = opt_tx.as_ref() {\n        tx.clone()\n            .send(OpMeasurement {\n                op,\n                cpu_time: cpu_time_start.elapsed(),\n                wall_time: wall_start_time.elapsed(),\n            })\n            .expect(\"failed to send to perf channel\");\n    }\n\n    x\n}\n\n#[cfg(not(feature = \"measurements\"))]\npub fn measure_op<T, F>(_: Operation, f: F) -> T\nwhere\n    F: FnOnce() -> T,\n{\n    f()\n}\n"
  },
  {
    "path": "storage-proofs-core/src/merkle/builders.rs",
    "content": "use std::any::Any;\nuse std::fs::File;\nuse std::io::Write;\nuse std::mem::size_of;\nuse std::path::PathBuf;\n\nuse anyhow::ensure;\nuse filecoin_hashers::{Domain, Hasher, PoseidonArity};\nuse generic_array::typenum::{Unsigned, U0};\nuse log::trace;\nuse merkletree::{\n    merkle::{\n        get_merkle_tree_leafs, is_merkle_tree_size_valid, FromIndexedParallelIterator, MerkleTree,\n    },\n    store::{DiskStore, ExternalReader, LevelCacheStore, ReplicaConfig, Store, StoreConfig},\n};\nuse rand::Rng;\nuse rayon::prelude::{IntoParallelIterator, ParallelIterator};\n\nuse crate::{\n    error::{Error, Result},\n    merkle::{DiskTree, LCMerkleTree, LCStore, LCTree, MerkleTreeTrait, MerkleTreeWrapper},\n    util::{data_at_node, default_rows_to_discard, NODE_SIZE},\n};\n\n// Create a DiskTree from the provided config(s), each representing a 'base' layer tree with 'base_tree_len' elements.\npub fn create_disk_tree<Tree: MerkleTreeTrait>(\n    base_tree_len: usize,\n    configs: &[StoreConfig],\n) -> Result<DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>> {\n    let base_tree_leafs = get_merkle_tree_leafs(base_tree_len, Tree::Arity::to_usize())?;\n\n    if Tree::TopTreeArity::to_usize() > 0 {\n        ensure!(\n            Tree::SubTreeArity::to_usize() > 0,\n            \"Invalid top arity specified without sub arity\"\n        );\n\n        DiskTree::from_sub_tree_store_configs(base_tree_leafs, configs)\n    } else if Tree::SubTreeArity::to_usize() > 0 {\n        ensure!(\n            !configs.is_empty(),\n            \"Cannot create sub-tree with a single tree config\"\n        );\n\n        DiskTree::from_store_configs(base_tree_leafs, configs)\n    } else {\n        ensure!(configs.len() == 1, \"Invalid tree-shape specified\");\n        let store = DiskStore::new_from_disk(base_tree_len, Tree::Arity::to_usize(), &configs[0])?;\n\n        DiskTree::from_data_store(store, base_tree_leafs)\n    }\n}\n\n// Create an LCTree from the provided config(s) and replica(s), each representing a 'base' layer tree with 'base_tree_len' elements.\npub fn create_lc_tree<Tree: MerkleTreeTrait>(\n    base_tree_len: usize,\n    configs: &[StoreConfig],\n    replica_config: &ReplicaConfig,\n) -> Result<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>> {\n    let base_tree_leafs = get_merkle_tree_leafs(base_tree_len, Tree::Arity::to_usize())?;\n\n    if Tree::TopTreeArity::to_usize() > 0 {\n        ensure!(\n            Tree::SubTreeArity::to_usize() > 0,\n            \"Invalid top arity specified without sub arity\"\n        );\n\n        LCTree::from_sub_tree_store_configs_and_replica(base_tree_leafs, configs, replica_config)\n    } else if Tree::SubTreeArity::to_usize() > 0 {\n        ensure!(\n            !configs.is_empty(),\n            \"Cannot create sub-tree with a single tree config\"\n        );\n\n        LCTree::from_store_configs_and_replica(base_tree_leafs, configs, replica_config)\n    } else {\n        ensure!(configs.len() == 1, \"Invalid tree-shape specified\");\n        let store = LCStore::new_from_disk_with_reader(\n            base_tree_len,\n            Tree::Arity::to_usize(),\n            &configs[0],\n            ExternalReader::new_from_path(&replica_config.path)?,\n        )?;\n\n        LCTree::from_data_store(store, base_tree_leafs)\n    }\n}\n\n// Given base tree configs and optionally a replica_config, returns\n// either a disktree or an lctree, specified by Tree.\npub fn create_tree<Tree: MerkleTreeTrait>(\n    
base_tree_len: usize,\n    configs: &[StoreConfig],\n    replica_config: Option<&ReplicaConfig>,\n) -> Result<\n    MerkleTreeWrapper<\n        <Tree as MerkleTreeTrait>::Hasher,\n        <Tree as MerkleTreeTrait>::Store,\n        <Tree as MerkleTreeTrait>::Arity,\n        <Tree as MerkleTreeTrait>::SubTreeArity,\n        <Tree as MerkleTreeTrait>::TopTreeArity,\n    >,\n>\nwhere\n    Tree::Store: 'static,\n{\n    let base_tree_leafs = get_base_tree_leafs::<Tree>(base_tree_len)?;\n    let mut trees = Vec::with_capacity(configs.len());\n    for i in 0..configs.len() {\n        let mut store = Tree::Store::new_with_config(\n            base_tree_len,\n            Tree::Arity::to_usize(),\n            configs[i].clone(),\n        )?;\n        if let Some(lc_store) = <dyn Any>::downcast_mut::<\n            LevelCacheStore<<Tree::Hasher as Hasher>::Domain, File>,\n        >(&mut store)\n        {\n            ensure!(\n                replica_config.is_some(),\n                \"Cannot create LCTree without replica paths\"\n            );\n            let replica_config = replica_config.expect(\"replica config failure\");\n            lc_store.set_external_reader(ExternalReader::new_from_config(&replica_config, i)?)?;\n        }\n\n        if configs.len() == 1 {\n            return MerkleTreeWrapper::<\n                Tree::Hasher,\n                Tree::Store,\n                Tree::Arity,\n                Tree::SubTreeArity,\n                Tree::TopTreeArity,\n            >::from_data_store(store, base_tree_leafs);\n        } else {\n            trees.push(MerkleTreeWrapper::<\n                Tree::Hasher,\n                Tree::Store,\n                Tree::Arity,\n                U0,\n                U0,\n            >::from_data_store(store, base_tree_leafs)?);\n        }\n    }\n\n    ensure!(\n        Tree::TopTreeArity::to_usize() > 0 || Tree::SubTreeArity::to_usize() > 0,\n        \"Cannot have a sub/top tree without more than 1 config\"\n    );\n    if Tree::TopTreeArity::to_usize() > 0 {\n        ensure!(\n            Tree::SubTreeArity::to_usize() > 0,\n            \"Invalid top arity specified without sub arity\"\n        );\n\n        MerkleTreeWrapper::<\n            Tree::Hasher,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >::from_sub_trees_as_trees(trees)\n    } else {\n        ensure!(\n            !configs.is_empty(),\n            \"Cannot create sub-tree with a single tree config\"\n        );\n\n        MerkleTreeWrapper::from_trees(trees)\n    }\n}\n\npub fn create_base_merkle_tree<Tree: MerkleTreeTrait>(\n    config: Option<StoreConfig>,\n    size: usize,\n    data: &[u8],\n) -> Result<Tree> {\n    ensure!(\n        data.len() == (NODE_SIZE * size) as usize,\n        Error::InvalidMerkleTreeArgs(data.len(), NODE_SIZE, size)\n    );\n\n    trace!(\"create_merkle_tree called with size {}\", size);\n    trace!(\n        \"is_merkle_tree_size_valid({}, arity {}) = {}\",\n        size,\n        Tree::Arity::to_usize(),\n        is_merkle_tree_size_valid(size, Tree::Arity::to_usize())\n    );\n    ensure!(\n        is_merkle_tree_size_valid(size, Tree::Arity::to_usize()),\n        \"Invalid merkle tree size given the arity\"\n    );\n\n    let f = |i| {\n        // TODO Replace `expect()` with `context()` (problem is the parallel iterator)\n        let d = data_at_node(&data, i).expect(\"data_at_node math failed\");\n        // TODO/FIXME: This can panic. 
FOR NOW, let's leave this since we're experimenting with\n        // optimization paths. However, we need to ensure that bad input will not lead to a panic\n        // that isn't caught by the FPS API.\n        // Unfortunately, it's not clear how to perform this error-handling in the parallel\n        // iterator case.\n        <Tree::Hasher as Hasher>::Domain::try_from_bytes(d)\n            .expect(\"failed to convert node data to domain element\")\n    };\n\n    let tree = match config {\n        Some(x) => MerkleTree::<\n            <Tree::Hasher as Hasher>::Domain,\n            <Tree::Hasher as Hasher>::Function,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >::from_par_iter_with_config((0..size).into_par_iter().map(f), x),\n        None => MerkleTree::<\n            <Tree::Hasher as Hasher>::Domain,\n            <Tree::Hasher as Hasher>::Function,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >::from_par_iter((0..size).into_par_iter().map(f)),\n    }?;\n\n    Ok(Tree::from_merkle(tree))\n}\n\n/// Construct a new level cache merkle tree, given the specified\n/// config.\n///\n/// Note that while we don't need to pass both the data AND the\n/// replica path (since the replica file will contain the same data),\n/// we pass both since we have access from all callers and this avoids\n/// reading that data from the replica_config here.\npub fn create_base_lcmerkle_tree<H: Hasher, BaseTreeArity: 'static + PoseidonArity>(\n    config: StoreConfig,\n    size: usize,\n    data: &[u8],\n    replica_config: &ReplicaConfig,\n) -> Result<LCMerkleTree<H, BaseTreeArity>> {\n    trace!(\"create_base_lcmerkle_tree called with size {}\", size);\n    trace!(\n        \"is_merkle_tree_size_valid({}, arity {}) = {}\",\n        size,\n        BaseTreeArity::to_usize(),\n        is_merkle_tree_size_valid(size, BaseTreeArity::to_usize())\n    );\n    ensure!(\n        is_merkle_tree_size_valid(size, BaseTreeArity::to_usize()),\n        \"Invalid merkle tree size given the arity\"\n    );\n    ensure!(\n        data.len() == size * size_of::<H::Domain>(),\n        \"Invalid data length for merkle tree\"\n    );\n\n    let f = |i| {\n        let d = data_at_node(&data, i)?;\n        H::Domain::try_from_bytes(d)\n    };\n\n    let mut lc_tree: LCMerkleTree<H, BaseTreeArity> =\n        LCMerkleTree::<H, BaseTreeArity>::try_from_iter_with_config((0..size).map(f), config)?;\n\n    lc_tree.set_external_reader_path(&replica_config.path)?;\n\n    Ok(lc_tree)\n}\n\n// Given a StoreConfig, generate additional ones with appended numbers\n// to uniquely identify them and return the results.  If count is 1,\n// the original config is not modified.\npub fn split_config(config: StoreConfig, count: usize) -> Result<Vec<StoreConfig>> {\n    if count == 1 {\n        return Ok(vec![config]);\n    }\n\n    let mut configs = Vec::with_capacity(count);\n    for i in 0..count {\n        configs.push(StoreConfig::from_config(\n            &config,\n            format!(\"{}-{}\", config.id, i),\n            None,\n        ));\n        configs[i].rows_to_discard = config.rows_to_discard;\n    }\n\n    Ok(configs)\n}\n\n// Given a StoreConfig, generate additional ones with appended numbers\n// to uniquely identify them and return the results.  
If count is 1,\n// the original config is not modified.\n//\n// Useful for testing, where the config may be None.\npub fn split_config_wrapped(\n    config: Option<StoreConfig>,\n    count: usize,\n) -> Result<Vec<Option<StoreConfig>>> {\n    if count == 1 {\n        return Ok(vec![config]);\n    }\n\n    match config {\n        Some(c) => {\n            let mut configs = Vec::with_capacity(count);\n            for i in 0..count {\n                configs.push(Some(StoreConfig::from_config(\n                    &c,\n                    format!(\"{}-{}\", c.id, i),\n                    None,\n                )));\n            }\n            Ok(configs)\n        }\n        None => Ok(vec![None; count]),\n    }\n}\n\n// Given a StoreConfig, replica path and tree_width (leaf nodes),\n// append numbers to each StoreConfig to uniquely identify them and\n// return the results along with a ReplicaConfig using calculated\n// offsets into the single replica path specified for later use with\n// external readers.  If count is 1, the original config is not\n// modified.\npub fn split_config_and_replica(\n    config: StoreConfig,\n    replica_path: PathBuf,\n    sub_tree_width: usize, // nodes, not bytes\n    count: usize,\n) -> Result<(Vec<StoreConfig>, ReplicaConfig)> {\n    if count == 1 {\n        return Ok((\n            vec![config],\n            ReplicaConfig {\n                path: replica_path,\n                offsets: vec![0],\n            },\n        ));\n    }\n\n    let mut configs = Vec::with_capacity(count);\n    let mut replica_offsets = Vec::with_capacity(count);\n\n    for i in 0..count {\n        configs.push(StoreConfig::from_config(\n            &config,\n            format!(\"{}-{}\", config.id, i),\n            None,\n        ));\n        configs[i].rows_to_discard = config.rows_to_discard;\n\n        replica_offsets.push(i * sub_tree_width * NODE_SIZE);\n    }\n\n    Ok((\n        configs,\n        ReplicaConfig {\n            path: replica_path,\n            offsets: replica_offsets,\n        },\n    ))\n}\n\npub fn get_base_tree_count<Tree: MerkleTreeTrait>() -> usize {\n    if Tree::TopTreeArity::to_usize() == 0 && Tree::SubTreeArity::to_usize() == 0 {\n        return 1;\n    }\n\n    if Tree::TopTreeArity::to_usize() > 0 {\n        assert!(Tree::SubTreeArity::to_usize() != 0);\n\n        Tree::TopTreeArity::to_usize() * Tree::SubTreeArity::to_usize()\n    } else {\n        Tree::SubTreeArity::to_usize()\n    }\n}\n\npub fn get_base_tree_leafs<Tree: MerkleTreeTrait>(base_tree_size: usize) -> Result<usize> {\n    get_merkle_tree_leafs(base_tree_size, Tree::Arity::to_usize())\n}\n\npub type ResTree<Tree> = MerkleTreeWrapper<\n    <Tree as MerkleTreeTrait>::Hasher,\n    <Tree as MerkleTreeTrait>::Store,\n    <Tree as MerkleTreeTrait>::Arity,\n    <Tree as MerkleTreeTrait>::SubTreeArity,\n    <Tree as MerkleTreeTrait>::TopTreeArity,\n>;\n\nfn generate_base_tree<R: Rng, Tree: MerkleTreeTrait>(\n    rng: &mut R,\n    nodes: usize,\n    temp_path: Option<PathBuf>,\n) -> (Vec<u8>, ResTree<Tree>)\nwhere\n    Tree::Store: 'static,\n{\n    let elements = (0..nodes)\n        .map(|_| <Tree::Hasher as Hasher>::Domain::random(rng))\n        .collect::<Vec<_>>();\n\n    let mut data = Vec::new();\n    for el in &elements {\n        data.extend_from_slice(AsRef::<[u8]>::as_ref(el));\n    }\n\n    if let Some(ref temp_path) = temp_path {\n        let id: u64 = rng.gen();\n        let replica_path = temp_path.join(format!(\"replica-path-{}\", id));\n        let config = StoreConfig::new(\n    
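        // Arguments: store directory, a unique store id, and the number of\n            // cached tree rows to discard for level-cache stores.\n    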
        &temp_path,\n            format!(\"test-lc-tree-{}\", id),\n            default_rows_to_discard(nodes, Tree::Arity::to_usize()),\n        );\n\n        let mut tree =\n            MerkleTreeWrapper::try_from_iter_with_config(elements.iter().map(|v| Ok(*v)), config)\n                .expect(\"try from iter with config failure\");\n\n        // Write out the replica data.\n        let mut f = File::create(&replica_path).expect(\"replica file create failure\");\n        f.write_all(&data).expect(\"replica file write failure\");\n\n        {\n            // Beware: evil dynamic downcasting RUST MAGIC down below.\n            if let Some(lc_tree) = <dyn Any>::downcast_mut::<\n                MerkleTree<\n                    <Tree::Hasher as Hasher>::Domain,\n                    <Tree::Hasher as Hasher>::Function,\n                    LevelCacheStore<<Tree::Hasher as Hasher>::Domain, File>,\n                    Tree::Arity,\n                    Tree::SubTreeArity,\n                    Tree::TopTreeArity,\n                >,\n            >(&mut tree.inner)\n            {\n                lc_tree\n                    .set_external_reader_path(&replica_path)\n                    .expect(\"lc tree set external reader failure\");\n            }\n        }\n\n        (data, tree)\n    } else {\n        (\n            data,\n            MerkleTreeWrapper::try_from_iter(elements.iter().map(|v| Ok(*v)))\n                .expect(\"try from iter map failure\"),\n        )\n    }\n}\n\nfn generate_sub_tree<R: Rng, Tree: MerkleTreeTrait>(\n    rng: &mut R,\n    nodes: usize,\n    temp_path: Option<PathBuf>,\n) -> (Vec<u8>, ResTree<Tree>)\nwhere\n    Tree::Store: 'static,\n{\n    let base_tree_count = Tree::SubTreeArity::to_usize();\n    let base_tree_size = nodes / base_tree_count;\n    let mut trees = Vec::with_capacity(base_tree_count);\n    let mut data = Vec::new();\n\n    for _ in 0..base_tree_count {\n        let (inner_data, tree) = generate_base_tree::<\n            R,\n            MerkleTreeWrapper<Tree::Hasher, Tree::Store, Tree::Arity>,\n        >(rng, base_tree_size, temp_path.clone());\n        trees.push(tree);\n        data.extend(inner_data);\n    }\n\n    (\n        data,\n        MerkleTreeWrapper::from_trees(trees).expect(\"from trees failure\"),\n    )\n}\n\n/// Only used for testing, but can't cfg-test it as that stops exports.\npub fn generate_tree<Tree: MerkleTreeTrait, R: Rng>(\n    rng: &mut R,\n    nodes: usize,\n    temp_path: Option<PathBuf>,\n) -> (Vec<u8>, ResTree<Tree>)\nwhere\n    Tree::Store: 'static,\n{\n    let sub_tree_arity = Tree::SubTreeArity::to_usize();\n    let top_tree_arity = Tree::TopTreeArity::to_usize();\n\n    if top_tree_arity > 0 {\n        assert!(\n            sub_tree_arity != 0,\n            \"malformed tree with TopTreeArity > 0 and SubTreeArity == 0\"\n        );\n\n        let mut sub_trees = Vec::with_capacity(top_tree_arity);\n        let mut data = Vec::new();\n        for _i in 0..top_tree_arity {\n            let (inner_data, tree) = generate_sub_tree::<\n                R,\n                MerkleTreeWrapper<Tree::Hasher, Tree::Store, Tree::Arity, Tree::SubTreeArity, U0>,\n            >(rng, nodes / top_tree_arity, temp_path.clone());\n\n            sub_trees.push(tree);\n            data.extend(inner_data);\n        }\n        (\n            data,\n            MerkleTreeWrapper::from_sub_trees(sub_trees).expect(\"from sub trees failure\"),\n        )\n    } else if sub_tree_arity > 0 {\n        generate_sub_tree::<R, Tree>(rng, nodes, 
temp_path)\n    } else {\n        generate_base_tree::<R, Tree>(rng, nodes, temp_path)\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/merkle/mod.rs",
    "content": "#![allow(clippy::len_without_is_empty)]\n\nuse std::fs::File;\n\npub use merkletree::store::{DiskStore, ExternalReader, Store};\n\nuse filecoin_hashers::Hasher;\nuse generic_array::typenum::{U0, U2, U4, U8};\nuse merkletree::store::LevelCacheStore;\n\nmod builders;\nmod proof;\nmod tree;\n\npub use builders::*;\npub use proof::*;\npub use tree::*;\n\npub type LCStore<E> = LevelCacheStore<E, File>;\n\npub type MerkleStore<T> = DiskStore<T>;\n\npub type DiskTree<H, U, V, W> = MerkleTreeWrapper<H, DiskStore<<H as Hasher>::Domain>, U, V, W>;\npub type LCTree<H, U, V, W> = MerkleTreeWrapper<H, LCStore<<H as Hasher>::Domain>, U, V, W>;\n\npub type MerkleTree<H, U> = DiskTree<H, U, U0, U0>;\npub type LCMerkleTree<H, U> = LCTree<H, U, U0, U0>;\n\npub type BinaryMerkleTree<H> = MerkleTree<H, U2>;\npub type BinaryLCMerkleTree<H> = LCMerkleTree<H, U2>;\n\npub type BinarySubMerkleTree<H> = DiskTree<H, U2, U2, U0>;\n\npub type QuadMerkleTree<H> = MerkleTree<H, U4>;\npub type QuadLCMerkleTree<H> = LCMerkleTree<H, U4>;\n\npub type OctMerkleTree<H> = DiskTree<H, U8, U0, U0>;\npub type OctSubMerkleTree<H> = DiskTree<H, U8, U2, U0>;\npub type OctTopMerkleTree<H> = DiskTree<H, U8, U8, U2>;\n\npub type OctLCMerkleTree<H> = LCTree<H, U8, U0, U0>;\npub type OctLCSubMerkleTree<H> = LCTree<H, U8, U2, U0>;\npub type OctLCTopMerkleTree<H> = LCTree<H, U8, U8, U2>;\n"
  },
  {
    "path": "storage-proofs-core/src/merkle/proof.rs",
    "content": "#![allow(clippy::len_without_is_empty)]\n\nuse std::fmt::Debug;\nuse std::marker::PhantomData;\nuse std::slice::Iter;\n\nuse anyhow::{ensure, Result};\nuse bellperson::bls::Fr;\nuse filecoin_hashers::{Hasher, PoseidonArity};\nuse generic_array::typenum::{Unsigned, U0};\nuse merkletree::hash::Algorithm;\nuse serde::{de::DeserializeOwned, Deserialize, Serialize};\n\nuse crate::drgraph::graph_height;\n\n/// Trait to abstract over the concept of Merkle Proof.\npub trait MerkleProofTrait: Clone + Serialize + DeserializeOwned + Debug + Sync + Send {\n    type Hasher: Hasher;\n    type Arity: 'static + PoseidonArity;\n    type SubTreeArity: 'static + PoseidonArity;\n    type TopTreeArity: 'static + PoseidonArity;\n\n    /// Try to convert a merkletree proof into this structure.\n    fn try_from_proof(\n        p: merkletree::proof::Proof<<Self::Hasher as Hasher>::Domain, Self::Arity>,\n    ) -> Result<Self>;\n\n    fn as_options(&self) -> Vec<(Vec<Option<Fr>>, Option<usize>)> {\n        self.path()\n            .iter()\n            .map(|v| {\n                (\n                    v.0.iter().copied().map(Into::into).map(Some).collect(),\n                    Some(v.1),\n                )\n            })\n            .collect::<Vec<_>>()\n    }\n\n    fn into_options_with_leaf(self) -> (Option<Fr>, Vec<(Vec<Option<Fr>>, Option<usize>)>) {\n        let leaf = self.leaf();\n        let path = self.path();\n        (\n            Some(leaf.into()),\n            path.into_iter()\n                .map(|(a, b)| {\n                    (\n                        a.iter().copied().map(Into::into).map(Some).collect(),\n                        Some(b),\n                    )\n                })\n                .collect::<Vec<_>>(),\n        )\n    }\n    fn as_pairs(&self) -> Vec<(Vec<Fr>, usize)> {\n        self.path()\n            .iter()\n            .map(|v| (v.0.iter().copied().map(Into::into).collect(), v.1))\n            .collect::<Vec<_>>()\n    }\n    fn verify(&self) -> bool;\n\n    /// Validates the MerkleProof and that it corresponds to the supplied node.\n    ///\n    /// TODO: audit performance and usage in case verification is\n    /// unnecessary based on how it's used.\n    fn validate(&self, node: usize) -> bool {\n        if !self.verify() {\n            return false;\n        }\n\n        node == self.path_index()\n    }\n\n    fn validate_data(&self, data: <Self::Hasher as Hasher>::Domain) -> bool {\n        if !self.verify() {\n            return false;\n        }\n\n        self.leaf() == data\n    }\n\n    fn leaf(&self) -> <Self::Hasher as Hasher>::Domain;\n    fn root(&self) -> <Self::Hasher as Hasher>::Domain;\n    fn len(&self) -> usize;\n    fn path(&self) -> Vec<(Vec<<Self::Hasher as Hasher>::Domain>, usize)>;\n\n    fn path_index(&self) -> usize {\n        self.path()\n            .iter()\n            .rev()\n            .fold(0, |acc, (_, index)| (acc * Self::Arity::to_usize()) + index)\n    }\n\n    fn proves_challenge(&self, challenge: usize) -> bool {\n        self.path_index() == challenge\n    }\n\n    /// Calcluates the exected length of the full path, given the number of leaves in the base layer.\n    fn expected_len(&self, leaves: usize) -> usize {\n        compound_path_length::<Self::Arity, Self::SubTreeArity, Self::TopTreeArity>(leaves)\n    }\n}\n\npub fn base_path_length<A: Unsigned, B: Unsigned, C: Unsigned>(leaves: usize) -> usize {\n    let leaves = if C::to_usize() > 0 {\n        leaves / C::to_usize() / B::to_usize()\n    } else if 
B::to_usize() > 0 {\n        leaves / B::to_usize()\n    } else {\n        leaves\n    };\n\n    graph_height::<A>(leaves) - 1\n}\n\npub fn compound_path_length<A: Unsigned, B: Unsigned, C: Unsigned>(leaves: usize) -> usize {\n    let mut len = base_path_length::<A, B, C>(leaves);\n    if B::to_usize() > 0 {\n        len += 1;\n    }\n\n    if C::to_usize() > 0 {\n        len += 1;\n    }\n\n    len\n}\npub fn compound_tree_height<A: Unsigned, B: Unsigned, C: Unsigned>(leaves: usize) -> usize {\n    // base layer\n    let a = graph_height::<A>(leaves) - 1;\n\n    // sub tree layer\n    let b = if B::to_usize() > 0 {\n        B::to_usize() - 1\n    } else {\n        0\n    };\n\n    // top tree layer\n    let c = if C::to_usize() > 0 {\n        C::to_usize() - 1\n    } else {\n        0\n    };\n\n    a + b + c\n}\n\nmacro_rules! forward_method {\n    ($caller:expr, $name:ident) => {\n        match $caller {\n            ProofData::Single(ref proof) => proof.$name(),\n            ProofData::Sub(ref proof) => proof.$name(),\n            ProofData::Top(ref proof) => proof.$name(),\n        }\n    };\n    ($caller:expr, $name:ident, $( $args:expr ),+) => {\n        match $caller {\n            ProofData::Single(ref proof) => proof.$name($($args),+),\n            ProofData::Sub(ref proof) => proof.$name($($args),+),\n            ProofData::Top(ref proof) => proof.$name($($args),+),\n        }\n    };\n}\n\n#[derive(Debug, Default, Clone, Serialize, Deserialize)]\npub struct InclusionPath<H: Hasher, Arity: PoseidonArity> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    path: Vec<PathElement<H, Arity>>,\n}\n\nimpl<H: Hasher, Arity: PoseidonArity> From<Vec<PathElement<H, Arity>>> for InclusionPath<H, Arity> {\n    fn from(path: Vec<PathElement<H, Arity>>) -> Self {\n        Self { path }\n    }\n}\n\nimpl<H: Hasher, Arity: PoseidonArity> InclusionPath<H, Arity> {\n    /// Calculate the root of this path, given the leaf as input.\n    pub fn root(&self, leaf: H::Domain) -> H::Domain {\n        let mut a = H::Function::default();\n        (0..self.path.len()).fold(leaf, |h, height| {\n            a.reset();\n\n            let index = self.path[height].index;\n            let mut nodes = self.path[height].hashes.clone();\n            nodes.insert(index, h);\n\n            a.multi_node(&nodes, height)\n        })\n    }\n\n    pub fn len(&self) -> usize {\n        self.path.len()\n    }\n\n    pub fn is_empty(&self) -> bool {\n        self.path.is_empty()\n    }\n\n    pub fn iter(&self) -> Iter<'_, PathElement<H, Arity>> {\n        self.path.iter()\n    }\n\n    pub fn path_index(&self) -> usize {\n        self.path\n            .iter()\n            .rev()\n            .fold(0, |acc, p| (acc * Arity::to_usize()) + p.index)\n    }\n}\n\n#[derive(Debug, Default, Clone, Serialize, Deserialize)]\npub struct PathElement<H: Hasher, Arity: PoseidonArity> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    hashes: Vec<H::Domain>,\n    index: usize,\n    #[serde(skip)]\n    _arity: PhantomData<Arity>,\n}\n\n/// Representation of a merkle proof.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct MerkleProof<\n    H: Hasher,\n    BaseArity: PoseidonArity,\n    SubTreeArity: PoseidonArity = U0,\n    TopTreeArity: PoseidonArity = U0,\n> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = 
\"H::Domain: Deserialize<'de>\"\n    ))]\n    data: ProofData<H, BaseArity, SubTreeArity, TopTreeArity>,\n}\n\nimpl<\n        H: Hasher,\n        Arity: 'static + PoseidonArity,\n        SubTreeArity: 'static + PoseidonArity,\n        TopTreeArity: 'static + PoseidonArity,\n    > MerkleProofTrait for MerkleProof<H, Arity, SubTreeArity, TopTreeArity>\n{\n    type Hasher = H;\n    type Arity = Arity;\n    type SubTreeArity = SubTreeArity;\n    type TopTreeArity = TopTreeArity;\n\n    fn try_from_proof(\n        p: merkletree::proof::Proof<<Self::Hasher as Hasher>::Domain, Self::Arity>,\n    ) -> Result<Self> {\n        if p.top_layer_nodes() > 0 {\n            Ok(MerkleProof {\n                data: ProofData::Top(TopProof::try_from_proof(p)?),\n            })\n        } else if p.sub_layer_nodes() > 0 {\n            Ok(MerkleProof {\n                data: ProofData::Sub(SubProof::try_from_proof(p)?),\n            })\n        } else {\n            Ok(MerkleProof {\n                data: ProofData::Single(SingleProof::try_from_proof(p)?),\n            })\n        }\n    }\n\n    fn verify(&self) -> bool {\n        forward_method!(self.data, verify)\n    }\n\n    fn leaf(&self) -> H::Domain {\n        forward_method!(self.data, leaf)\n    }\n\n    fn root(&self) -> H::Domain {\n        forward_method!(self.data, root)\n    }\n\n    fn len(&self) -> usize {\n        forward_method!(self.data, len)\n    }\n\n    fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {\n        forward_method!(self.data, path)\n    }\n    fn path_index(&self) -> usize {\n        forward_method!(self.data, path_index)\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\nenum ProofData<\n    H: Hasher,\n    BaseArity: PoseidonArity,\n    SubTreeArity: PoseidonArity,\n    TopTreeArity: PoseidonArity,\n> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    Single(SingleProof<H, BaseArity>),\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    Sub(SubProof<H, BaseArity, SubTreeArity>),\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    Top(TopProof<H, BaseArity, SubTreeArity, TopTreeArity>),\n}\n\n#[derive(Debug, Default, Clone, Serialize, Deserialize)]\nstruct SingleProof<H: Hasher, Arity: PoseidonArity> {\n    /// Root of the merkle tree.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    root: H::Domain,\n    /// The original leaf data for this prof.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    leaf: H::Domain,\n    /// The path from leaf to root.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    path: InclusionPath<H, Arity>,\n}\n\nimpl<H: Hasher, Arity: PoseidonArity> SingleProof<H, Arity> {\n    pub fn new(path: InclusionPath<H, Arity>, root: H::Domain, leaf: H::Domain) -> Self {\n        SingleProof { root, leaf, path }\n    }\n}\n\n#[derive(Debug, Default, Clone, Serialize, Deserialize)]\nstruct SubProof<H: Hasher, BaseArity: PoseidonArity, SubTreeArity: PoseidonArity> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    
base_proof: InclusionPath<H, BaseArity>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    sub_proof: InclusionPath<H, SubTreeArity>,\n    /// Root of the merkle tree.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    root: H::Domain,\n    /// The original leaf data for this proof.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    leaf: H::Domain,\n}\n\nimpl<H: Hasher, BaseArity: PoseidonArity, SubTreeArity: PoseidonArity>\n    SubProof<H, BaseArity, SubTreeArity>\n{\n    pub fn new(\n        base_proof: InclusionPath<H, BaseArity>,\n        sub_proof: InclusionPath<H, SubTreeArity>,\n        root: H::Domain,\n        leaf: H::Domain,\n    ) -> Self {\n        Self {\n            base_proof,\n            sub_proof,\n            root,\n            leaf,\n        }\n    }\n}\n\n#[derive(Debug, Default, Clone, Serialize, Deserialize)]\nstruct TopProof<\n    H: Hasher,\n    BaseArity: PoseidonArity,\n    SubTreeArity: PoseidonArity,\n    TopTreeArity: PoseidonArity,\n> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    base_proof: InclusionPath<H, BaseArity>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    sub_proof: InclusionPath<H, SubTreeArity>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    top_proof: InclusionPath<H, TopTreeArity>,\n    /// Root of the merkle tree.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    root: H::Domain,\n    /// The original leaf data for this proof.\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    leaf: H::Domain,\n}\n\nimpl<\n        H: Hasher,\n        BaseArity: PoseidonArity,\n        SubTreeArity: PoseidonArity,\n        TopTreeArity: PoseidonArity,\n    > TopProof<H, BaseArity, SubTreeArity, TopTreeArity>\n{\n    pub fn new(\n        base_proof: InclusionPath<H, BaseArity>,\n        sub_proof: InclusionPath<H, SubTreeArity>,\n        top_proof: InclusionPath<H, TopTreeArity>,\n        root: H::Domain,\n        leaf: H::Domain,\n    ) -> Self {\n        Self {\n            base_proof,\n            sub_proof,\n            top_proof,\n            root,\n            leaf,\n        }\n    }\n}\n\nimpl<\n        H: Hasher,\n        BaseArity: PoseidonArity,\n        SubTreeArity: PoseidonArity,\n        TopTreeArity: PoseidonArity,\n    > MerkleProof<H, BaseArity, SubTreeArity, TopTreeArity>\n{\n    pub fn new(n: usize) -> Self {\n        let root = Default::default();\n        let leaf = Default::default();\n        let path_elem = PathElement {\n            hashes: vec![Default::default(); BaseArity::to_usize()],\n            index: 0,\n            _arity: Default::default(),\n        };\n        let path = vec![path_elem; n];\n        MerkleProof {\n            data: ProofData::Single(SingleProof::new(path.into(), root, leaf)),\n        }\n    }\n}\n\n/// Converts a merkletree proof to a SingleProof.\nfn proof_to_single<H: Hasher, Arity: PoseidonArity, TargetArity: 
PoseidonArity>(\n    proof: &merkletree::proof::Proof<H::Domain, Arity>,\n    lemma_start_index: usize,\n    sub_root: Option<H::Domain>,\n) -> SingleProof<H, TargetArity> {\n    let root = proof.root();\n    let leaf = if let Some(sub_root) = sub_root {\n        sub_root\n    } else {\n        proof.item()\n    };\n    let path = extract_path::<H, TargetArity>(proof.lemma(), proof.path(), lemma_start_index);\n\n    SingleProof::new(path, root, leaf)\n}\n\n/// 'lemma_start_index' is required because sub/top proofs start at\n/// index 0 and base proofs start at index 1 (skipping the leaf at the\n/// front)\nfn extract_path<H: Hasher, Arity: PoseidonArity>(\n    lemma: &[H::Domain],\n    path: &[usize],\n    lemma_start_index: usize,\n) -> InclusionPath<H, Arity> {\n    let path = lemma[lemma_start_index..lemma.len() - 1]\n        .chunks(Arity::to_usize() - 1)\n        .zip(path.iter())\n        .map(|(hashes, index)| PathElement {\n            hashes: hashes.to_vec(),\n            index: *index,\n            _arity: Default::default(),\n        })\n        .collect::<Vec<_>>();\n\n    path.into()\n}\n\nimpl<H: Hasher, Arity: 'static + PoseidonArity> SingleProof<H, Arity> {\n    fn try_from_proof(p: merkletree::proof::Proof<<H as Hasher>::Domain, Arity>) -> Result<Self> {\n        Ok(proof_to_single(&p, 1, None))\n    }\n\n    fn verify(&self) -> bool {\n        let calculated_root = self.path.root(self.leaf);\n        self.root == calculated_root\n    }\n\n    fn leaf(&self) -> H::Domain {\n        self.leaf\n    }\n\n    fn root(&self) -> H::Domain {\n        self.root\n    }\n\n    fn len(&self) -> usize {\n        self.path.len() * (Arity::to_usize() - 1) + 2\n    }\n\n    fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {\n        self.path\n            .iter()\n            .map(|x| (x.hashes.clone(), x.index))\n            .collect::<Vec<_>>()\n    }\n\n    fn path_index(&self) -> usize {\n        self.path.path_index()\n    }\n}\n\nimpl<H: Hasher, Arity: 'static + PoseidonArity, SubTreeArity: 'static + PoseidonArity>\n    SubProof<H, Arity, SubTreeArity>\n{\n    fn try_from_proof(p: merkletree::proof::Proof<<H as Hasher>::Domain, Arity>) -> Result<Self> {\n        ensure!(\n            p.sub_layer_nodes() == SubTreeArity::to_usize(),\n            \"sub arity mismatch\"\n        );\n        ensure!(\n            p.sub_tree_proof.is_some(),\n            \"Cannot generate sub proof without a base-proof\"\n        );\n        let base_p = p.sub_tree_proof.as_ref().expect(\"proof as_ref failure\");\n\n        // Generate SubProof\n        let root = p.root();\n        let leaf = base_p.item();\n        let base_proof = extract_path::<H, Arity>(base_p.lemma(), base_p.path(), 1);\n        let sub_proof = extract_path::<H, SubTreeArity>(p.lemma(), p.path(), 0);\n\n        Ok(SubProof::new(base_proof, sub_proof, root, leaf))\n    }\n\n    fn verify(&self) -> bool {\n        let sub_leaf = self.base_proof.root(self.leaf);\n        let calculated_root = self.sub_proof.root(sub_leaf);\n\n        self.root == calculated_root\n    }\n\n    fn leaf(&self) -> H::Domain {\n        self.leaf\n    }\n\n    fn root(&self) -> H::Domain {\n        self.root\n    }\n\n    fn len(&self) -> usize {\n        SubTreeArity::to_usize()\n    }\n\n    fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {\n        self.base_proof\n            .iter()\n            .map(|x| (x.hashes.clone(), x.index))\n            .chain(self.sub_proof.iter().map(|x| (x.hashes.clone(), x.index)))\n            .collect()\n    }\n\n    fn 
path_index(&self) -> usize {\n        let mut base_proof_leaves = 1;\n        for _i in 0..self.base_proof.len() {\n            base_proof_leaves *= Arity::to_usize()\n        }\n\n        let sub_proof_index = self.sub_proof.path_index();\n\n        (sub_proof_index * base_proof_leaves) + self.base_proof.path_index()\n    }\n}\n\nimpl<\n        H: Hasher,\n        Arity: 'static + PoseidonArity,\n        SubTreeArity: 'static + PoseidonArity,\n        TopTreeArity: 'static + PoseidonArity,\n    > TopProof<H, Arity, SubTreeArity, TopTreeArity>\n{\n    fn try_from_proof(p: merkletree::proof::Proof<<H as Hasher>::Domain, Arity>) -> Result<Self> {\n        ensure!(\n            p.top_layer_nodes() == TopTreeArity::to_usize(),\n            \"top arity mismatch\"\n        );\n        ensure!(\n            p.sub_layer_nodes() == SubTreeArity::to_usize(),\n            \"sub arity mismatch\"\n        );\n\n        ensure!(\n            p.sub_tree_proof.is_some(),\n            \"Cannot generate top proof without a sub-proof\"\n        );\n        let sub_p = p.sub_tree_proof.as_ref().expect(\"proofs as ref failure\");\n\n        ensure!(\n            sub_p.sub_tree_proof.is_some(),\n            \"Cannot generate top proof without a base-proof\"\n        );\n        let base_p = sub_p\n            .sub_tree_proof\n            .as_ref()\n            .expect(\"proofs as ref failure\");\n\n        let root = p.root();\n        let leaf = base_p.item();\n\n        let base_proof = extract_path::<H, Arity>(base_p.lemma(), base_p.path(), 1);\n        let sub_proof = extract_path::<H, SubTreeArity>(sub_p.lemma(), sub_p.path(), 0);\n        let top_proof = extract_path::<H, TopTreeArity>(p.lemma(), p.path(), 0);\n\n        Ok(TopProof::new(base_proof, sub_proof, top_proof, root, leaf))\n    }\n\n    fn verify(&self) -> bool {\n        let sub_leaf = self.base_proof.root(self.leaf);\n        let top_leaf = self.sub_proof.root(sub_leaf);\n        let calculated_root = self.top_proof.root(top_leaf);\n\n        self.root == calculated_root\n    }\n\n    fn leaf(&self) -> H::Domain {\n        self.leaf\n    }\n\n    fn root(&self) -> H::Domain {\n        self.root\n    }\n\n    fn len(&self) -> usize {\n        TopTreeArity::to_usize()\n    }\n\n    fn path(&self) -> Vec<(Vec<H::Domain>, usize)> {\n        self.base_proof\n            .iter()\n            .map(|x| (x.hashes.clone(), x.index))\n            .chain(self.sub_proof.iter().map(|x| (x.hashes.clone(), x.index)))\n            .chain(self.top_proof.iter().map(|x| (x.hashes.clone(), x.index)))\n            .collect()\n    }\n\n    fn path_index(&self) -> usize {\n        let mut base_proof_leaves = 1;\n        for _i in 0..self.base_proof.len() {\n            base_proof_leaves *= Arity::to_usize()\n        }\n\n        let sub_proof_leaves = base_proof_leaves * SubTreeArity::to_usize();\n\n        let sub_proof_index = self.sub_proof.path_index();\n        let top_proof_index = self.top_proof.path_index();\n\n        (sub_proof_index * base_proof_leaves)\n            + (top_proof_index * sub_proof_leaves)\n            + self.base_proof.path_index()\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use filecoin_hashers::{\n        blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain,\n    };\n    use generic_array::typenum::{U2, U4, U8};\n    use rand::thread_rng;\n\n    use crate::merkle::{\n        generate_tree, get_base_tree_count, DiskStore, MerkleTreeTrait, MerkleTreeWrapper,\n    };\n\n    fn merklepath<Tree: 
'static + MerkleTreeTrait>() {\n        let node_size = 32;\n        let nodes = 64 * get_base_tree_count::<Tree>();\n\n        let mut rng = thread_rng();\n        let (data, tree) = generate_tree::<Tree, _>(&mut rng, nodes, None);\n\n        for i in 0..nodes {\n            let proof = tree.gen_proof(i).expect(\"gen_proof failure\");\n\n            assert!(proof.verify(), \"failed to validate\");\n\n            assert!(proof.validate(i), \"failed to validate valid merkle path\");\n            let data_slice = &data[i * node_size..(i + 1) * node_size].to_vec();\n            assert!(\n                proof.validate_data(\n                    <Tree::Hasher as Hasher>::Domain::try_from_bytes(data_slice)\n                        .expect(\"try from bytes failure\")\n                ),\n                \"failed to validate valid data\"\n            );\n        }\n    }\n\n    #[test]\n    fn merklepath_poseidon_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n                DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                U2,\n                U0,\n                U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_poseidon_4() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n                DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                U4,\n                U0,\n                U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_poseidon_8() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n                DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                U8,\n                U0,\n                U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_poseidon_8_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n                DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                U8,\n                U2,\n                U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_poseidon_8_4() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n                DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                U8,\n                U4,\n                U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_poseidon_8_4_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                PoseidonHasher,\n                DiskStore<<PoseidonHasher as Hasher>::Domain>,\n                U8,\n                U4,\n                U2,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_sha256_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Sha256Hasher,\n                DiskStore<<Sha256Hasher as Hasher>::Domain>,\n                U2,\n                U0,\n                U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_sha256_4() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Sha256Hasher,\n                DiskStore<<Sha256Hasher as Hasher>::Domain>,\n                U4,\n                U0,\n                U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_sha256_2_4() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Sha256Hasher,\n                DiskStore<<Sha256Hasher as Hasher>::Domain>,\n                U2,\n                U4,\n                U0,\n            
>,\n        >();\n    }\n\n    #[test]\n    fn merklepath_sha256_top_2_4_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Sha256Hasher,\n                DiskStore<<Sha256Hasher as Hasher>::Domain>,\n                U2,\n                U4,\n                U2,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_blake2s_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Blake2sHasher,\n                DiskStore<<Blake2sHasher as Hasher>::Domain>,\n                U2,\n                U0,\n                U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_blake2s_4() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Blake2sHasher,\n                DiskStore<<Blake2sHasher as Hasher>::Domain>,\n                U4,\n                U0,\n                U0,\n            >,\n        >();\n    }\n\n    #[test]\n    fn merklepath_blake2s_8_4_2() {\n        merklepath::<\n            MerkleTreeWrapper<\n                Blake2sHasher,\n                DiskStore<<Blake2sHasher as Hasher>::Domain>,\n                U8,\n                U4,\n                U2,\n            >,\n        >();\n    }\n}\n"
  },
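  {
    "path": "storage-proofs-core/examples/path_index_sketch.rs",
    "content": "//! Illustrative sketch (not part of the original crate): shows how\n//! `MerkleProofTrait::path_index` folds the per-level branch indices of a\n//! Merkle path back into the index of the proven leaf. The helper below\n//! re-implements that fold over plain `usize` values so it runs without any\n//! tree machinery; the file name and the helper are hypothetical.\n\n/// Folds path-element indices (leaf-to-root order) into a leaf index for a\n/// tree of the given arity, mirroring the `fold` in `path_index`.\nfn path_index(arity: usize, indices: &[usize]) -> usize {\n    indices.iter().rev().fold(0, |acc, index| acc * arity + index)\n}\n\nfn main() {\n    // Arity-8 tree with 64 leaves: leaf 19 sits at position 3 within its\n    // bottom group of 8 and its parent at position 2 among the root's\n    // children, so 2 * 8 + 3 == 19.\n    assert_eq!(path_index(8, &[3, 2]), 19);\n\n    // Binary tree of depth 4: the indices are the bits of the leaf index,\n    // least significant bit first.\n    assert_eq!(path_index(2, &[1, 0, 1, 1]), 0b1101);\n}\n"
  },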
  {
    "path": "storage-proofs-core/src/merkle/tree.rs",
    "content": "#![allow(clippy::len_without_is_empty)]\n\nuse std::fmt::{self, Debug, Formatter};\nuse std::marker::PhantomData;\nuse std::ops::{Deref, DerefMut};\n\nuse anyhow::Result;\nuse filecoin_hashers::{Hasher, PoseidonArity};\nuse generic_array::typenum::U0;\nuse merkletree::{\n    hash::Hashable,\n    merkle::{FromIndexedParallelIterator, MerkleTree},\n    store::{ReplicaConfig, Store, StoreConfig},\n};\nuse rayon::prelude::{IndexedParallelIterator, IntoParallelIterator};\n\nuse crate::merkle::{LCTree, MerkleProof, MerkleProofTrait};\n\n/// Trait used to abstract over the way Merkle Trees are constructed and stored.\npub trait MerkleTreeTrait: Send + Sync + Debug {\n    type Arity: 'static + PoseidonArity;\n    type SubTreeArity: 'static + PoseidonArity;\n    type TopTreeArity: 'static + PoseidonArity;\n    type Hasher: 'static + Hasher;\n    type Store: Store<<Self::Hasher as Hasher>::Domain>;\n    type Proof: MerkleProofTrait<\n        Hasher = Self::Hasher,\n        Arity = Self::Arity,\n        SubTreeArity = Self::SubTreeArity,\n        TopTreeArity = Self::TopTreeArity,\n    >;\n\n    /// Print a unique name for this configuration.\n    fn display() -> String;\n    /// Returns the root hash of the tree.\n    fn root(&self) -> <Self::Hasher as Hasher>::Domain;\n    /// Creates a merkle proof of the node at the given index.\n    fn gen_proof(&self, index: usize) -> Result<Self::Proof>;\n    fn gen_cached_proof(&self, i: usize, rows_to_discard: Option<usize>) -> Result<Self::Proof>;\n    fn row_count(&self) -> usize;\n    fn leaves(&self) -> usize;\n    fn from_merkle(\n        tree: MerkleTree<\n            <Self::Hasher as Hasher>::Domain,\n            <Self::Hasher as Hasher>::Function,\n            Self::Store,\n            Self::Arity,\n            Self::SubTreeArity,\n            Self::TopTreeArity,\n        >,\n    ) -> Self;\n}\n\npub struct MerkleTreeWrapper<\n    H: Hasher,\n    S: Store<<H as Hasher>::Domain>,\n    U: PoseidonArity,\n    V: PoseidonArity = U0,\n    W: PoseidonArity = U0,\n> {\n    pub inner: MerkleTree<<H as Hasher>::Domain, <H as Hasher>::Function, S, U, V, W>,\n    pub h: PhantomData<H>,\n}\n\nimpl<\n        H: 'static + Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        U: 'static + PoseidonArity,\n        V: 'static + PoseidonArity,\n        W: 'static + PoseidonArity,\n    > MerkleTreeTrait for MerkleTreeWrapper<H, S, U, V, W>\n{\n    type Arity = U;\n    type SubTreeArity = V;\n    type TopTreeArity = W;\n    type Hasher = H;\n    type Store = S;\n    type Proof = MerkleProof<Self::Hasher, Self::Arity, Self::SubTreeArity, Self::TopTreeArity>;\n\n    fn display() -> String {\n        format!(\n            \"merkletree-{}-{}-{}-{}\",\n            H::name(),\n            U::to_usize(),\n            V::to_usize(),\n            W::to_usize()\n        )\n    }\n\n    fn root(&self) -> <Self::Hasher as Hasher>::Domain {\n        self.inner.root()\n    }\n\n    fn gen_proof(&self, i: usize) -> Result<Self::Proof> {\n        let proof = self.inner.gen_proof(i)?;\n\n        debug_assert!(proof.validate::<H::Function>().expect(\"validate failed\"));\n\n        MerkleProof::try_from_proof(proof)\n    }\n\n    fn gen_cached_proof(&self, i: usize, rows_to_discard: Option<usize>) -> Result<Self::Proof> {\n        if rows_to_discard.is_some() && rows_to_discard.expect(\"rows to discard failure\") == 0 {\n            return self.gen_proof(i);\n        }\n\n        let proof = self.inner.gen_cached_proof(i, rows_to_discard)?;\n\n        
debug_assert!(proof.validate::<H::Function>().expect(\"validate failed\"));\n\n        MerkleProof::try_from_proof(proof)\n    }\n\n    fn row_count(&self) -> usize {\n        self.inner.row_count()\n    }\n\n    fn leaves(&self) -> usize {\n        self.inner.leafs()\n    }\n\n    fn from_merkle(\n        tree: MerkleTree<\n            <Self::Hasher as Hasher>::Domain,\n            <Self::Hasher as Hasher>::Function,\n            Self::Store,\n            Self::Arity,\n            Self::SubTreeArity,\n            Self::TopTreeArity,\n        >,\n    ) -> Self {\n        tree.into()\n    }\n}\n\nimpl<\n        H: Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        U: PoseidonArity,\n        V: PoseidonArity,\n        W: PoseidonArity,\n    > From<MerkleTree<<H as Hasher>::Domain, <H as Hasher>::Function, S, U, V, W>>\n    for MerkleTreeWrapper<H, S, U, V, W>\n{\n    fn from(tree: MerkleTree<<H as Hasher>::Domain, <H as Hasher>::Function, S, U, V, W>) -> Self {\n        Self {\n            inner: tree,\n            h: Default::default(),\n        }\n    }\n}\n\nimpl<\n        H: Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        U: PoseidonArity,\n        V: PoseidonArity,\n        W: PoseidonArity,\n    > MerkleTreeWrapper<H, S, U, V, W>\n{\n    pub fn new<I: IntoIterator<Item = H::Domain>>(data: I) -> Result<Self> {\n        let tree = MerkleTree::new(data)?;\n        Ok(tree.into())\n    }\n\n    pub fn new_with_config<I: IntoIterator<Item = H::Domain>>(\n        data: I,\n        config: StoreConfig,\n    ) -> Result<Self> {\n        let tree = MerkleTree::new_with_config(data, config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_data_with_config<O: Hashable<H::Function>, I: IntoIterator<Item = O>>(\n        data: I,\n        config: StoreConfig,\n    ) -> Result<Self> {\n        let tree = MerkleTree::from_data_with_config(data, config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_data_store(data: S, leafs: usize) -> Result<Self> {\n        let tree = MerkleTree::from_data_store(data, leafs)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_byte_slice_with_config(data: &[u8], config: StoreConfig) -> Result<Self> {\n        let tree = MerkleTree::from_byte_slice_with_config(data, config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_tree_slice(data: &[u8], leafs: usize) -> Result<Self> {\n        let tree = MerkleTree::from_tree_slice(data, leafs)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_tree_slice_with_config(\n        data: &[u8],\n        leafs: usize,\n        config: StoreConfig,\n    ) -> Result<Self> {\n        let tree = MerkleTree::from_tree_slice_with_config(data, leafs, config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_trees(trees: Vec<MerkleTreeWrapper<H, S, U, U0, U0>>) -> Result<Self> {\n        let trees = trees.into_iter().map(|t| t.inner).collect();\n        let tree = MerkleTree::from_trees(trees)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_sub_trees(trees: Vec<MerkleTreeWrapper<H, S, U, V, U0>>) -> Result<Self> {\n        let trees = trees.into_iter().map(|t| t.inner).collect();\n        let tree = MerkleTree::from_sub_trees(trees)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_sub_trees_as_trees(trees: Vec<MerkleTreeWrapper<H, S, U, U0, U0>>) -> Result<Self> {\n        let trees = trees.into_iter().map(|t| t.inner).collect();\n        let tree = MerkleTree::from_sub_trees_as_trees(trees)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_slices(\n        tree_data: &[&[u8]],\n        
leafs: usize,\n    ) -> Result<MerkleTreeWrapper<H, S, U, V, U0>> {\n        let tree =\n            MerkleTree::<<H as Hasher>::Domain, <H as Hasher>::Function, S, U, V, U0>::from_slices(\n                tree_data, leafs,\n            )?;\n        Ok(tree.into())\n    }\n\n    pub fn from_slices_with_configs(\n        tree_data: &[&[u8]],\n        leafs: usize,\n        configs: &[StoreConfig],\n    ) -> Result<Self> {\n        let tree = MerkleTree::from_slices_with_configs(tree_data, leafs, configs)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_stores(leafs: usize, stores: Vec<S>) -> Result<Self> {\n        let tree = MerkleTree::from_stores(leafs, stores)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_store_configs(leafs: usize, configs: &[StoreConfig]) -> Result<Self> {\n        let tree = MerkleTree::from_store_configs(leafs, configs)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_store_configs_and_replica(\n        leafs: usize,\n        configs: &[StoreConfig],\n        replica_config: &ReplicaConfig,\n    ) -> Result<LCTree<H, U, V, W>> {\n        let tree = MerkleTree::from_store_configs_and_replica(leafs, configs, replica_config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_sub_tree_store_configs(leafs: usize, configs: &[StoreConfig]) -> Result<Self> {\n        let tree = MerkleTree::from_sub_tree_store_configs(leafs, configs)?;\n        Ok(tree.into())\n    }\n\n    pub fn try_from_iter<I: IntoIterator<Item = Result<H::Domain>>>(into: I) -> Result<Self> {\n        let tree = MerkleTree::try_from_iter(into)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_sub_tree_store_configs_and_replica(\n        leafs: usize,\n        configs: &[StoreConfig],\n        replica_config: &ReplicaConfig,\n    ) -> Result<LCTree<H, U, V, W>> {\n        let tree =\n            MerkleTree::from_sub_tree_store_configs_and_replica(leafs, configs, replica_config)?;\n        Ok(tree.into())\n    }\n\n    pub fn try_from_iter_with_config<I: IntoIterator<Item = Result<H::Domain>>>(\n        into: I,\n        config: StoreConfig,\n    ) -> Result<Self> {\n        let tree = MerkleTree::try_from_iter_with_config(into, config)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_par_iter<I>(par_iter: I) -> Result<Self>\n    where\n        I: IntoParallelIterator<Item = H::Domain>,\n        I::Iter: IndexedParallelIterator,\n    {\n        let tree = MerkleTree::from_par_iter(par_iter)?;\n        Ok(tree.into())\n    }\n\n    pub fn from_par_iter_with_config<I>(par_iter: I, config: StoreConfig) -> Result<Self>\n    where\n        I: IntoParallelIterator<Item = H::Domain>,\n        I::Iter: IndexedParallelIterator,\n    {\n        let tree = MerkleTree::from_par_iter_with_config(par_iter, config)?;\n        Ok(tree.into())\n    }\n}\n\nimpl<\n        H: Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        BaseArity: PoseidonArity,\n        SubTreeArity: PoseidonArity,\n        TopTreeArity: PoseidonArity,\n    > Debug for MerkleTreeWrapper<H, S, BaseArity, SubTreeArity, TopTreeArity>\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"MerkleTreeWrapper\")\n            .field(\"inner\", &self.inner)\n            .field(\"Hasher\", &H::name())\n            .finish()\n    }\n}\n\nimpl<\n        H: Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        BaseArity: PoseidonArity,\n        SubTreeArity: PoseidonArity,\n        TopTreeArity: PoseidonArity,\n    > Deref for MerkleTreeWrapper<H, S, BaseArity, SubTreeArity, TopTreeArity>\n{\n   
 type Target = MerkleTree<H::Domain, H::Function, S, BaseArity, SubTreeArity, TopTreeArity>;\n\n    fn deref(&self) -> &Self::Target {\n        &self.inner\n    }\n}\n\nimpl<\n        H: Hasher,\n        S: Store<<H as Hasher>::Domain>,\n        BaseArity: PoseidonArity,\n        SubTreeArity: PoseidonArity,\n        TopTreeArity: PoseidonArity,\n    > DerefMut for MerkleTreeWrapper<H, S, BaseArity, SubTreeArity, TopTreeArity>\n{\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        &mut self.inner\n    }\n}\n"
  },
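  {
    "path": "storage-proofs-core/examples/wrapper_deref_sketch.rs",
    "content": "//! Illustrative sketch (not part of the original crate): `MerkleTreeWrapper`\n//! is a newtype over `merkletree::merkle::MerkleTree` that forwards to the\n//! wrapped tree through `Deref`/`DerefMut`, so inner methods such as\n//! `leafs()` can be called on the wrapper directly. This standalone example\n//! reproduces the pattern with a toy inner type; all names are hypothetical.\n\nuse std::ops::{Deref, DerefMut};\n\n/// Stand-in for the wrapped `MerkleTree`.\nstruct Inner {\n    leafs: usize,\n}\n\nimpl Inner {\n    fn leafs(&self) -> usize {\n        self.leafs\n    }\n}\n\n/// Stand-in for `MerkleTreeWrapper`, holding the tree as `inner`.\nstruct Wrapper {\n    inner: Inner,\n}\n\nimpl Deref for Wrapper {\n    type Target = Inner;\n\n    fn deref(&self) -> &Self::Target {\n        &self.inner\n    }\n}\n\nimpl DerefMut for Wrapper {\n    fn deref_mut(&mut self) -> &mut Self::Target {\n        &mut self.inner\n    }\n}\n\nfn main() {\n    let mut tree = Wrapper {\n        inner: Inner { leafs: 64 },\n    };\n    // `Deref` lets method calls fall through to the inner tree.\n    assert_eq!(tree.leafs(), 64);\n    // `DerefMut` gives mutable access the same way.\n    tree.leafs = 128;\n    assert_eq!(tree.leafs(), 128);\n}\n"
  },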
  {
    "path": "storage-proofs-core/src/multi_proof.rs",
    "content": "use std::io::{Read, Write};\n\nuse anyhow::{ensure, Context};\nuse bellperson::{\n    bls::Bls12,\n    groth16::{self, PreparedVerifyingKey},\n};\n\nuse crate::error::Result;\n\npub struct MultiProof<'a> {\n    pub circuit_proofs: Vec<groth16::Proof<Bls12>>,\n    pub verifying_key: &'a PreparedVerifyingKey<Bls12>,\n}\n\nconst GROTH_PROOF_SIZE: usize = 192;\n\nimpl<'a> MultiProof<'a> {\n    pub fn new(\n        groth_proofs: Vec<groth16::Proof<Bls12>>,\n        verifying_key: &'a PreparedVerifyingKey<Bls12>,\n    ) -> Self {\n        MultiProof {\n            circuit_proofs: groth_proofs,\n            verifying_key,\n        }\n    }\n\n    pub fn new_from_reader<R: Read>(\n        partitions: Option<usize>,\n        mut reader: R,\n        verifying_key: &'a PreparedVerifyingKey<Bls12>,\n    ) -> Result<Self> {\n        let num_proofs = partitions.unwrap_or(1);\n\n        let mut proof_vec: Vec<u8> = Vec::with_capacity(num_proofs * GROTH_PROOF_SIZE);\n        reader.read_to_end(&mut proof_vec)?;\n\n        Self::new_from_bytes(partitions, &proof_vec, verifying_key)\n    }\n\n    // Parallelizing reduces deserialization time for 10 proofs from 13ms to 2ms.\n    pub fn new_from_bytes(\n        partitions: Option<usize>,\n        proof_bytes: &[u8],\n        verifying_key: &'a PreparedVerifyingKey<Bls12>,\n    ) -> Result<Self> {\n        let num_proofs = partitions.unwrap_or(1);\n\n        let proofs = groth16::Proof::read_many(proof_bytes, num_proofs)?;\n\n        ensure!(\n            num_proofs == proofs.len(),\n            \"expected {} proofs but found only {}\",\n            num_proofs,\n            proofs.len()\n        );\n\n        Ok(Self::new(proofs, verifying_key))\n    }\n\n    pub fn write<W: Write>(&self, mut writer: W) -> Result<()> {\n        for proof in &self.circuit_proofs {\n            proof.write(&mut writer)?\n        }\n        Ok(())\n    }\n\n    pub fn to_vec(&self) -> Result<Vec<u8>> {\n        let mut out = Vec::new();\n        self.write(&mut out).context(\"known allocation target\")?;\n        Ok(out)\n    }\n\n    pub fn len(&self) -> usize {\n        self.circuit_proofs.len()\n    }\n\n    pub fn is_empty(&self) -> bool {\n        self.circuit_proofs.is_empty()\n    }\n}\n"
  },
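  {
    "path": "storage-proofs-core/examples/multi_proof_layout_sketch.rs",
    "content": "//! Illustrative sketch (not part of the original crate): `MultiProof`\n//! deserializes `partitions.unwrap_or(1)` Groth16 proofs of 192 bytes each\n//! from a single byte buffer. This standalone example mirrors only the\n//! length bookkeeping over plain byte slices; the function name is\n//! hypothetical.\n\nconst GROTH_PROOF_SIZE: usize = 192;\n\n/// Splits a concatenated proof buffer into per-partition chunks, failing if\n/// the buffer does not hold exactly `partitions.unwrap_or(1)` proofs.\nfn split_proofs(partitions: Option<usize>, bytes: &[u8]) -> Result<Vec<&[u8]>, String> {\n    let num_proofs = partitions.unwrap_or(1);\n    if bytes.len() != num_proofs * GROTH_PROOF_SIZE {\n        return Err(format!(\n            \"expected {} proofs ({} bytes) but got {} bytes\",\n            num_proofs,\n            num_proofs * GROTH_PROOF_SIZE,\n            bytes.len()\n        ));\n    }\n    Ok(bytes.chunks(GROTH_PROOF_SIZE).collect())\n}\n\nfn main() {\n    // Ten partitions, matching the deserialization timing comment above.\n    let buf = vec![0u8; 10 * GROTH_PROOF_SIZE];\n    assert_eq!(split_proofs(Some(10), &buf).expect(\"valid layout\").len(), 10);\n\n    // `None` means a single (unpartitioned) proof, so this buffer is too big.\n    assert!(split_proofs(None, &buf).is_err());\n}\n"
  },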
  {
    "path": "storage-proofs-core/src/parameter_cache.rs",
    "content": "use std::collections::{BTreeMap, HashSet};\nuse std::fs::{create_dir_all, File, OpenOptions};\nuse std::io::{self, Read, Seek, SeekFrom, Write};\nuse std::path::{Path, PathBuf};\nuse std::sync::Mutex;\nuse std::time::Instant;\n\nuse anyhow::bail;\nuse bellperson::{bls::Bls12, groth16, Circuit};\nuse blake2b_simd::Params as Blake2bParams;\nuse fs2::FileExt;\nuse itertools::Itertools;\nuse lazy_static::lazy_static;\nuse log::info;\nuse rand::RngCore;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\n\nuse crate::{\n    error::{Error, Result},\n    settings::SETTINGS,\n};\n\n/// Bump this when circuits change to invalidate the cache.\npub const VERSION: usize = 28;\n\npub const GROTH_PARAMETER_EXT: &str = \"params\";\npub const PARAMETER_METADATA_EXT: &str = \"meta\";\npub const VERIFYING_KEY_EXT: &str = \"vk\";\n\n#[derive(Debug)]\npub struct LockedFile(File);\n\npub type ParameterMap = BTreeMap<String, ParameterData>;\n\n#[derive(Debug, Deserialize, Serialize)]\npub struct ParameterData {\n    pub cid: String,\n    pub digest: String,\n    pub sector_size: u64,\n}\n\npub const PARAMETERS_DATA: &str = include_str!(\"../parameters.json\");\n\nlazy_static! {\n    pub static ref PARAMETERS: ParameterMap =\n        serde_json::from_str(PARAMETERS_DATA).expect(\"Invalid parameters.json\");\n    /// Contains the parameters that were previously verified. This way the parameter files are\n    /// only hashed once and not on every usage.\n    static ref VERIFIED_PARAMETERS: Mutex<HashSet<String>> = Mutex::new(HashSet::new());\n}\n\npub fn parameter_id(cache_id: &str) -> String {\n    format!(\"v{}-{}.params\", VERSION, cache_id)\n}\n\npub fn verifying_key_id(cache_id: &str) -> String {\n    format!(\"v{}-{}.vk\", VERSION, cache_id)\n}\n\npub fn metadata_id(cache_id: &str) -> String {\n    format!(\"v{}-{}.meta\", VERSION, cache_id)\n}\n\n/// Get the correct parameter data for a given cache id.\npub fn get_parameter_data_from_id(parameter_id: &str) -> Option<&ParameterData> {\n    PARAMETERS.get(parameter_id)\n}\n\n/// Get the correct parameter data for a given cache id.\npub fn get_parameter_data(cache_id: &str) -> Option<&ParameterData> {\n    PARAMETERS.get(&parameter_id(cache_id))\n}\n\n/// Get the correct verifying key data for a given cache id.\npub fn get_verifying_key_data(cache_id: &str) -> Option<&ParameterData> {\n    PARAMETERS.get(&verifying_key_id(cache_id))\n}\n\n// TODO: use in memory lock as well, as file locks do not guarantee exclusive access across OSes.\n\nimpl LockedFile {\n    pub fn open_exclusive_read<P: AsRef<Path>>(p: P) -> io::Result<Self> {\n        let f = OpenOptions::new().read(true).create(false).open(p)?;\n        f.lock_exclusive()?;\n\n        Ok(LockedFile(f))\n    }\n\n    pub fn open_exclusive<P: AsRef<Path>>(p: P) -> io::Result<Self> {\n        let f = OpenOptions::new()\n            .read(true)\n            .write(true)\n            .create_new(true)\n            .open(p)?;\n        f.lock_exclusive()?;\n\n        Ok(LockedFile(f))\n    }\n\n    pub fn open_shared_read<P: AsRef<Path>>(p: P) -> io::Result<Self> {\n        let f = OpenOptions::new().read(true).create(false).open(p)?;\n        f.lock_shared()?;\n\n        Ok(LockedFile(f))\n    }\n}\n\nimpl AsRef<File> for LockedFile {\n    fn as_ref(&self) -> &File {\n        &self.0\n    }\n}\n\nimpl Write for LockedFile {\n    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {\n        self.0.write(buf)\n    }\n\n    fn flush(&mut self) -> io::Result<()> {\n        
self.0.flush()\n    }\n}\n\nimpl Read for LockedFile {\n    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {\n        self.0.read(buf)\n    }\n}\n\nimpl Seek for LockedFile {\n    fn seek(&mut self, pos: SeekFrom) -> io::Result<u64> {\n        self.0.seek(pos)\n    }\n}\n\nimpl Drop for LockedFile {\n    fn drop(&mut self) {\n        self.0\n            .unlock()\n            .unwrap_or_else(|e| panic!(\"{}: failed to unlock file {:?} safely\", e, &self.0));\n    }\n}\n\npub fn parameter_cache_dir_name() -> String {\n    SETTINGS.parameter_cache.clone()\n}\n\npub fn parameter_cache_dir() -> PathBuf {\n    Path::new(&parameter_cache_dir_name()).to_path_buf()\n}\n\npub fn parameter_cache_params_path(parameter_set_identifier: &str) -> PathBuf {\n    let dir = Path::new(&parameter_cache_dir_name()).to_path_buf();\n    dir.join(format!(\n        \"v{}-{}.{}\",\n        VERSION, parameter_set_identifier, GROTH_PARAMETER_EXT\n    ))\n}\n\npub fn parameter_cache_metadata_path(parameter_set_identifier: &str) -> PathBuf {\n    let dir = Path::new(&parameter_cache_dir_name()).to_path_buf();\n    dir.join(format!(\n        \"v{}-{}.{}\",\n        VERSION, parameter_set_identifier, PARAMETER_METADATA_EXT\n    ))\n}\n\npub fn parameter_cache_verifying_key_path(parameter_set_identifier: &str) -> PathBuf {\n    let dir = Path::new(&parameter_cache_dir_name()).to_path_buf();\n    dir.join(format!(\n        \"v{}-{}.{}\",\n        VERSION, parameter_set_identifier, VERIFYING_KEY_EXT\n    ))\n}\n\nfn ensure_ancestor_dirs_exist(cache_entry_path: PathBuf) -> Result<PathBuf> {\n    info!(\n        \"ensuring that all ancestor directories for: {:?} exist\",\n        cache_entry_path\n    );\n\n    if let Some(parent_dir) = cache_entry_path.parent() {\n        if let Err(err) = create_dir_all(&parent_dir) {\n            match err.kind() {\n                io::ErrorKind::AlreadyExists => {}\n                _ => return Err(From::from(err)),\n            }\n        }\n    } else {\n        bail!(\"{:?} has no parent directory\", cache_entry_path);\n    }\n\n    Ok(cache_entry_path)\n}\n\npub trait ParameterSetMetadata {\n    fn identifier(&self) -> String;\n    fn sector_size(&self) -> u64;\n}\n\n#[derive(Clone, Debug, Serialize, Deserialize)]\npub struct CacheEntryMetadata {\n    pub sector_size: u64,\n}\n\npub trait CacheableParameters<C, P>\nwhere\n    C: Circuit<Bls12>,\n    P: ParameterSetMetadata,\n{\n    fn cache_prefix() -> String;\n\n    fn cache_meta(pub_params: &P) -> CacheEntryMetadata {\n        CacheEntryMetadata {\n            sector_size: pub_params.sector_size(),\n        }\n    }\n\n    fn cache_identifier(pub_params: &P) -> String {\n        let param_identifier = pub_params.identifier();\n        info!(\"parameter set identifier for cache: {}\", param_identifier);\n        let mut hasher = Sha256::default();\n        hasher.update(&param_identifier.into_bytes());\n        let circuit_hash = hasher.finalize();\n        format!(\n            \"{}-{:02x}\",\n            Self::cache_prefix(),\n            circuit_hash.iter().format(\"\")\n        )\n    }\n\n    fn get_param_metadata(_circuit: C, pub_params: &P) -> Result<CacheEntryMetadata> {\n        let id = Self::cache_identifier(pub_params);\n\n        // generate (or load) metadata\n        let meta_path = ensure_ancestor_dirs_exist(parameter_cache_metadata_path(&id))?;\n        read_cached_metadata(&meta_path)\n            .or_else(|_| write_cached_metadata(&meta_path, Self::cache_meta(pub_params)))\n            
.map_err(Into::into)\n    }\n\n    /// If the rng option argument is set, parameters will be\n    /// generated using it.  This is used for testing only, or where\n    /// parameters are otherwise unavailable (e.g. benches).  If rng\n    /// is not set, an error will result if parameters are not\n    /// present.\n    fn get_groth_params<R: RngCore>(\n        rng: Option<&mut R>,\n        circuit: C,\n        pub_params: &P,\n    ) -> Result<groth16::MappedParameters<Bls12>> {\n        let id = Self::cache_identifier(pub_params);\n        let cache_path = ensure_ancestor_dirs_exist(parameter_cache_params_path(&id))?;\n\n        let generate = || -> Result<_> {\n            if let Some(rng) = rng {\n                info!(\"Actually generating groth params. (id: {})\", &id);\n                let start = Instant::now();\n                let parameters = groth16::generate_random_parameters::<Bls12, _, _>(circuit, rng)?;\n                let generation_time = start.elapsed();\n                info!(\n                    \"groth_parameter_generation_time: {:?} (id: {})\",\n                    generation_time, &id\n                );\n                Ok(parameters)\n            } else {\n                bail!(\n                    \"No cached parameters found for {} [failure finding {}]\",\n                    id,\n                    cache_path.display()\n                );\n            }\n        };\n\n        // load or generate Groth parameter mappings\n        read_cached_params(&cache_path).or_else(|err| match err.downcast::<Error>() {\n            Ok(error @ Error::InvalidParameters(_)) => Err(error.into()),\n            _ => {\n                // if the file already exists, another process is already trying to generate these.\n                if !cache_path.exists() {\n                    match write_cached_params(&cache_path, generate()?) {\n                        Ok(_) => {}\n                        Err(e) if e.kind() == io::ErrorKind::AlreadyExists => {\n                            // other thread just wrote it, do nothing\n                        }\n                        Err(e) => panic!(\"{}: failed to write generated parameters to cache\", e),\n                    }\n                }\n                Ok(read_cached_params(&cache_path)?)\n            }\n        })\n    }\n\n    /// If the rng option argument is set, parameters will be\n    /// generated using it.  This is used for testing only, or where\n    /// parameters are otherwise unavailable (e.g. benches).  If rng\n    /// is not set, an error will result if parameters are not\n    /// present.\n    fn get_verifying_key<R: RngCore>(\n        rng: Option<&mut R>,\n        circuit: C,\n        pub_params: &P,\n    ) -> Result<groth16::VerifyingKey<Bls12>> {\n        let id = Self::cache_identifier(pub_params);\n\n        let generate = || -> Result<groth16::VerifyingKey<Bls12>> {\n            let groth_params = Self::get_groth_params(rng, circuit, pub_params)?;\n            info!(\"Getting verifying key. 
(id: {})\", &id);\n            Ok(groth_params.vk)\n        };\n\n        // generate (or load) verifying key\n        let cache_path = ensure_ancestor_dirs_exist(parameter_cache_verifying_key_path(&id))?;\n        match read_cached_verifying_key(&cache_path) {\n            Ok(key) => Ok(key),\n            Err(_) => write_cached_verifying_key(&cache_path, generate()?).map_err(Into::into),\n        }\n    }\n}\n\nfn ensure_parent(path: &PathBuf) -> io::Result<()> {\n    match path.parent() {\n        Some(dir) => {\n            create_dir_all(dir)?;\n            Ok(())\n        }\n        None => Ok(()),\n    }\n}\n\n// Reads parameter mappings using mmap so that they can be lazily\n// loaded later.\npub fn read_cached_params(cache_entry_path: &PathBuf) -> Result<groth16::MappedParameters<Bls12>> {\n    info!(\"checking cache_path: {:?} for parameters\", cache_entry_path);\n\n    let verify_production_params = SETTINGS.verify_production_params;\n\n    // If the verify production params is set, we make sure that the path being accessed matches a\n    // production cache key, found in the 'parameters.json' file. The parameter data file is also\n    // hashed and matched against the hash in the `parameters.json` file.\n    if verify_production_params {\n        let cache_key = cache_entry_path\n            .file_name()\n            .expect(\"failed to get cached param filename\")\n            .to_str()\n            .expect(\"failed to convert to str\")\n            .to_string();\n\n        match get_parameter_data_from_id(&cache_key) {\n            Some(data) => {\n                // Verify the actual hash only once per parameters file\n                let not_yet_verified = VERIFIED_PARAMETERS\n                    .lock()\n                    .expect(\"verified parameters lock failed\")\n                    .get(&cache_key)\n                    .is_none();\n                if not_yet_verified {\n                    info!(\"generating consistency digest for parameters\");\n                    let hash = with_exclusive_read_lock::<_, io::Error, _>(\n                        cache_entry_path,\n                        |mut file| {\n                            let mut hasher = Blake2bParams::new().to_state();\n                            io::copy(&mut file, &mut hasher)\n                                .expect(\"copying file into hasher failed\");\n                            Ok(hasher.finalize())\n                        },\n                    )?;\n                    info!(\"generated consistency digest for parameters\");\n\n                    // The hash in the parameters file is truncated to 256 bits.\n                    let digest_hex = &hash.to_hex()[..32];\n\n                    if digest_hex != data.digest {\n                        return Err(Error::InvalidParameters(\n                            cache_entry_path.display().to_string(),\n                        )\n                        .into());\n                    }\n\n                    VERIFIED_PARAMETERS\n                        .lock()\n                        .expect(\"verified parameters lock failed\")\n                        .insert(cache_key);\n                }\n            }\n            None => {\n                return Err(Error::InvalidParameters(cache_entry_path.display().to_string()).into())\n            }\n        }\n    }\n\n    with_exclusive_read_lock::<_, io::Error, _>(cache_entry_path, |_file| {\n        let mapped_params =\n            
groth16::Parameters::build_mapped_parameters(cache_entry_path.to_path_buf(), false)?;\n        info!(\"read parameters from cache {:?} \", cache_entry_path);\n\n        Ok(mapped_params)\n    })\n    .map_err(Into::into)\n}\n\nfn read_cached_verifying_key(\n    cache_entry_path: &PathBuf,\n) -> io::Result<groth16::VerifyingKey<Bls12>> {\n    info!(\n        \"checking cache_path: {:?} for verifying key\",\n        cache_entry_path\n    );\n    with_exclusive_read_lock(cache_entry_path, |mut file| {\n        let key = groth16::VerifyingKey::read(&mut file)?;\n        info!(\"read verifying key from cache {:?} \", cache_entry_path);\n\n        Ok(key)\n    })\n}\n\nfn read_cached_metadata(cache_entry_path: &PathBuf) -> io::Result<CacheEntryMetadata> {\n    info!(\"checking cache_path: {:?} for metadata\", cache_entry_path);\n    with_exclusive_read_lock(cache_entry_path, |file| {\n        let value = serde_json::from_reader(file)?;\n        info!(\"read metadata from cache {:?} \", cache_entry_path);\n\n        Ok(value)\n    })\n}\n\nfn write_cached_metadata(\n    cache_entry_path: &PathBuf,\n    value: CacheEntryMetadata,\n) -> io::Result<CacheEntryMetadata> {\n    with_exclusive_lock(cache_entry_path, |file| {\n        serde_json::to_writer(file, &value)?;\n        info!(\"wrote metadata to cache {:?} \", cache_entry_path);\n\n        Ok(value)\n    })\n}\n\nfn write_cached_verifying_key(\n    cache_entry_path: &PathBuf,\n    value: groth16::VerifyingKey<Bls12>,\n) -> io::Result<groth16::VerifyingKey<Bls12>> {\n    with_exclusive_lock(cache_entry_path, |mut file| {\n        value.write(&mut file)?;\n        file.flush()?;\n        info!(\"wrote verifying key to cache {:?} \", cache_entry_path);\n\n        Ok(value)\n    })\n}\n\nfn write_cached_params(\n    cache_entry_path: &PathBuf,\n    value: groth16::Parameters<Bls12>,\n) -> io::Result<groth16::Parameters<Bls12>> {\n    with_exclusive_lock(cache_entry_path, |mut file| {\n        value.write(&mut file)?;\n        file.flush()?;\n        info!(\"wrote groth parameters to cache {:?} \", cache_entry_path);\n\n        Ok(value)\n    })\n}\n\npub fn with_exclusive_lock<T, E, F>(file_path: &PathBuf, f: F) -> std::result::Result<T, E>\nwhere\n    F: FnOnce(&mut LockedFile) -> std::result::Result<T, E>,\n    E: From<io::Error>,\n{\n    with_open_file(file_path, LockedFile::open_exclusive, f)\n}\n\npub fn with_exclusive_read_lock<T, E, F>(file_path: &PathBuf, f: F) -> std::result::Result<T, E>\nwhere\n    F: FnOnce(&mut LockedFile) -> std::result::Result<T, E>,\n    E: From<io::Error>,\n{\n    with_open_file(file_path, LockedFile::open_exclusive_read, f)\n}\n\npub fn with_open_file<'a, T, E, F, G>(\n    file_path: &'a PathBuf,\n    open_file: G,\n    f: F,\n) -> std::result::Result<T, E>\nwhere\n    F: FnOnce(&mut LockedFile) -> std::result::Result<T, E>,\n    G: FnOnce(&'a PathBuf) -> io::Result<LockedFile>,\n    E: From<io::Error>,\n{\n    ensure_parent(&file_path)?;\n    f(&mut open_file(&file_path)?)\n}\n"
  },
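  {
    "path": "storage-proofs-core/examples/cache_naming_sketch.rs",
    "content": "//! Illustrative sketch (not part of the original crate): every cache entry\n//! in `parameter_cache.rs` is named `v<VERSION>-<cache id>.<ext>`, where the\n//! cache id is `<circuit prefix>-<sha256(parameter-set identifier)>`. This\n//! standalone example reproduces that naming with the `sha2` crate (already\n//! used by the original file); the identifiers below are made up, and the\n//! hex formatting replaces the original's itertools-based variant.\n\nuse sha2::{Digest, Sha256};\n\nconst VERSION: usize = 28;\n\n/// Mirrors `CacheableParameters::cache_identifier`: hash the parameter-set\n/// identifier and prepend the circuit's cache prefix.\nfn cache_identifier(prefix: &str, param_identifier: &str) -> String {\n    let digest = Sha256::digest(param_identifier.as_bytes());\n    let hex: String = digest.iter().map(|b| format!(\"{:02x}\", b)).collect();\n    format!(\"{}-{}\", prefix, hex)\n}\n\nfn main() {\n    let id = cache_identifier(\n        \"example-circuit-prefix\",\n        \"example-public-parameter-identifier\",\n    );\n\n    // The three cache entries derived from one identifier.\n    println!(\"params: v{}-{}.params\", VERSION, id);\n    println!(\"vk:     v{}-{}.vk\", VERSION, id);\n    println!(\"meta:   v{}-{}.meta\", VERSION, id);\n}\n"
  },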
  {
    "path": "storage-proofs-core/src/partitions.rs",
    "content": "pub type Partitions = Option<usize>;\n\npub fn partition_count(partitions: Partitions) -> usize {\n    match partitions {\n        None => 1,\n        Some(0) => panic!(\"cannot specify zero partitions\"),\n        Some(k) => k,\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/pieces.rs",
    "content": "use std::io::Read;\n\nuse anyhow::{ensure, Context};\nuse filecoin_hashers::{Domain, Hasher};\nuse fr32::Fr32Ary;\nuse merkletree::merkle::next_pow2;\n\nuse crate::{\n    error::{Error, Result},\n    merkle::BinaryMerkleTree,\n    util::NODE_SIZE,\n};\n\n/// `position`, `length` are in H::Domain units\n#[derive(Clone, Debug)]\npub struct PieceSpec {\n    pub comm_p: Fr32Ary,\n    pub position: usize,\n    pub number_of_leaves: usize,\n}\n\nimpl PieceSpec {\n    /// `compute_packing` returns a packing list and a proof size.\n    /// A packing list is a pair of (start, length) pairs, relative to the beginning of the piece,\n    /// in leaf units.\n    /// Proof size is a number of elements (size same as one leaf) provided in the variable part of a PieceInclusionProof.\n    pub fn compute_packing(&self, tree_len: usize) -> Result<(Vec<(usize, usize)>, usize)> {\n        ensure!(self.is_aligned(tree_len)?, Error::UnalignedPiece);\n\n        let packing_list = vec![(0, self.number_of_leaves)];\n        Ok((packing_list, self.proof_length(tree_len)))\n    }\n\n    pub fn is_aligned(&self, tree_len: usize) -> Result<bool> {\n        piece_is_aligned(self.position, self.number_of_leaves, tree_len)\n    }\n\n    fn height(&self) -> usize {\n        height_for_length(self.number_of_leaves)\n    }\n\n    // `proof_length` is length of proof that comm_p is in the containing root, excluding comm_p and root, which aren't needed for the proof itself.\n    fn proof_length(&self, tree_len: usize) -> usize {\n        height_for_length(tree_len) - self.height()\n    }\n}\n\n/// Generate `comm_p` from a source and return it as bytes.\npub fn generate_piece_commitment_bytes_from_source<H: Hasher>(\n    source: &mut dyn Read,\n    padded_piece_size: usize,\n) -> Result<Fr32Ary> {\n    ensure!(padded_piece_size > 32, \"piece is too small\");\n    ensure!(padded_piece_size % 32 == 0, \"piece is not valid size\");\n\n    let mut buf = [0; NODE_SIZE];\n\n    let parts = (padded_piece_size as f64 / NODE_SIZE as f64).ceil() as usize;\n\n    let tree = BinaryMerkleTree::<H>::try_from_iter((0..parts).map(|_| {\n        source.read_exact(&mut buf)?;\n        <H::Domain as Domain>::try_from_bytes(&buf).context(\"invalid Fr element\")\n    }))\n    .context(\"failed to build tree\")?;\n\n    let mut comm_p_bytes = [0; NODE_SIZE];\n    let comm_p = tree.root();\n    comm_p.write_bytes(&mut comm_p_bytes)?;\n\n    Ok(comm_p_bytes)\n}\n\n////////////////////////////////////////////////////////////////////////////////\n// Utility\n\npub fn piece_is_aligned(position: usize, length: usize, tree_len: usize) -> Result<bool> {\n    let capacity_at_pos = subtree_capacity(position, tree_len)?;\n\n    Ok(capacity_at_pos.is_power_of_two() && capacity_at_pos >= length)\n}\n\nfn height_for_length(n: usize) -> usize {\n    if n == 0 {\n        0\n    } else {\n        (n as f64).log2().ceil() as usize\n    }\n}\n\nfn subtree_capacity(pos: usize, total: usize) -> Result<usize> {\n    ensure!(pos < total, \"position must be less than tree capacity\");\n\n    let mut capacity = 1;\n    // If tree is not 'full', then pos 0 will have subtree_capacity greater than size of tree.\n    let mut cursor = pos + next_pow2(total);\n\n    while cursor & 1 == 0 {\n        capacity *= 2;\n        cursor >>= 1;\n    }\n    Ok(capacity)\n}\n////////////////////////////////////////////////////////////////////////////////\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use filecoin_hashers::poseidon::PoseidonHasher;\n\n    #[test]\n    
fn test_subtree_capacity() {\n        assert_eq!(subtree_capacity(0, 16).unwrap(), 16);\n        assert_eq!(subtree_capacity(1, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(2, 16).unwrap(), 2);\n        assert_eq!(subtree_capacity(3, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(4, 16).unwrap(), 4);\n        assert_eq!(subtree_capacity(5, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(6, 16).unwrap(), 2);\n        assert_eq!(subtree_capacity(7, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(8, 16).unwrap(), 8);\n        assert_eq!(subtree_capacity(9, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(10, 16).unwrap(), 2);\n        assert_eq!(subtree_capacity(11, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(12, 16).unwrap(), 4);\n        assert_eq!(subtree_capacity(13, 16).unwrap(), 1);\n        assert_eq!(subtree_capacity(14, 16).unwrap(), 2);\n        assert_eq!(subtree_capacity(15, 16).unwrap(), 1);\n    }\n\n    #[test]\n    fn test_generate_piece_commitment_bytes_from_source() -> Result<()> {\n        let some_bytes: Vec<u8> = vec![0; 64];\n        let mut some_bytes_slice: &[u8] = &some_bytes;\n        generate_piece_commitment_bytes_from_source::<PoseidonHasher>(&mut some_bytes_slice, 64)\n            .expect(\"threshold for sufficient bytes is 32\");\n\n        let not_enough_bytes: Vec<u8> = vec![0; 7];\n        let mut not_enough_bytes_slice: &[u8] = &not_enough_bytes;\n        assert!(\n            generate_piece_commitment_bytes_from_source::<PoseidonHasher>(\n                &mut not_enough_bytes_slice,\n                7\n            )\n            .is_err(),\n            \"insufficient bytes should error out\"\n        );\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/por.rs",
    "content": "use std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse filecoin_hashers::{Domain, Hasher};\nuse serde::{Deserialize, Serialize};\n\nuse crate::{\n    error::{Error, Result},\n    merkle::{MerkleProofTrait, MerkleTreeTrait},\n    parameter_cache::ParameterSetMetadata,\n    proof::{NoRequirements, ProofScheme},\n};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct DataProof<Proof: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"<Proof::Hasher as Hasher>::Domain: Serialize\",\n        deserialize = \"<Proof::Hasher as Hasher>::Domain: Deserialize<'de>\"\n    ))]\n    pub proof: Proof,\n    #[serde(bound(\n        serialize = \"<Proof::Hasher as Hasher>::Domain: Serialize\",\n        deserialize = \"<Proof::Hasher as Hasher>::Domain: Deserialize<'de>\"\n    ))]\n    pub data: <Proof::Hasher as Hasher>::Domain,\n}\n\n/// The parameters shared between the prover and verifier.\n#[derive(Clone, Debug)]\npub struct PublicParams {\n    /// How many leaves the underlying merkle tree has.\n    pub leaves: usize,\n    pub private: bool,\n}\n\nimpl ParameterSetMetadata for PublicParams {\n    fn identifier(&self) -> String {\n        format!(\n            \"merklepor::PublicParams{{leaves: {}; private: {}}}\",\n            self.leaves, self.private\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        unimplemented!(\"required for parameter metadata file generation\")\n    }\n}\n\n/// The inputs that are necessary for the verifier to verify the proof.\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct PublicInputs<T: Domain> {\n    /// The root hash of the underlying merkle tree.\n    #[serde(bound = \"\")]\n    pub commitment: Option<T>,\n    /// The challenge, which leaf to prove.\n    pub challenge: usize,\n}\n\n/// The inputs that are only available to the prover.\n#[derive(Debug)]\npub struct PrivateInputs<'a, Tree: MerkleTreeTrait> {\n    /// The data of the leaf.\n    pub leaf: <Tree::Hasher as Hasher>::Domain,\n    /// The underlying merkle tree.\n    pub tree: &'a Tree,\n}\n\nimpl<'a, Tree: MerkleTreeTrait> PrivateInputs<'a, Tree> {\n    pub fn new(leaf: <Tree::Hasher as Hasher>::Domain, tree: &'a Tree) -> Self {\n        PrivateInputs { leaf, tree }\n    }\n}\n\n#[derive(Clone, Debug)]\npub struct SetupParams {\n    pub leaves: usize,\n    pub private: bool,\n}\n\n/// Merkle tree based proof of retrievability.\n#[derive(Debug, Default)]\npub struct PoR<Tree: MerkleTreeTrait> {\n    _tree: PhantomData<Tree>,\n}\n\nimpl<'a, Tree: 'a + MerkleTreeTrait> ProofScheme<'a> for PoR<Tree> {\n    type PublicParams = PublicParams;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<Tree::Hasher as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<'a, Tree>;\n    type Proof = DataProof<Tree::Proof>;\n    type Requirements = NoRequirements;\n\n    fn setup(sp: &SetupParams) -> Result<PublicParams> {\n        // atm only binary trees are implemented\n        Ok(PublicParams {\n            leaves: sp.leaves,\n            private: sp.private,\n        })\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        let challenge = pub_inputs.challenge % pub_params.leaves;\n        let tree = priv_inputs.tree;\n\n        if let Some(ref commitment) = pub_inputs.commitment {\n            ensure!(commitment == &tree.root(), Error::InvalidCommitment);\n        }\n        let 
proof = tree.gen_proof(challenge)?;\n        Ok(Self::Proof {\n            proof,\n            data: priv_inputs.leaf,\n        })\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        {\n            // This was verify_proof_meta.\n            let commitments_match = match pub_inputs.commitment {\n                Some(ref commitment) => commitment == &proof.proof.root(),\n                None => true,\n            };\n\n            let expected_path_length = proof.proof.expected_len(pub_params.leaves);\n            let path_length_match = expected_path_length == proof.proof.path().len();\n\n            if !(commitments_match && path_length_match) {\n                dbg!(\n                    commitments_match,\n                    path_length_match,\n                    expected_path_length,\n                    proof.proof.path().len()\n                );\n                return Ok(false);\n            }\n        }\n\n        let data_valid = proof.proof.validate_data(proof.data);\n        let path_valid = proof.proof.validate(pub_inputs.challenge);\n\n        Ok(data_valid && path_valid)\n    }\n}\n"
  },
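  {
    "path": "storage-proofs-core/examples/por_roundtrip.rs",
    "content": "// Hypothetical usage sketch (this example file is not part of the original\n// repository): a round-trip through the vanilla PoR scheme, condensed from\n// the por_vanilla tests and assuming the same dev-dependencies are available\n// to examples.\nuse bellperson::bls::Fr;\nuse ff::Field;\nuse filecoin_hashers::{poseidon::PoseidonHasher, Domain, Hasher};\nuse fr32::fr_into_bytes;\nuse generic_array::typenum::{U0, U2};\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    merkle::{create_base_merkle_tree, DiskStore, MerkleTreeWrapper},\n    por::{self, PoR},\n    proof::ProofScheme,\n    util::data_at_node,\n    TEST_SEED,\n};\n\ntype Tree =\n    MerkleTreeWrapper<PoseidonHasher, DiskStore<<PoseidonHasher as Hasher>::Domain>, U2, U0, U0>;\n\nfn main() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n    let leaves = 16;\n\n    // Random leaf data and a binary merkle tree committing to it.\n    let data: Vec<u8> = (0..leaves)\n        .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n        .collect();\n    let tree = create_base_merkle_tree::<Tree>(None, leaves, data.as_slice())\n        .expect(\"create_base_merkle_tree failure\");\n\n    // Publish the root and challenge a single leaf.\n    let pub_params = por::PublicParams {\n        leaves,\n        private: false,\n    };\n    let pub_inputs = por::PublicInputs {\n        challenge: 3,\n        commitment: Some(tree.root()),\n    };\n\n    // The prover additionally knows the challenged leaf and the tree itself.\n    let leaf = <PoseidonHasher as Hasher>::Domain::try_from_bytes(\n        data_at_node(data.as_slice(), pub_inputs.challenge).expect(\"data_at_node failure\"),\n    )\n    .expect(\"try_from_bytes failure\");\n    let priv_inputs = por::PrivateInputs::new(leaf, &tree);\n\n    let proof = PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs).expect(\"proving failed\");\n    assert!(PoR::<Tree>::verify(&pub_params, &pub_inputs, &proof).expect(\"verification failed\"));\n}\n"
  },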
  {
    "path": "storage-proofs-core/src/proof.rs",
    "content": "use std::time::Instant;\n\nuse log::info;\nuse serde::{de::DeserializeOwned, Serialize};\n\nuse crate::error::Result;\n\n/// The ProofScheme trait provides the methods that any proof scheme needs to implement.\npub trait ProofScheme<'a> {\n    type PublicParams: Clone;\n    type SetupParams: Clone;\n    type PublicInputs: Clone + Serialize + DeserializeOwned;\n    type PrivateInputs;\n    type Proof: Clone + Serialize + DeserializeOwned;\n    type Requirements: Default;\n\n    /// setup is used to generate public parameters from setup parameters in order to specialize\n    /// a ProofScheme to the specific parameters required by a consumer.\n    fn setup(_: &Self::SetupParams) -> Result<Self::PublicParams>;\n\n    /// prove generates and returns a proof from public parameters, public inputs, and private inputs.\n    fn prove(\n        _: &Self::PublicParams,\n        _: &Self::PublicInputs,\n        _: &Self::PrivateInputs,\n    ) -> Result<Self::Proof>;\n\n    fn prove_all_partitions(\n        pub_params: &Self::PublicParams,\n        pub_in: &Self::PublicInputs,\n        priv_in: &Self::PrivateInputs,\n        partition_count: usize,\n    ) -> Result<Vec<Self::Proof>> {\n        info!(\"groth_proof_count: {}\", partition_count);\n        info!(\"generating {} groth proofs.\", partition_count);\n\n        let start = Instant::now();\n\n        let result = (0..partition_count)\n            .map(|k| {\n                info!(\"generating groth proof {}.\", k);\n                let start = Instant::now();\n\n                let partition_pub_in = Self::with_partition((*pub_in).clone(), Some(k));\n                let proof = Self::prove(pub_params, &partition_pub_in, priv_in);\n\n                let proof_time = start.elapsed();\n                info!(\"groth_proof_time: {:?}\", proof_time);\n\n                proof\n            })\n            .collect::<Result<Vec<Self::Proof>>>();\n\n        let total_proof_time = start.elapsed();\n        info!(\"total_groth_proof_time: {:?}\", total_proof_time);\n\n        result\n    }\n\n    /// verify returns true if the supplied proof is valid for the given public parameter and public inputs.\n    /// Note that verify does not have access to private inputs.\n    /// Remember that proof is untrusted, and any data it provides MUST be validated as corresponding\n    /// to the supplied public parameters and inputs.\n    fn verify(\n        _pub_params: &Self::PublicParams,\n        _pub_inputs: &Self::PublicInputs,\n        _proof: &Self::Proof,\n    ) -> Result<bool> {\n        unimplemented!();\n    }\n\n    fn verify_all_partitions(\n        pub_params: &Self::PublicParams,\n        pub_in: &Self::PublicInputs,\n        proofs: &[Self::Proof],\n    ) -> Result<bool> {\n        for (k, proof) in proofs.iter().enumerate() {\n            let partition_pub_in = Self::with_partition((*pub_in).clone(), Some(k)); //\n\n            if !Self::verify(pub_params, &partition_pub_in, proof)? {\n                return Ok(false);\n            }\n        }\n\n        Ok(true)\n    }\n\n    // This method must be specialized by concrete ProofScheme implementations which use partitions.\n    fn with_partition(pub_in: Self::PublicInputs, _k: Option<usize>) -> Self::PublicInputs {\n        pub_in\n    }\n\n    fn satisfies_requirements(\n        _pub_params: &Self::PublicParams,\n        _requirements: &Self::Requirements,\n        _partitions: usize,\n    ) -> bool {\n        true\n    }\n}\n\n#[derive(Default)]\npub struct NoRequirements;\n"
  },
  {
    "path": "storage-proofs-core/src/sector.rs",
    "content": "use std::collections::BTreeSet;\nuse std::fmt::{self, Display, Formatter};\n\nuse bellperson::bls::{Fr, FrRepr};\nuse byteorder::{ByteOrder, LittleEndian};\nuse ff::PrimeField;\nuse serde::{Deserialize, Serialize};\n\n/// An ordered set of `SectorId`s.\npub type OrderedSectorSet = BTreeSet<SectorId>;\n\n/// Identifier for a single sector.\n#[derive(\n    Default, Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Serialize, Deserialize,\n)]\npub struct SectorId(u64);\n\nimpl From<u64> for SectorId {\n    fn from(n: u64) -> Self {\n        SectorId(n)\n    }\n}\n\nimpl From<SectorId> for u64 {\n    fn from(n: SectorId) -> Self {\n        n.0\n    }\n}\n\nimpl From<SectorId> for Fr {\n    fn from(n: SectorId) -> Self {\n        Fr::from_repr(FrRepr::from(n.0)).expect(\"from repr failure\")\n    }\n}\n\nimpl Display for SectorId {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        write!(f, \"SectorId({})\", self.0)\n    }\n}\n\nimpl SectorId {\n    pub fn as_fr_safe(self) -> [u8; 32] {\n        let mut buf: [u8; 32] = [0; 32];\n        LittleEndian::write_u64(&mut buf[0..8], self.0);\n        buf\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/src/settings.rs",
    "content": "use std::env;\n\nuse config::{Config, ConfigError, Environment, File};\nuse lazy_static::lazy_static;\nuse serde::{Deserialize, Serialize};\n\nlazy_static! {\n    pub static ref SETTINGS: Settings = Settings::new().expect(\"invalid configuration\");\n}\n\nconst SETTINGS_PATH: &str = \"./rust-fil-proofs.config.toml\";\nconst PREFIX: &str = \"FIL_PROOFS\";\n\n#[derive(Debug, Serialize, Deserialize)]\n#[serde(default)]\npub struct Settings {\n    pub verify_cache: bool,\n    pub verify_production_params: bool,\n    pub use_gpu_column_builder: bool,\n    pub max_gpu_column_batch_size: u32,\n    pub column_write_batch_size: u32,\n    pub use_gpu_tree_builder: bool,\n    pub max_gpu_tree_batch_size: u32,\n    pub rows_to_discard: u32,\n    pub sdr_parents_cache_size: u32,\n    pub window_post_synthesis_num_cpus: u32,\n    pub parameter_cache: String,\n    pub parent_cache: String,\n    pub use_multicore_sdr: bool,\n    pub multicore_sdr_producers: usize,\n    pub multicore_sdr_producer_stride: u64,\n    pub multicore_sdr_lookahead: usize,\n}\n\nimpl Default for Settings {\n    fn default() -> Self {\n        Settings {\n            verify_cache: false,\n            verify_production_params: false,\n            use_gpu_column_builder: false,\n            max_gpu_column_batch_size: 400_000,\n            column_write_batch_size: 262_144,\n            use_gpu_tree_builder: false,\n            max_gpu_tree_batch_size: 700_000,\n            rows_to_discard: 2,\n            sdr_parents_cache_size: 2_048,\n            window_post_synthesis_num_cpus: num_cpus::get() as u32,\n            // `parameter_cache` does not use the cache() mechanism because it is now used\n            // for durable, canonical Groth parameters and verifying keys.\n            // The name is retained for backwards compatibility.\n            parameter_cache: \"/var/tmp/filecoin-proof-parameters/\".to_string(),\n            parent_cache: cache(\"filecoin-parents\"),\n            use_multicore_sdr: false,\n            multicore_sdr_producers: 3,\n            multicore_sdr_producer_stride: 128,\n            multicore_sdr_lookahead: 800,\n        }\n    }\n}\n\n/// All cache files and directories paths should be constructed using this function,\n/// which its base directory from the FIL_PROOFS_CACHE_DIR env var, and defaults to /var/tmp.\n/// Note that FIL_PROOFS_CACHE_DIR is not a first class setting and can only be set by env var.\nfn cache(s: &str) -> String {\n    let cache_var = format!(\"{}_CACHE_DIR\", PREFIX);\n    let mut cache_name = env::var(cache_var).unwrap_or_else(|_| \"/var/tmp/\".to_string());\n    cache_name.push_str(s);\n    cache_name\n}\n\nimpl Settings {\n    fn new() -> Result<Settings, ConfigError> {\n        let mut s = Config::new();\n\n        s.merge(File::with_name(SETTINGS_PATH).required(false))?;\n        s.merge(Environment::with_prefix(PREFIX))?;\n\n        s.try_into()\n    }\n}\n"
  },
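  {
    "path": "storage-proofs-core/examples/show_settings.rs",
    "content": "// Hypothetical usage sketch (this example file is not part of the original\n// repository): reading the lazily-initialized SETTINGS, which layers\n// rust-fil-proofs.config.toml and FIL_PROOFS_-prefixed environment variables\n// over the compiled-in defaults.\nuse storage_proofs_core::settings::SETTINGS;\n\nfn main() {\n    // e.g. FIL_PROOFS_USE_MULTICORE_SDR=true would override the default `false`.\n    println!(\"use_multicore_sdr: {}\", SETTINGS.use_multicore_sdr);\n    println!(\"parameter_cache: {}\", SETTINGS.parameter_cache);\n    println!(\"parent_cache: {}\", SETTINGS.parent_cache);\n}\n"
  },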
  {
    "path": "storage-proofs-core/src/test_helper.rs",
    "content": "use std::fs::OpenOptions;\nuse std::io::Write;\nuse std::path::Path;\n\nuse memmap::{MmapMut, MmapOptions};\n\npub fn setup_replica(data: &[u8], replica_path: &Path) -> MmapMut {\n    let mut f = OpenOptions::new()\n        .read(true)\n        .write(true)\n        .create(true)\n        .open(replica_path)\n        .expect(\"Failed to create replica\");\n    f.write_all(data).expect(\"Failed to write data to replica\");\n\n    unsafe {\n        MmapOptions::new()\n            .map_mut(&f)\n            .expect(\"Failed to back memory map with tempfile\")\n    }\n}\n\n#[macro_export]\nmacro_rules! table_tests {\n    ($property_test_func:ident {\n        $( $(#[$attr:meta])* $test_name:ident( $( $param:expr ),* ); )+\n    }) => {\n        $(\n            $(#[$attr])*\n                #[test]\n            fn $test_name() {\n                $property_test_func($( $param ),* )\n            }\n        )+\n    }\n}\n"
  },
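  {
    "path": "storage-proofs-core/tests/table_tests_example.rs",
    "content": "// Hypothetical usage sketch (this test file is not part of the original\n// repository): `table_tests!` expands each `name(args);` row into its own\n// `#[test]` function that calls the shared property-test function.\nuse storage_proofs_core::table_tests;\n\nfn prop_double(n: usize, expected: usize) {\n    assert_eq!(n * 2, expected);\n}\n\ntable_tests! {\n    prop_double {\n        double_one(1, 2);\n        double_two(2, 4);\n    }\n}\n"
  },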
  {
    "path": "storage-proofs-core/src/util.rs",
    "content": "use std::cmp::min;\n\nuse anyhow::ensure;\nuse bellperson::{\n    bls::Engine,\n    gadgets::boolean::{AllocatedBit, Boolean},\n    ConstraintSystem, SynthesisError,\n};\nuse merkletree::merkle::get_merkle_tree_row_count;\n\nuse crate::{error::Error, settings::SETTINGS};\n\npub const NODE_SIZE: usize = 32;\n\n/// Returns the start position of the data, 0-indexed.\npub fn data_at_node_offset(v: usize) -> usize {\n    v * NODE_SIZE\n}\n\n/// Returns the byte slice representing one node (of uniform size, NODE_SIZE) at position v in data.\npub fn data_at_node(data: &[u8], v: usize) -> anyhow::Result<&[u8]> {\n    let offset = data_at_node_offset(v);\n\n    ensure!(\n        offset + NODE_SIZE <= data.len(),\n        Error::OutOfBounds(offset + NODE_SIZE, data.len())\n    );\n\n    Ok(&data[offset..offset + NODE_SIZE])\n}\n\n/// Converts bytes into their bit representation, in little endian format.\npub fn bytes_into_bits(bytes: &[u8]) -> Vec<bool> {\n    bytes\n        .iter()\n        .flat_map(|&byte| (0..8).map(move |i| (byte >> i) & 1u8 == 1u8))\n        .collect()\n}\n\n/// Converts bytes into their bit representation, in little endian format.\npub fn bytes_into_bits_opt(bytes: &[u8]) -> Vec<Option<bool>> {\n    bytes\n        .iter()\n        .flat_map(|&byte| (0..8).map(move |i| Some((byte >> i) & 1u8 == 1u8)))\n        .collect()\n}\n\n/// Converts bytes into their bit representation, in big endian format.\npub fn bytes_into_bits_be(bytes: &[u8]) -> Vec<bool> {\n    bytes\n        .iter()\n        .flat_map(|&byte| (0..8).rev().map(move |i| (byte >> i) & 1u8 == 1u8))\n        .collect()\n}\n\n/// Converts the bytes into a boolean vector, in little endian format.\npub fn bytes_into_boolean_vec<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    value: Option<&[u8]>,\n    size: usize,\n) -> Result<Vec<Boolean>, SynthesisError> {\n    let values = match value {\n        Some(value) => bytes_into_bits(value).into_iter().map(Some).collect(),\n        None => vec![None; size],\n    };\n\n    let bits = values\n        .into_iter()\n        .enumerate()\n        .map(|(i, b)| {\n            Ok(Boolean::from(AllocatedBit::alloc(\n                cs.namespace(|| format!(\"bit {}\", i)),\n                b,\n            )?))\n        })\n        .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n    Ok(bits)\n}\n\n/// Converts the bytes into a boolean vector, in big endian format.\npub fn bytes_into_boolean_vec_be<E: Engine, CS: ConstraintSystem<E>>(\n    mut cs: CS,\n    value: Option<&[u8]>,\n    size: usize,\n) -> Result<Vec<Boolean>, SynthesisError> {\n    let values = match value {\n        Some(value) => bytes_into_bits_be(value).into_iter().map(Some).collect(),\n        None => vec![None; size],\n    };\n\n    let bits = values\n        .into_iter()\n        .enumerate()\n        .map(|(i, b)| {\n            Ok(Boolean::from(AllocatedBit::alloc(\n                cs.namespace(|| format!(\"bit {}\", i)),\n                b,\n            )?))\n        })\n        .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n    Ok(bits)\n}\n\n#[allow(dead_code)]\n#[inline]\nfn bool_to_u8(bit: bool, offset: usize) -> u8 {\n    if bit {\n        1u8 << offset\n    } else {\n        0u8\n    }\n}\n\n/// Converts a slice of bools into their byte representation, in little endian.\n#[allow(dead_code)]\npub fn bits_to_bytes(bits: &[bool]) -> Vec<u8> {\n    bits.chunks(8)\n        .map(|bits| {\n            bool_to_u8(bits[7], 7)\n                | bool_to_u8(bits[6], 6)\n                | 
bool_to_u8(bits[5], 5)\n                | bool_to_u8(bits[4], 4)\n                | bool_to_u8(bits[3], 3)\n                | bool_to_u8(bits[2], 2)\n                | bool_to_u8(bits[1], 1)\n                | bool_to_u8(bits[0], 0)\n        })\n        .collect()\n}\n\n/// Reverses the order of bits within each byte (bit numbering) without altering the order of\n/// bytes within the array (endianness), when the bit array is viewed as a flattened sequence of octets.\n/// Before intra-byte bit reversal begins, zero-bit padding is added so every byte is full.\npub fn reverse_bit_numbering(bits: Vec<Boolean>) -> Vec<Boolean> {\n    let mut padded_bits = bits;\n    // Pad partial bytes\n    while padded_bits.len() % 8 != 0 {\n        padded_bits.push(Boolean::Constant(false));\n    }\n\n    padded_bits\n        .chunks(8)\n        .map(|chunk| chunk.iter().rev())\n        .flatten()\n        .cloned()\n        .collect()\n}\n\n// If the tree is large enough to use the default value (per-arity), use it.  If it's too small to cache anything (i.e. not enough rows), don't discard any.\npub fn default_rows_to_discard(leafs: usize, arity: usize) -> usize {\n    let row_count = get_merkle_tree_row_count(leafs, arity);\n    if row_count <= 2 {\n        // If a tree only has a root row and/or base, there is\n        // nothing to discard.\n        return 0;\n    } else if row_count == 3 {\n        // If a tree only has 1 row between the base and root,\n        // it's all that can be discarded.\n        return 1;\n    }\n\n    // row_count - 2 discounts the base layer (1) and root (1)\n    let max_rows_to_discard = row_count - 2;\n\n    // This configurable setting is for a default oct-tree\n    // rows_to_discard value, which defaults to 2.\n    let rows_to_discard = SETTINGS.rows_to_discard as usize;\n\n    // Discard at most 'constant value' rows (coded below,\n    // differing by arity) while respecting the max number that\n    // the tree can support discarding.\n    match arity {\n        2 => min(max_rows_to_discard, 7),\n        4 => min(max_rows_to_discard, 5),\n        _ => min(max_rows_to_discard, rows_to_discard),\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::{\n        bls::{Bls12, Fr},\n        gadgets::num::AllocatedNum,\n        util_cs::test_cs::TestConstraintSystem,\n    };\n    use ff::Field;\n    use filecoin_hashers::{sha256::Sha256Function, HashFunction};\n    use fr32::fr_into_bytes;\n    use merkletree::hash::Algorithm;\n    use rand::{Rng, SeedableRng};\n    use rand_xorshift::XorShiftRng;\n\n    use crate::TEST_SEED;\n\n    #[test]\n    fn test_bytes_into_boolean_vec() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        for i in 0..100 {\n            let data: Vec<u8> = (0..i + 10).map(|_| rng.gen()).collect();\n            let bools = {\n                let mut cs = cs.namespace(|| format!(\"round: {}\", i));\n                bytes_into_boolean_vec(&mut cs, Some(data.as_slice()), 8)\n                    .expect(\"bytes into boolean vec failure\")\n            };\n\n            let bytes_actual: Vec<u8> = bits_to_bytes(\n                bools\n                    .iter()\n                    .map(|b| b.get_value().expect(\"get_value failure\"))\n                    .collect::<Vec<bool>>()\n                    .as_slice(),\n            );\n\n            assert_eq!(data, bytes_actual);\n        }\n    }\n\n    #[test]\n    fn test_bool_to_u8() {\n        
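// Illustrative assertion (hypothetical, added for exposition): OR-ing\n        // single-bit bytes, as `bits_to_bytes` does above, reassembles a\n        // little-endian byte.\n        assert_eq!(bool_to_u8(true, 0) | bool_to_u8(true, 1), 0b0000_0011);\n\n        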
assert_eq!(bool_to_u8(false, 2), 0b0000_0000);\n        assert_eq!(bool_to_u8(true, 0), 0b0000_0001);\n        assert_eq!(bool_to_u8(true, 1), 0b0000_0010);\n        assert_eq!(bool_to_u8(true, 7), 0b1000_0000);\n    }\n\n    #[test]\n    fn test_bits_into_bytes() {\n        assert_eq!(\n            bits_to_bytes(&[true, false, false, false, false, false, false, false]),\n            vec![1]\n        );\n        assert_eq!(\n            bits_to_bytes(&[true, true, true, true, true, true, true, true]),\n            vec![255]\n        );\n    }\n\n    #[test]\n    fn test_bytes_into_bits() {\n        assert_eq!(\n            bytes_into_bits(&[1u8]),\n            vec![true, false, false, false, false, false, false, false]\n        );\n\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        for i in 10..100 {\n            let bytes: Vec<u8> = (0..i).map(|_| rng.gen()).collect();\n\n            let bits = bytes_into_bits(bytes.as_slice());\n            assert_eq!(bits_to_bytes(bits.as_slice()), bytes);\n        }\n    }\n\n    #[test]\n    fn test_reverse_bit_numbering() {\n        for _ in 0..100 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n            let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n            let val_fr = Fr::random(rng);\n            let val_vec = fr_into_bytes(&val_fr);\n\n            let val_num = AllocatedNum::alloc(cs.namespace(|| \"val_num\"), || Ok(val_fr))\n                .expect(\"alloc failure\");\n            let val_num_bits = val_num\n                .to_bits_le(cs.namespace(|| \"val_bits\"))\n                .expect(\"to_bits_le failure\");\n\n            let bits =\n                bytes_into_boolean_vec_be(cs.namespace(|| \"val_bits_2\"), Some(&val_vec), 256)\n                    .expect(\"bytes_into_boolean_vec_be failure\");\n\n            let val_num_reversed_bit_numbering = reverse_bit_numbering(val_num_bits);\n\n            let a_values: Vec<bool> = val_num_reversed_bit_numbering\n                .iter()\n                .map(|v| v.get_value().expect(\"get_value failure\"))\n                .collect();\n\n            let b_values: Vec<bool> = bits\n                .iter()\n                .map(|v| v.get_value().expect(\"get_value failure\"))\n                .collect();\n            assert_eq!(&a_values[..], &b_values[..]);\n        }\n    }\n\n    #[test]\n    fn hash_leaf_bits_circuit() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        let left_fr = Fr::random(rng);\n        let right_fr = Fr::random(rng);\n        let left: Vec<u8> = fr_into_bytes(&left_fr);\n        let right: Vec<u8> = fr_into_bytes(&right_fr);\n        let height = 1;\n\n        let left_bits: Vec<Boolean> = {\n            let mut cs = cs.namespace(|| \"left\");\n            bytes_into_boolean_vec(&mut cs, Some(left.as_slice()), 256).expect(\"left bits failure\")\n        };\n\n        let right_bits: Vec<Boolean> = {\n            let mut cs = cs.namespace(|| \"right\");\n            bytes_into_boolean_vec(&mut cs, Some(right.as_slice()), 256)\n                .expect(\"right bits failure\")\n        };\n\n        let out = Sha256Function::hash_leaf_bits_circuit(\n            cs.namespace(|| \"hash_leaf_circuit\"),\n            &left_bits,\n            &right_bits,\n            height,\n        )\n        .expect(\"key derivation function failed\");\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n        assert_eq!(cs.num_constraints(), 
45_387);\n\n        let expected: Fr = Sha256Function::default()\n            .node(left_fr.into(), right_fr.into(), height)\n            .into();\n\n        assert_eq!(\n            expected,\n            out.get_value().expect(\"get_value failure\"),\n            \"circuit and non circuit do not match\"\n        );\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/tests/por_circuit.rs",
    "content": "use bellperson::{\n    bls::{Bls12, Fr},\n    gadgets::{boolean::AllocatedBit, multipack, num::AllocatedNum},\n    util_cs::test_cs::TestConstraintSystem,\n    Circuit, ConstraintSystem,\n};\nuse ff::Field;\nuse filecoin_hashers::{\n    blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, Hasher,\n};\nuse fr32::{bytes_into_fr, fr_into_bytes};\nuse generic_array::typenum::{Unsigned, U0, U2, U4, U8};\nuse merkletree::store::VecStore;\nuse pretty_assertions::assert_eq;\nuse rand::{Rng, SeedableRng};\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    compound_proof::CompoundProof,\n    gadgets::por::{\n        challenge_into_auth_path_bits, por_no_challenge_input, PoRCircuit, PoRCompound,\n    },\n    merkle::{\n        create_base_merkle_tree, generate_tree, get_base_tree_count, MerkleProofTrait,\n        MerkleTreeTrait, MerkleTreeWrapper, ResTree,\n    },\n    por::{self, PoR},\n    proof::ProofScheme,\n    util::data_at_node,\n    TEST_SEED,\n};\n\ntype TreeBase<H, A> = MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, U0, U0>;\ntype TreeSub<H, A, B> = MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, B, U0>;\ntype TreeTop<H, A, B, C> = MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, B, C>;\n\n#[test]\nfn test_por_circuit_blake2s_base_2() {\n    test_por_circuit::<TreeBase<Blake2sHasher, U2>>(3, 129_135);\n}\n\n#[test]\nfn test_por_circuit_sha256_base_2() {\n    test_por_circuit::<TreeBase<Sha256Hasher, U2>>(3, 272_295);\n}\n\n#[test]\nfn test_por_circuit_poseidon_base_2() {\n    test_por_circuit::<TreeBase<PoseidonHasher, U2>>(3, 1_887);\n}\n\n#[test]\nfn test_por_circuit_blake2s_base_4() {\n    test_por_circuit::<TreeBase<Blake2sHasher, U4>>(3, 130_296);\n}\n\n#[test]\nfn test_por_circuit_sha256_base_4() {\n    test_por_circuit::<TreeBase<Sha256Hasher, U4>>(3, 216_258);\n}\n\n#[test]\nfn test_por_circuit_poseidon_base_4() {\n    test_por_circuit::<TreeBase<PoseidonHasher, U4>>(3, 1_164);\n}\n\n#[test]\nfn test_por_circuit_blake2s_base_8() {\n    test_por_circuit::<TreeBase<Blake2sHasher, U8>>(3, 174_503);\n}\n\n#[test]\nfn test_por_circuit_sha256_base_8() {\n    test_por_circuit::<TreeBase<Sha256Hasher, U8>>(3, 250_987);\n}\n\n#[test]\nfn test_por_circuit_poseidon_base_8() {\n    test_por_circuit::<TreeBase<PoseidonHasher, U8>>(3, 1_063);\n}\n\n#[test]\nfn test_por_circuit_poseidon_sub_8_2() {\n    test_por_circuit::<TreeSub<PoseidonHasher, U8, U2>>(3, 1_377);\n}\n\n#[test]\nfn test_por_circuit_poseidon_top_8_4_2() {\n    test_por_circuit::<TreeTop<PoseidonHasher, U8, U4, U2>>(3, 1_764);\n}\n\n#[test]\nfn test_por_circuit_poseidon_sub_8_8() {\n    // This is the shape we want for 32GiB sectors.\n    test_por_circuit::<TreeSub<PoseidonHasher, U8, U8>>(3, 1_593);\n}\n#[test]\nfn test_por_circuit_poseidon_top_8_8_2() {\n    // This is the shape we want for 64GiB secotrs.\n    test_por_circuit::<TreeTop<PoseidonHasher, U8, U8, U2>>(3, 1_907);\n}\n\n#[test]\nfn test_por_circuit_poseidon_top_8_2_4() {\n    // We can handle top-heavy trees with a non-zero subtree arity.\n    // These should never be produced, though.\n    test_por_circuit::<TreeTop<PoseidonHasher, U8, U2, U4>>(3, 1_764);\n}\n\nfn test_por_circuit<Tree: 'static + MerkleTreeTrait>(num_inputs: usize, num_constraints: usize) {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    // Ensure arity will evenly fill tree.\n    let leaves = 64 * get_base_tree_count::<Tree>();\n\n    // -- Basic Setup\n    let (data, tree) = generate_tree::<Tree, 
_>(rng, leaves, None);\n\n    for i in 0..leaves {\n        //println!(\"challenge: {}, ({})\", i, leaves);\n\n        // -- PoR\n        let pub_params = por::PublicParams {\n            leaves,\n            private: false,\n        };\n        let pub_inputs = por::PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n            challenge: i,\n            commitment: Some(tree.root()),\n        };\n        let leaf =\n            data_at_node(data.as_slice(), pub_inputs.challenge).expect(\"data_at_node failure\");\n        let leaf_element =\n            <Tree::Hasher as Hasher>::Domain::try_from_bytes(leaf).expect(\"try_from_bytes failure\");\n        let priv_inputs = por::PrivateInputs::<ResTree<Tree>>::new(leaf_element, &tree);\n        let p = tree.gen_proof(i).expect(\"gen_proof failure\");\n        assert!(p.verify());\n\n        // create a non circuit proof\n        let proof = PoR::<ResTree<Tree>>::prove(&pub_params, &pub_inputs, &priv_inputs)\n            .expect(\"proving failed\");\n\n        // make sure it verifies\n        let is_valid = PoR::<ResTree<Tree>>::verify(&pub_params, &pub_inputs, &proof)\n            .expect(\"verification failed\");\n        assert!(is_valid, \"failed to verify por proof\");\n\n        // -- Circuit\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        // Root is public input.\n        let por = PoRCircuit::<ResTree<Tree>>::new(proof.proof, false);\n        por.synthesize(&mut cs).expect(\"circuit synthesis failed\");\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n        assert_eq!(cs.num_inputs(), num_inputs, \"wrong number of inputs\");\n        assert_eq!(\n            cs.num_constraints(),\n            num_constraints,\n            \"wrong number of constraints\"\n        );\n\n        let generated_inputs =\n            PoRCompound::<ResTree<Tree>>::generate_public_inputs(&pub_inputs, &pub_params, None)\n                .expect(\"generate_public_inputs failure\");\n\n        let expected_inputs = cs.get_inputs();\n\n        for ((input, label), generated_input) in\n            expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n        {\n            assert_eq!(input, generated_input, \"{}\", label);\n        }\n\n        assert_eq!(\n            generated_inputs.len(),\n            expected_inputs.len() - 1,\n            \"inputs are not the same length\"\n        );\n\n        assert!(cs.verify(&generated_inputs), \"failed to verify inputs\");\n    }\n}\n\n#[test]\nfn test_por_circuit_poseidon_base_2_private_root() {\n    test_por_circuit_private_root::<TreeBase<PoseidonHasher, U2>>(1_886);\n}\n\n#[test]\nfn test_por_circuit_poseidon_base_4_private_root() {\n    test_por_circuit_private_root::<TreeBase<PoseidonHasher, U4>>(1_163);\n}\n\n#[test]\nfn test_por_circuit_poseidon_base_8_private_root() {\n    test_por_circuit_private_root::<TreeBase<PoseidonHasher, U8>>(1_062);\n}\n\nfn test_por_circuit_private_root<Tree: MerkleTreeTrait>(num_constraints: usize) {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64 * get_base_tree_count::<Tree>();\n    for i in 0..leaves {\n        // -- Basic Setup\n\n        let data: Vec<u8> = (0..leaves)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        let tree = create_base_merkle_tree::<Tree>(None, leaves, data.as_slice())\n            .expect(\"create_base_merkle_tree failure\");\n\n        // -- PoR\n\n        let pub_params = por::PublicParams {\n            leaves,\n           
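 // With private = true, the merkle root is a private circuit input rather than a public one.\n           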
 private: true,\n        };\n        let pub_inputs = por::PublicInputs {\n            challenge: i,\n            commitment: None,\n        };\n\n        let priv_inputs = por::PrivateInputs::<Tree>::new(\n            bytes_into_fr(\n                data_at_node(data.as_slice(), pub_inputs.challenge).expect(\"data_at_node failure\"),\n            )\n            .expect(\"bytes_into_fr failure\")\n            .into(),\n            &tree,\n        );\n\n        // create a non circuit proof\n        let proof =\n            PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs).expect(\"proving failed\");\n\n        // make sure it verifies\n        let is_valid =\n            PoR::<Tree>::verify(&pub_params, &pub_inputs, &proof).expect(\"verification failed\");\n        assert!(is_valid, \"failed to verify por proof\");\n\n        // -- Circuit\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        // Root is private input.\n        let por = PoRCircuit::<Tree>::new(proof.proof, true);\n        por.synthesize(&mut cs).expect(\"circuit synthesis failed\");\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n        assert_eq!(cs.num_inputs(), 2, \"wrong number of inputs\");\n        assert_eq!(\n            cs.num_constraints(),\n            num_constraints,\n            \"wrong number of constraints\"\n        );\n\n        let auth_path_bits = challenge_into_auth_path_bits(pub_inputs.challenge, pub_params.leaves);\n        let packed_auth_path = multipack::compute_multipacking::<Bls12>(&auth_path_bits);\n\n        let mut expected_inputs = Vec::new();\n        expected_inputs.extend(packed_auth_path);\n\n        assert_eq!(cs.get_input(0, \"ONE\"), Fr::one(), \"wrong input 0\");\n\n        assert_eq!(\n            cs.get_input(1, \"path/input 0\"),\n            expected_inputs[0],\n            \"wrong packed_auth_path\"\n        );\n\n        assert!(cs.is_satisfied(), \"constraints are not all satisfied\");\n        assert!(cs.verify(&expected_inputs), \"failed to verify inputs\");\n    }\n}\n\n#[test]\nfn test_por_no_challenge_input() {\n    type Arity = U8;\n    type Tree = TreeBase<PoseidonHasher, Arity>;\n\n    // == Setup\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let height = 3;\n    let n_leaves = Arity::to_usize() << height;\n\n    let data: Vec<u8> = (0..n_leaves)\n        .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n        .collect();\n\n    let tree = create_base_merkle_tree::<Tree>(None, n_leaves, &data)\n        .expect(\"create_base_merkle_tree failure\");\n    let root = tree.root();\n\n    let challenge = rng.gen::<usize>() % n_leaves;\n    let leaf_bytes = data_at_node(&data, challenge).expect(\"data_at_node failure\");\n    let leaf = bytes_into_fr(leaf_bytes).expect(\"bytes_into_fr failure\");\n\n    // == Vanilla PoR proof\n    let proof = {\n        use por::{PoR, PrivateInputs, PublicInputs, PublicParams};\n        let pub_params = PublicParams {\n            leaves: n_leaves,\n            private: false,\n        };\n        let pub_inputs = PublicInputs {\n            challenge,\n            commitment: None,\n        };\n        let priv_inputs = PrivateInputs {\n            leaf: leaf.into(),\n            tree: &tree,\n        };\n        let proof =\n            PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs).expect(\"proving failed\");\n        let is_valid =\n            PoR::<Tree>::verify(&pub_params, &pub_inputs, &proof).expect(\"verification failed\");\n        assert!(is_valid, 
\"failed to verify por proof\");\n        proof.proof\n    };\n\n    // == Test PoR gadget\n    let mut cs = TestConstraintSystem::<Bls12>::new();\n\n    let challenge_bit_len = n_leaves.trailing_zeros() as usize;\n    let challenge: Vec<AllocatedBit> = (0..challenge_bit_len)\n        .map(|i| {\n            AllocatedBit::alloc(\n                cs.namespace(|| format!(\"challenge bit {}\", i)),\n                Some((challenge >> i) & 1 == 1),\n            )\n            .expect(\"failed to allocate challenge bit\")\n        })\n        .collect();\n\n    let leaf = AllocatedNum::alloc(cs.namespace(|| \"leaf\".to_string()), || Ok(leaf))\n        .expect(\"failed to allocate leaf\");\n\n    let path_values: Vec<Vec<AllocatedNum<Bls12>>> = proof\n        .path()\n        .iter()\n        .enumerate()\n        .map(|(height, (siblings, _insert_index))| {\n            siblings\n                .iter()\n                .enumerate()\n                .map(|(sib_index, &sib)| {\n                    AllocatedNum::alloc(\n                        cs.namespace(|| format!(\"sib {}, height {}\", sib_index, height)),\n                        || Ok(sib.into()),\n                    )\n                    .expect(\"failed to allocate sibling\")\n                })\n                .collect()\n        })\n        .collect();\n\n    let root = AllocatedNum::alloc(cs.namespace(|| \"root\".to_string()), || Ok(root.into()))\n        .expect(\"failed to allocate root\");\n\n    por_no_challenge_input::<Tree, _>(&mut cs, challenge, leaf, path_values, root)\n        .expect(\"por gadget failed\");\n\n    assert!(cs.is_satisfied());\n    let public_inputs = vec![];\n    assert!(cs.verify(&public_inputs));\n}\n"
  },
  {
    "path": "storage-proofs-core/tests/por_compound.rs",
    "content": "use bellperson::{\n    bls::Fr,\n    util_cs::{metric_cs::MetricCS, test_cs::TestConstraintSystem},\n    Circuit,\n};\nuse ff::Field;\nuse filecoin_hashers::{poseidon::PoseidonHasher, Hasher};\nuse fr32::{bytes_into_fr, fr_into_bytes};\nuse generic_array::typenum::{U0, U2, U4, U8};\nuse merkletree::store::VecStore;\nuse pretty_assertions::assert_eq;\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    compound_proof::{self, CompoundProof},\n    gadgets::por::PoRCompound,\n    merkle::{\n        create_base_merkle_tree, generate_tree, get_base_tree_count, MerkleTreeTrait,\n        MerkleTreeWrapper, ResTree,\n    },\n    por,\n    proof::NoRequirements,\n    util::data_at_node,\n    TEST_SEED,\n};\n\ntype TreeBase<H, A> = MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, U0, U0>;\ntype TreeSub<H, A, B> = MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, B, U0>;\ntype TreeTop<H, A, B, C> = MerkleTreeWrapper<H, VecStore<<H as Hasher>::Domain>, A, B, C>;\n\n#[test]\n#[ignore]\nfn test_por_compound_poseidon_base_8() {\n    por_compound::<TreeBase<PoseidonHasher, U8>>();\n}\n\nfn por_compound<Tree: 'static + MerkleTreeTrait>() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64 * get_base_tree_count::<Tree>();\n\n    let data: Vec<u8> = (0..leaves)\n        .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n        .collect();\n    let tree = create_base_merkle_tree::<Tree>(None, leaves, data.as_slice())\n        .expect(\"create_base_merkle_tree failure\");\n\n    let public_inputs = por::PublicInputs {\n        challenge: 2,\n        commitment: Some(tree.root()),\n    };\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params: por::SetupParams {\n            leaves,\n            private: false,\n        },\n        partitions: None,\n        priority: false,\n    };\n    let public_params = PoRCompound::<Tree>::setup(&setup_params).expect(\"setup failed\");\n\n    let private_inputs = por::PrivateInputs::<Tree>::new(\n        bytes_into_fr(\n            data_at_node(data.as_slice(), public_inputs.challenge).expect(\"bytes_into_fr failure\"),\n        )\n        .expect(\"failed to create Fr from node data\")\n        .into(),\n        &tree,\n    );\n\n    let gparams = PoRCompound::<Tree>::groth_params(Some(rng), &public_params.vanilla_params)\n        .expect(\"failed to generate groth params\");\n\n    let proof =\n        PoRCompound::<Tree>::prove(&public_params, &public_inputs, &private_inputs, &gparams)\n            .expect(\"failed while proving\");\n\n    let verified =\n        PoRCompound::<Tree>::verify(&public_params, &public_inputs, &proof, &NoRequirements)\n            .expect(\"failed while verifying\");\n    assert!(verified);\n\n    let (circuit, inputs) =\n        PoRCompound::<Tree>::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n            .expect(\"circuit_for_test failure\");\n\n    let mut cs = TestConstraintSystem::new();\n\n    circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n    assert!(cs.is_satisfied());\n    assert!(cs.verify(&inputs));\n}\n\n#[ignore]\n#[test]\nfn test_por_compound_poseidon_base_2_private_root() {\n    por_compound_private_root::<TreeBase<PoseidonHasher, U2>>();\n}\n\n#[ignore]\n#[test]\nfn test_por_compound_poseidon_base_4_private_root() {\n    por_compound_private_root::<TreeBase<PoseidonHasher, U4>>();\n}\n\n#[ignore]\n#[test]\nfn test_por_compound_poseidon_sub_8_2_private_root() {\n    
por_compound_private_root::<TreeSub<PoseidonHasher, U8, U2>>();\n}\n\n#[ignore]\n#[test]\nfn test_por_compound_poseidon_top_8_4_2_private_root() {\n    por_compound_private_root::<TreeTop<PoseidonHasher, U8, U4, U2>>();\n}\n\n#[ignore]\n#[test]\nfn test_por_compound_poseidon_sub_8_8_private_root() {\n    por_compound_private_root::<TreeSub<PoseidonHasher, U8, U8>>();\n}\n\n#[ignore]\n#[test]\nfn test_por_compound_poseidon_top_8_8_2_private_root() {\n    por_compound_private_root::<TreeTop<PoseidonHasher, U8, U8, U2>>();\n}\n\n#[ignore]\n#[test]\nfn test_por_compound_poseidon_top_8_2_4_private_root() {\n    por_compound_private_root::<TreeTop<PoseidonHasher, U8, U2, U4>>();\n}\n\nfn por_compound_private_root<Tree: 'static + MerkleTreeTrait>() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    // Ensure arity will evenly fill tree.\n    let leaves = 64 * get_base_tree_count::<Tree>();\n\n    // -- Basic Setup\n    let (data, tree) = generate_tree::<Tree, _>(rng, leaves, None);\n\n    for i in 0..3 {\n        let public_inputs = por::PublicInputs {\n            challenge: i,\n            commitment: None,\n        };\n\n        let setup_params = compound_proof::SetupParams {\n            vanilla_params: por::SetupParams {\n                leaves,\n                private: true,\n            },\n            partitions: None,\n            priority: false,\n        };\n        let public_params =\n            PoRCompound::<ResTree<Tree>>::setup(&setup_params).expect(\"setup failed\");\n\n        let private_inputs = por::PrivateInputs::<ResTree<Tree>>::new(\n            bytes_into_fr(\n                data_at_node(data.as_slice(), public_inputs.challenge)\n                    .expect(\"data_at_node failure\"),\n            )\n            .expect(\"failed to create Fr from node data\")\n            .into(),\n            &tree,\n        );\n\n        {\n            let (circuit, inputs) =\n                PoRCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                    .expect(\"circuit_for_test\");\n\n            let mut cs = TestConstraintSystem::new();\n\n            circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n\n            if !cs.is_satisfied() {\n                panic!(\n                    \"failed to satisfy: {:?}\",\n                    cs.which_is_unsatisfied().expect(\"cs is_satisfied failure\")\n                );\n            }\n            assert!(\n                cs.verify(&inputs),\n                \"verification failed with TestConstraintSystem and generated inputs\"\n            );\n        }\n        // NOTE: This diagnostic code currently fails, even though the proof generated from the blank circuit verifies.\n        // Use this to debug differences between blank and regular circuit generation.\n        {\n            let (circuit1, _inputs) =\n                PoRCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                    .expect(\"circuit_for_test failure\");\n            let blank_circuit =\n                PoRCompound::<ResTree<Tree>>::blank_circuit(&public_params.vanilla_params);\n\n            let mut cs_blank = MetricCS::new();\n            blank_circuit\n                .synthesize(&mut cs_blank)\n                .expect(\"failed to synthesize\");\n\n            let a = cs_blank.pretty_print_list();\n\n            let mut cs1 = TestConstraintSystem::new();\n            circuit1.synthesize(&mut cs1).expect(\"failed to synthesize\");\n            let b = 
cs1.pretty_print_list();\n\n            for (i, (a, b)) in a.chunks(100).zip(b.chunks(100)).enumerate() {\n                assert_eq!(a, b, \"failed at chunk {}\", i);\n            }\n        }\n\n        let blank_groth_params =\n            PoRCompound::<ResTree<Tree>>::groth_params(Some(rng), &public_params.vanilla_params)\n                .expect(\"failed to generate groth params\");\n\n        let proof = PoRCompound::prove(\n            &public_params,\n            &public_inputs,\n            &private_inputs,\n            &blank_groth_params,\n        )\n        .expect(\"failed while proving\");\n\n        let verified = PoRCompound::verify(&public_params, &public_inputs, &proof, &NoRequirements)\n            .expect(\"failed while verifying\");\n\n        assert!(verified);\n    }\n}\n"
  },
  {
    "path": "storage-proofs-core/tests/por_vanilla.rs",
    "content": "use std::convert::Into;\n\nuse bellperson::bls::Fr;\nuse ff::Field;\nuse filecoin_hashers::{\n    blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, Hasher,\n};\nuse fr32::fr_into_bytes;\nuse generic_array::typenum::{U0, U2, U4};\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    drgraph::{BucketGraph, Graph, BASE_DEGREE},\n    merkle::{create_base_merkle_tree, DiskStore, MerkleTreeTrait, MerkleTreeWrapper},\n    por::{self, PoR},\n    proof::ProofScheme,\n    util::data_at_node,\n    TEST_SEED,\n};\n\ntype TreeBase<H, U> = MerkleTreeWrapper<H, DiskStore<<H as Hasher>::Domain>, U, U0, U0>;\n\n#[test]\nfn test_por_poseidon_base_2() {\n    test_por::<TreeBase<PoseidonHasher, U2>>();\n}\n\n#[test]\nfn test_por_sha256_base_2() {\n    test_por::<TreeBase<Sha256Hasher, U2>>();\n}\n\n#[test]\nfn test_por_blake2s_base_2() {\n    test_por::<TreeBase<Blake2sHasher, U2>>();\n}\n\n#[test]\nfn test_por_poseidon_base_4() {\n    test_por::<TreeBase<PoseidonHasher, U4>>();\n}\n\n#[test]\nfn test_por_sha256_base_4() {\n    test_por::<TreeBase<Sha256Hasher, U4>>();\n}\n\n#[test]\nfn test_por_blake2s_base_4() {\n    test_por::<TreeBase<Blake2sHasher, U4>>();\n}\n\nfn test_por<Tree: MerkleTreeTrait>() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 16;\n    let pub_params = por::PublicParams {\n        leaves,\n        private: false,\n    };\n\n    let data: Vec<u8> = (0..leaves)\n        .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n        .collect();\n    let porep_id = [3; 32];\n    let graph =\n        BucketGraph::<Tree::Hasher>::new(leaves, BASE_DEGREE, 0, porep_id, ApiVersion::V1_1_0)\n            .unwrap();\n    let tree = create_base_merkle_tree::<Tree>(None, graph.size(), data.as_slice()).unwrap();\n\n    let pub_inputs = por::PublicInputs {\n        challenge: 3,\n        commitment: Some(tree.root()),\n    };\n\n    let leaf = <Tree::Hasher as Hasher>::Domain::try_from_bytes(\n        data_at_node(data.as_slice(), pub_inputs.challenge).unwrap(),\n    )\n    .unwrap();\n\n    let priv_inputs = por::PrivateInputs::new(leaf, &tree);\n\n    let proof = PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs).expect(\"proving failed\");\n\n    let is_valid =\n        PoR::<Tree>::verify(&pub_params, &pub_inputs, &proof).expect(\"verification failed\");\n\n    assert!(is_valid);\n}\n\n#[test]\nfn test_por_validates_proof_sha256_base_2() {\n    test_por_validates_proof::<TreeBase<Sha256Hasher, U2>>();\n}\n\n#[test]\nfn test_por_validates_proof_blake2s_base_2() {\n    test_por_validates_proof::<TreeBase<Blake2sHasher, U2>>();\n}\n\n#[test]\nfn test_por_validates_proof_poseidon_base_2() {\n    test_por_validates_proof::<TreeBase<PoseidonHasher, U2>>();\n}\n\n#[test]\nfn test_por_validates_proof_sha256_base_4() {\n    test_por_validates_proof::<TreeBase<Sha256Hasher, U4>>();\n}\n\n#[test]\nfn test_por_validates_proof_blake2s_base_4() {\n    test_por_validates_proof::<TreeBase<Blake2sHasher, U4>>();\n}\n\n#[test]\nfn test_por_validates_proof_poseidon_base_4() {\n    test_por_validates_proof::<TreeBase<PoseidonHasher, U4>>();\n}\n\nfn test_por_validates_proof<Tree: MerkleTreeTrait>() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64;\n    let pub_params = por::PublicParams {\n        leaves,\n        private: false,\n    };\n\n    let data: Vec<u8> = (0..leaves)\n        .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n        
.collect();\n\n    let porep_id = [99; 32];\n\n    let graph =\n        BucketGraph::<Tree::Hasher>::new(leaves, BASE_DEGREE, 0, porep_id, ApiVersion::V1_1_0)\n            .unwrap();\n    let tree = create_base_merkle_tree::<Tree>(None, graph.size(), data.as_slice()).unwrap();\n\n    let pub_inputs = por::PublicInputs {\n        challenge: 3,\n        commitment: Some(tree.root()),\n    };\n\n    let leaf = <Tree::Hasher as Hasher>::Domain::try_from_bytes(\n        data_at_node(data.as_slice(), pub_inputs.challenge).unwrap(),\n    )\n    .unwrap();\n\n    let priv_inputs = por::PrivateInputs::<Tree>::new(leaf, &tree);\n\n    let good_proof =\n        PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs).expect(\"proving failed\");\n\n    let verified =\n        PoR::<Tree>::verify(&pub_params, &pub_inputs, &good_proof).expect(\"verification failed\");\n    assert!(verified);\n\n    // Invalidate the proof.\n    let bad_proof = {\n        let mut proof = good_proof.clone();\n        let mut bad_leaf = Into::<Fr>::into(proof.data);\n        bad_leaf.add_assign(&Fr::one());\n        proof.data = bad_leaf.into();\n        proof\n    };\n\n    let verified =\n        PoR::<Tree>::verify(&pub_params, &pub_inputs, &bad_proof).expect(\"verification failed\");\n\n    assert!(!verified);\n}\n\n#[test]\nfn test_por_validates_challenge_sha256_base_2() {\n    test_por_validates_challenge::<TreeBase<Sha256Hasher, U2>>();\n}\n\n#[test]\nfn test_por_validates_challenge_blake2s_base_2() {\n    test_por_validates_challenge::<TreeBase<Blake2sHasher, U2>>();\n}\n\n#[test]\nfn test_por_validates_challenge_poseidon_base_2() {\n    test_por_validates_challenge::<TreeBase<PoseidonHasher, U2>>();\n}\n\n#[test]\nfn test_por_validates_challenge_sha256_base_4() {\n    test_por_validates_challenge::<TreeBase<Sha256Hasher, U4>>();\n}\n\n#[test]\nfn test_por_validates_challenge_blake2s_base_4() {\n    test_por_validates_challenge::<TreeBase<Blake2sHasher, U4>>();\n}\n\n#[test]\nfn test_por_validates_challenge_poseidon_base_4() {\n    test_por_validates_challenge::<TreeBase<PoseidonHasher, U4>>();\n}\n\nfn test_por_validates_challenge<Tree: MerkleTreeTrait>() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64;\n\n    let pub_params = por::PublicParams {\n        leaves,\n        private: false,\n    };\n\n    let data: Vec<u8> = (0..leaves)\n        .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n        .collect();\n\n    let porep_id = [32; 32];\n    let graph =\n        BucketGraph::<Tree::Hasher>::new(leaves, BASE_DEGREE, 0, porep_id, ApiVersion::V1_1_0)\n            .unwrap();\n    let tree = create_base_merkle_tree::<Tree>(None, graph.size(), data.as_slice()).unwrap();\n\n    let pub_inputs = por::PublicInputs {\n        challenge: 3,\n        commitment: Some(tree.root()),\n    };\n\n    let leaf = <Tree::Hasher as Hasher>::Domain::try_from_bytes(\n        data_at_node(data.as_slice(), pub_inputs.challenge).unwrap(),\n    )\n    .unwrap();\n\n    let priv_inputs = por::PrivateInputs::<Tree>::new(leaf, &tree);\n\n    let proof = PoR::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs).expect(\"proving failed\");\n\n    // Invalidate the challenge.\n    let different_pub_inputs = por::PublicInputs {\n        challenge: 999,\n        commitment: Some(tree.root()),\n    };\n\n    let verified = PoR::<Tree>::verify(&pub_params, &different_pub_inputs, &proof)\n        .expect(\"verification failed\");\n\n    // A proof created with the wrong challenge must not verify!\n    
assert!(!verified);\n}\n"
  },
  {
    "path": "storage-proofs-porep/Cargo.toml",
    "content": "[package]\nname = \"storage-proofs-porep\"\nversion = \"7.0.1\"\nauthors = [\"dignifiedquire <me@dignifiedquire.com>\"]\ndescription = \"Proofs of replication\"\nlicense = \"MIT OR Apache-2.0\"\nedition = \"2018\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\nreadme = \"README.md\"\n\n[dependencies]\ncrossbeam = \"0.8\"\ndigest = \"0.9\"\nstorage-proofs-core = { path = \"../storage-proofs-core\", version = \"^7.0.0\", default-features = false}\nsha2raw = { path = \"../sha2raw\", version = \"^2.0.0\"}\nfilecoin-hashers = { path = \"../filecoin-hashers\", version = \"^2.0.0\", default-features = false, features = [\"poseidon\", \"sha256\"]}\nrand = \"0.7\"\nmerkletree = \"0.21.0\"\nmapr = \"0.8.0\"\nnum-bigint = \"0.2\"\nnum-traits = \"0.2\"\nrayon = \"1.0.0\"\nserde = { version = \"1.0\", features = [\"derive\"]}\nserde_json = \"1.0\"\nff = { version = \"0.2.3\", package = \"fff\" }\nbellperson = { version = \"0.13\", default-features = false }\nlog = \"0.4.7\"\npretty_assertions = \"0.6.1\"\ngeneric-array = \"0.14.4\"\nanyhow = \"1.0.23\"\nneptune = { version = \"2.2.0\", default-features = false }\nnum_cpus = \"1.10.1\"\nhex = \"0.4.2\"\nbincode = \"1.1.2\"\nbyteorder = \"1.3.4\"\nlazy_static = \"1.2\"\nbyte-slice-cast = \"1.0.0\"\nhwloc = \"0.3.0\"\nlibc = \"0.2\"\nfdlimit = \"0.2.0\"\nfr32 = { path = \"../fr32\", version = \"^0.2.0\", default-features = false }\n\n[target.\"cfg(target_arch = \\\"aarch64\\\")\".dependencies]\nsha2 = { version = \"0.9.3\", features = [\"compress\", \"asm\"] }\n[target.\"cfg(not(target_arch = \\\"aarch64\\\"))\".dependencies]\nsha2 = { version = \"0.9.3\", features = [\"compress\"] }\n\n[dev-dependencies]\ntempfile = \"3\"\nrand_xorshift = \"0.2.0\"\ncriterion = \"0.3.2\"\nglob = \"0.3.0\"\npretty_env_logger = \"0.4.0\"\nfilecoin-hashers = { path = \"../filecoin-hashers\", version = \"^2.0.0\", default-features = false, features = [\"poseidon\", \"sha256\", \"blake2s\"]}\n\n[features]\ndefault = [\"pairing\", \"gpu\"]\ngpu = [\"storage-proofs-core/gpu\", \"filecoin-hashers/gpu\", \"neptune/opencl\", \"bellperson/gpu\", \"fr32/gpu\"]\npairing = [\"storage-proofs-core/pairing\", \"bellperson/pairing\", \"neptune/pairing\", \"filecoin-hashers/pairing\", \"fr32/pairing\"]\nblst = [\"storage-proofs-core/blst\", \"bellperson/blst\", \"neptune/blst\", \"filecoin-hashers/blst\", \"fr32/blst\"]\nsingle-threaded = []\n\n[[bench]]\nname = \"encode\"\nharness = false\n\n[[bench]]\nname = \"parents\"\nharness = false\n"
  },
  {
    "path": "storage-proofs-porep/README.md",
    "content": "# Storage Proofs PoRep\n\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "storage-proofs-porep/benches/encode.rs",
    "content": "use bellperson::bls::Fr;\nuse criterion::{black_box, criterion_group, criterion_main, Criterion, Throughput};\nuse ff::Field;\nuse filecoin_hashers::{sha256::Sha256Hasher, Domain, Hasher};\nuse fr32::fr_into_bytes;\nuse rand::thread_rng;\nuse storage_proofs_core::api_version::ApiVersion;\nuse storage_proofs_porep::stacked::{\n    create_label::single::{create_label, create_label_exp},\n    StackedBucketGraph,\n};\n\nstruct Pregenerated<H: 'static + Hasher> {\n    data: Vec<u8>,\n    replica_id: H::Domain,\n    graph: StackedBucketGraph<H>,\n}\n\nfn pregenerate_data<H: Hasher>(degree: usize) -> Pregenerated<H> {\n    assert_eq!(degree, 6 + 8);\n    let mut rng = thread_rng();\n    let size = degree * 4 * 1024 * 1024;\n    let api_version = ApiVersion::V1_0_0;\n    let data: Vec<u8> = (0..size)\n        .flat_map(|_| fr_into_bytes(&Fr::random(&mut rng)))\n        .collect();\n    let replica_id: H::Domain = H::Domain::random(&mut rng);\n\n    let graph = StackedBucketGraph::<H>::new_stacked(size, 6, 8, [32; 32], api_version).unwrap();\n\n    Pregenerated {\n        data,\n        replica_id,\n        graph,\n    }\n}\n\nfn kdf_benchmark(c: &mut Criterion) {\n    let degree = 14;\n    let Pregenerated {\n        data,\n        replica_id,\n        graph,\n    } = pregenerate_data::<Sha256Hasher>(degree);\n\n    let mut group = c.benchmark_group(\"kdf\");\n    group.sample_size(10);\n    group.throughput(Throughput::Bytes(\n        /* replica id + 37 parents + node id */ 39 * 32,\n    ));\n\n    group.bench_function(\"exp\", |b| {\n        let mut raw_data = data.clone();\n        raw_data.extend_from_slice(&data);\n        let (data, exp_data) = raw_data.split_at_mut(data.len());\n\n        let graph = &graph;\n\n        b.iter(|| {\n            black_box(create_label_exp(\n                graph,\n                None,\n                &replica_id,\n                &*exp_data,\n                data,\n                1,\n                2,\n            ))\n        })\n    });\n\n    group.bench_function(\"non-exp\", |b| {\n        let mut data = data.clone();\n        let graph = &graph;\n\n        b.iter(|| black_box(create_label(graph, None, &replica_id, &mut data, 1, 2)))\n    });\n\n    group.finish();\n}\n\ncriterion_group!(benches, kdf_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs-porep/benches/parents.rs",
    "content": "use criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};\nuse filecoin_hashers::{blake2s::Blake2sHasher, sha256::Sha256Hasher, Hasher};\n#[cfg(feature = \"cpu-profile\")]\nuse gperftools::profiler::PROFILER;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    drgraph::{Graph, BASE_DEGREE},\n};\nuse storage_proofs_porep::stacked::{StackedBucketGraph, EXP_DEGREE};\n\n#[cfg(feature = \"cpu-profile\")]\n#[inline(always)]\nfn start_profile(stage: &str) {\n    PROFILER\n        .lock()\n        .unwrap()\n        .start(format!(\"./{}.profile\", stage))\n        .unwrap();\n}\n\n#[cfg(not(feature = \"cpu-profile\"))]\n#[inline(always)]\nfn start_profile(_stage: &str) {}\n\n#[cfg(feature = \"cpu-profile\")]\n#[inline(always)]\nfn stop_profile() {\n    PROFILER.lock().unwrap().stop().unwrap();\n}\n\n#[cfg(not(feature = \"cpu-profile\"))]\n#[inline(always)]\nfn stop_profile() {}\n\nfn pregenerate_graph<H: Hasher>(size: usize, api_version: ApiVersion) -> StackedBucketGraph<H> {\n    StackedBucketGraph::<H>::new_stacked(size, BASE_DEGREE, EXP_DEGREE, [32; 32], api_version)\n        .unwrap()\n}\n\nfn parents_loop<H: Hasher, G: Graph<H>>(graph: &G, parents: &mut [u32]) {\n    (0..graph.size())\n        .map(|node| graph.parents(node, parents).unwrap())\n        .collect()\n}\n\n#[allow(clippy::unit_arg)]\nfn parents_loop_benchmark(cc: &mut Criterion) {\n    let sizes = vec![10, 50, 1000];\n\n    cc.bench(\n        \"parents in a loop\",\n        ParameterizedBenchmark::new(\n            \"Blake2s\",\n            |b, size| {\n                let graph = pregenerate_graph::<Blake2sHasher>(*size, ApiVersion::V1_1_0);\n                let mut parents = vec![0; graph.degree()];\n                start_profile(&format!(\"parents-blake2s-{}\", *size));\n                b.iter(|| black_box(parents_loop::<Blake2sHasher, _>(&graph, &mut parents)));\n                stop_profile();\n            },\n            sizes,\n        )\n        .with_function(\"Sha256\", |b, degree| {\n            let graph = pregenerate_graph::<Sha256Hasher>(*degree, ApiVersion::V1_1_0);\n            let mut parents = vec![0; graph.degree()];\n            b.iter(|| black_box(parents_loop::<Sha256Hasher, _>(&graph, &mut parents)))\n        }),\n    );\n}\n\ncriterion_group!(benches, parents_loop_benchmark);\ncriterion_main!(benches);\n"
  },
  {
    "path": "storage-proofs-porep/parent_cache.json",
    "content": "{\n  \"v28-sdr-parent-21981246c370f9d76c7a77ab273d94bde0ceb4e938292334960bce05585dc117\": {\n    \"sector_size\": 34359738368,\n    \"digest\": \"93deeac5e3052b6927467d4b2641bb782f05491de18d510147c93eeedd8672da\"\n  },\n  \"v28-sdr-parent-2aa9c77c3e58259481351cc4be2079cc71e1c9af39700866545c043bfa30fb42\": {\n    \"sector_size\": 536870912,\n    \"digest\": \"3adcc092423aa76d6a7184016893406da44dd974b219a89cd3ece25e4e3018f5\"\n  },\n  \"v28-sdr-parent-3f0eef38bb48af1f48ad65e14eb85b4ebfc167cec18cd81764f6d998836c9899\": {\n    \"sector_size\": 2048,\n    \"digest\": \"3da49221e2ed55371b86d0bf3d6526fcf128af61bed904f966428db1b531750d\"\n  },\n  \"v28-sdr-parent-4905486b7af19558ac3649bc6261411858b6add534438878c4ee3b29d8b9de0b\": {\n    \"sector_size\": 68719476736,\n    \"digest\": \"2698b74eb2606b55b98d8b095e18b6320b47f46e00075956d48640ccd1641b03\"\n  },\n  \"v28-sdr-parent-494d91dc80f2df5272c4b9e129bc7ade9405225993af9fe34e6542a39a47554b\": {\n    \"sector_size\": 2048,\n    \"digest\": \"840057702eea7652cf97e04306c30fe57174714d90de156a25eddd6075c25b97\"\n  },\n  \"v28-sdr-parent-5eed212119fd91aa6220a27f31a8966444a9381842bceb3a1ea61525bd47a5b5\": {\n    \"sector_size\": 8388608,\n    \"digest\": \"03cd13565ded97c240a5f52e54295ad127bd0461b57904cb3a4d79b097bbecab\"\n  },\n  \"v28-sdr-parent-7ba215a1d2345774ab90b8cb1158d296e409d6068819d7b8c7baf0b25d63dc34\": {\n    \"sector_size\": 536870912,\n    \"digest\": \"b5877d1963793efebc261fd8fde4dd6bc59e6b5c7abf52617a4ee023b8dc173a\"\n  },\n  \"v28-sdr-parent-8a99e8d6b6be7ab87a56b632e6739ff201c23ea14e99737c74690f0e265574d6\": {\n    \"sector_size\": 68719476736,\n    \"digest\": \"2778a732ad46a7dc18e0564dfdf59fd321dcde74ab476fd6d3c4e6735d7cd89c\"\n  },\n  \"v28-sdr-parent-dcdabb0fbe4364bf0ac28b6a18c66de246409fa1a9020a00f33fb3e3053da6dc\": {\n    \"sector_size\": 8388608,\n    \"digest\": \"a76604f2f59f2744c7151653bbb1d8596b6b57d295e6fa6c1f0c41d725b502ce\"\n  },\n  \"v28-sdr-parent-e1fa5d5b811ddbd118be3412c4a8c329156b8b8acc72632bca459455b5a05a13\": {\n    \"sector_size\": 34359738368,\n    \"digest\": \"3c4f9841fcc75aed8c695800e58d08480629f25af3a2aefd81904181d75cc0b6\"\n  }\n}"
  },
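  {
    "path": "storage-proofs-porep/examples/parent_cache_manifest.rs",
    "content": "//! A minimal sketch, not a file shipped with this crate: it shows how the\n//! `parent_cache.json` manifest above could be read. Each entry maps a\n//! parent-cache file name to the sector size it serves and the expected\n//! digest of the cache file. The path and struct names here are assumptions\n//! for illustration.\nuse std::collections::HashMap;\n\nuse serde::Deserialize;\n\n#[derive(Debug, Deserialize)]\nstruct ParentCacheEntry {\n    sector_size: u64,\n    digest: String,\n}\n\nfn main() -> anyhow::Result<()> {\n    // Read and parse the manifest with serde_json (already a dependency).\n    let raw = std::fs::read_to_string(\"storage-proofs-porep/parent_cache.json\")?;\n    let manifest: HashMap<String, ParentCacheEntry> = serde_json::from_str(&raw)?;\n    for (name, entry) in &manifest {\n        println!(\"{}: sector_size={} digest={}\", name, entry.sector_size, entry.digest);\n    }\n    Ok(())\n}\n"
  },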
  {
    "path": "storage-proofs-porep/src/drg/circuit.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::{\n    bls::{Bls12, Engine, Fr},\n    gadgets::{boolean::Boolean, multipack, num::AllocatedNum, sha256::sha256 as sha256_circuit},\n    Circuit, ConstraintSystem, SynthesisError,\n};\nuse ff::PrimeField;\nuse filecoin_hashers::Hasher;\nuse storage_proofs_core::{\n    compound_proof::CircuitComponent,\n    error::Result,\n    gadgets::{constraint, encode, por::PoRCircuit, uint64::UInt64, variables::Root},\n    merkle::BinaryMerkleTree,\n    util::reverse_bit_numbering,\n};\n\n/// DRG based Proof of Replication.\n///\n/// # Fields\n///\n/// * `params` - parameters for the curve\n///\n/// ----> Private `replica_node` - The replica node being proven.\n///\n/// * `replica_node` - The replica node being proven.\n/// * `replica_node_path` - The path of the replica node being proven.\n/// * `replica_root` - The merkle root of the replica.\n///\n/// * `replica_parents` - A list of all parents in the replica, with their value.\n/// * `replica_parents_paths` - A list of all parents paths in the replica.\n///\n/// ----> Private `data_node` - The data node being proven.\n///\n/// * `data_node_path` - The path of the data node being proven.\n/// * `data_root` - The merkle root of the data.\n/// * `replica_id` - The id of the replica.\n///\n\npub struct DrgPoRepCircuit<'a, H: Hasher> {\n    pub replica_nodes: Vec<Option<Fr>>,\n    #[allow(clippy::type_complexity)]\n    pub replica_nodes_paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n    pub replica_root: Root<Bls12>,\n    pub replica_parents: Vec<Vec<Option<Fr>>>,\n    #[allow(clippy::type_complexity)]\n    pub replica_parents_paths: Vec<Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>>,\n    pub data_nodes: Vec<Option<Fr>>,\n    #[allow(clippy::type_complexity)]\n    pub data_nodes_paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n    pub data_root: Root<Bls12>,\n    pub replica_id: Option<Fr>,\n    pub private: bool,\n    pub _h: PhantomData<&'a H>,\n}\n\nimpl<'a, H: 'static + Hasher> DrgPoRepCircuit<'a, H> {\n    #[allow(clippy::type_complexity, clippy::too_many_arguments)]\n    pub fn synthesize<CS>(\n        mut cs: CS,\n        replica_nodes: Vec<Option<Fr>>,\n        replica_nodes_paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n        replica_root: Root<Bls12>,\n        replica_parents: Vec<Vec<Option<Fr>>>,\n        replica_parents_paths: Vec<Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>>,\n        data_nodes: Vec<Option<Fr>>,\n        data_nodes_paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n        data_root: Root<Bls12>,\n        replica_id: Option<Fr>,\n        private: bool,\n    ) -> Result<(), SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        DrgPoRepCircuit::<H> {\n            replica_nodes,\n            replica_nodes_paths,\n            replica_root,\n            replica_parents,\n            replica_parents_paths,\n            data_nodes,\n            data_nodes_paths,\n            data_root,\n            replica_id,\n            private,\n            _h: Default::default(),\n        }\n        .synthesize(&mut cs)\n    }\n}\n\n#[derive(Default, Clone)]\npub struct ComponentPrivateInputs {\n    pub comm_r: Option<Root<Bls12>>,\n    pub comm_d: Option<Root<Bls12>>,\n}\n\nimpl<'a, H: Hasher> CircuitComponent for DrgPoRepCircuit<'a, H> {\n    type ComponentPrivateInputs = ComponentPrivateInputs;\n}\n\n///\n/// # Public Inputs\n///\n/// * [0] replica_id/0\n/// * [1] replica_id/1\n/// * [2] replica auth_path_bits\n/// * [3] replica 
commitment (root hash)\n/// * for i in 0..replica_parents.len()\n///   * [ ] replica parent auth_path_bits\n///   * [ ] replica parent commitment (root hash) // Same for all.\n/// * [r + 1] data auth_path_bits\n/// * [r + 2] data commitment (root hash)\n///\n/// Total = 6 + (2 * replica_parents.len())\n///\n/// # Private Inputs\n///\n/// * [ ] replica value/0\n/// * for i in 0..replica_parents.len()\n///   * [ ] replica parent value/0\n/// * [ ] data value/0\n///\n/// Total = 2 + replica_parents.len()\n///\nimpl<'a, H: 'static + Hasher> Circuit<Bls12> for DrgPoRepCircuit<'a, H> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let replica_id = self.replica_id;\n        let replica_root = self.replica_root;\n        let data_root = self.data_root;\n\n        let nodes = self.data_nodes.len();\n\n        assert_eq!(self.replica_nodes.len(), nodes);\n        assert_eq!(self.replica_nodes_paths.len(), nodes);\n        assert_eq!(self.replica_parents.len(), nodes);\n        assert_eq!(self.replica_parents_paths.len(), nodes);\n        assert_eq!(self.data_nodes_paths.len(), nodes);\n\n        let replica_node_num = AllocatedNum::alloc(cs.namespace(|| \"replica_id_num\"), || {\n            replica_id.ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        replica_node_num.inputize(cs.namespace(|| \"replica_id\"))?;\n\n        // get the replica_id in bits\n        let replica_id_bits =\n            reverse_bit_numbering(replica_node_num.to_bits_le(cs.namespace(|| \"replica_id_bits\"))?);\n\n        let replica_root_var = Root::Var(replica_root.allocated(cs.namespace(|| \"replica_root\"))?);\n        let data_root_var = Root::Var(data_root.allocated(cs.namespace(|| \"data_root\"))?);\n\n        for i in 0..self.data_nodes.len() {\n            let mut cs = cs.namespace(|| format!(\"challenge_{}\", i));\n            // ensure that all inputs are well formed\n            let replica_node_path = &self.replica_nodes_paths[i];\n            let replica_parents_paths = &self.replica_parents_paths[i];\n            let data_node_path = &self.data_nodes_paths[i];\n\n            let replica_node = &self.replica_nodes[i];\n            let replica_parents = &self.replica_parents[i];\n            let data_node = &self.data_nodes[i];\n\n            assert_eq!(replica_parents.len(), replica_parents_paths.len());\n            assert_eq!(data_node_path.len(), replica_node_path.len());\n            assert_eq!(replica_node.is_some(), data_node.is_some());\n\n            // Inclusion checks\n            {\n                let mut cs = cs.namespace(|| \"inclusion_checks\");\n                PoRCircuit::<BinaryMerkleTree<H>>::synthesize(\n                    cs.namespace(|| \"replica_inclusion\"),\n                    Root::Val(*replica_node),\n                    replica_node_path.clone().into(),\n                    replica_root_var.clone(),\n                    self.private,\n                )?;\n\n                // validate each replica_parents merkle proof\n                for j in 0..replica_parents.len() {\n                    PoRCircuit::<BinaryMerkleTree<H>>::synthesize(\n                        cs.namespace(|| format!(\"parents_inclusion_{}\", j)),\n                        Root::Val(replica_parents[j]),\n                        replica_parents_paths[j].clone().into(),\n                        replica_root_var.clone(),\n                        self.private,\n                    )?;\n                }\n\n                // validate data node commitment\n                PoRCircuit::<BinaryMerkleTree<H>>::synthesize(\n                    cs.namespace(|| \"data_inclusion\"),\n                    Root::Val(*data_node),\n                    data_node_path.clone().into(),\n                    data_root_var.clone(),\n                    self.private,\n                )?;\n            }\n\n            // Encoding checks\n            {\n                let mut cs = cs.namespace(|| \"encoding_checks\");\n                // get the parents into bits\n                let parents_bits: Vec<Vec<Boolean>> = replica_parents\n                    .iter()\n                    .enumerate()\n                    .map(|(i, val)| {\n                        let num = AllocatedNum::alloc(\n                            cs.namespace(|| format!(\"parents_{}_num\", i)),\n                            || val.map(Into::into).ok_or(SynthesisError::AssignmentMissing),\n                        )?;\n                        Ok(reverse_bit_numbering(num.to_bits_le(\n                            cs.namespace(|| format!(\"parents_{}_bits\", i)),\n                        )?))\n                    })\n                    .collect::<Result<Vec<Vec<Boolean>>, SynthesisError>>()?;\n\n                // generate the encryption key\n                let key = kdf(\n                    cs.namespace(|| \"kdf\"),\n                    &replica_id_bits,\n                    parents_bits,\n                    None,\n                    None,\n                )?;\n\n                let replica_node_num =\n                    AllocatedNum::alloc(cs.namespace(|| \"replica_node\"), || {\n                        (*replica_node).ok_or(SynthesisError::AssignmentMissing)\n                    })?;\n\n                let decoded = encode::decode(cs.namespace(|| \"decode\"), &key, &replica_node_num)?;\n\n                // TODO this should not be here, instead, this should be the leaf Fr in the data_auth_path\n                // TODO also note that we need to change/make sure that the leaves are the data, instead of hashes of the data\n                let expected = AllocatedNum::alloc(cs.namespace(|| \"data node\"), || {\n                    data_node.ok_or(SynthesisError::AssignmentMissing)\n                })?;\n\n                // ensure the encrypted data and data_node match\n                constraint::equal(&mut cs, || \"equality\", &expected, &decoded);\n            }\n        }\n        // profit!\n        Ok(())\n    }\n}\n\n/// Key derivation function.\nfn kdf<E, CS>(\n    mut cs: CS,\n    id: &[Boolean],\n    parents: Vec<Vec<Boolean>>,\n    window_index: Option<UInt64>,\n    node: Option<UInt64>,\n) -> Result<AllocatedNum<E>, SynthesisError>\nwhere\n    E: Engine,\n    CS: ConstraintSystem<E>,\n{\n    // ciphertexts will become a buffer of the layout\n    // id | node | encodedParentNode1 | encodedParentNode2 | ...\n\n    let mut ciphertexts = id.to_vec();\n\n    if let Some(window_index) = window_index {\n        ciphertexts.extend_from_slice(&window_index.to_bits_be());\n    }\n\n    if let Some(node) = node {\n        ciphertexts.extend_from_slice(&node.to_bits_be());\n    }\n\n    for parent in parents.into_iter() {\n        ciphertexts.extend_from_slice(&parent);\n    }\n\n    let alloc_bits = sha256_circuit(cs.namespace(|| \"hash\"), &ciphertexts[..])?;\n    let fr = if alloc_bits[0].get_value().is_some() {\n        let be_bits = alloc_bits\n            .iter()\n            .map(|v| v.get_value().ok_or(SynthesisError::AssignmentMissing))\n            .collect::<Result<Vec<bool>, SynthesisError>>()?;\n\n        let le_bits = be_bits\n            .chunks(8)\n            .flat_map(|chunk| chunk.iter().rev())\n            .copied()\n            .take(E::Fr::CAPACITY as usize)\n            .collect::<Vec<bool>>();\n\n        Ok(multipack::compute_multipacking::<E>(&le_bits)[0])\n    } else {\n        Err(SynthesisError::AssignmentMissing)\n    };\n\n    AllocatedNum::<E>::alloc(cs.namespace(|| \"result_num\"), || fr)\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/drg/compound.rs",
    "content": "use std::marker::PhantomData;\n\nuse anyhow::{ensure, Context};\nuse bellperson::bls::{Bls12, Fr};\nuse bellperson::Circuit;\nuse filecoin_hashers::Hasher;\nuse generic_array::typenum;\nuse storage_proofs_core::{\n    compound_proof::{CircuitComponent, CompoundProof},\n    drgraph::Graph,\n    error::Result,\n    gadgets::por::PoRCompound,\n    gadgets::variables::Root,\n    merkle::{BinaryMerkleTree, MerkleProofTrait},\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    por,\n    proof::ProofScheme,\n};\nuse typenum::U2;\n\nuse crate::drg::{DrgPoRep, DrgPoRepCircuit};\n\n/// DRG based Proof of Replication.\n///\n/// # Fields\n///\n/// * `params` - parameters for the curve\n///\n/// ----> Private `replica_node` - The replica node being proven.\n///\n/// * `replica_node` - The replica node being proven.\n/// * `replica_node_path` - The path of the replica node being proven.\n/// * `replica_root` - The merkle root of the replica.\n///\n/// * `replica_parents` - A list of all parents in the replica, with their value.\n/// * `replica_parents_paths` - A list of all parents paths in the replica.\n///\n/// ----> Private `data_node` - The data node being proven.\n///\n/// * `data_node_path` - The path of the data node being proven.\n/// * `data_root` - The merkle root of the data.\n/// * `replica_id` - The id of the replica.\n///\n\npub struct DrgPoRepCompound<H, G>\nwhere\n    H: Hasher,\n    G::Key: AsRef<H::Domain>,\n    G: Graph<H>,\n{\n    // Sad phantom is sad\n    _h: PhantomData<H>,\n    _g: PhantomData<G>,\n}\n\nimpl<C: Circuit<Bls12>, H: Hasher, G: Graph<H>, P: ParameterSetMetadata> CacheableParameters<C, P>\n    for DrgPoRepCompound<H, G>\nwhere\n    G::Key: AsRef<H::Domain>,\n{\n    fn cache_prefix() -> String {\n        format!(\"drg-proof-of-replication-{}\", H::name())\n    }\n}\n\nimpl<'a, H, G> CompoundProof<'a, DrgPoRep<'a, H, G>, DrgPoRepCircuit<'a, H>>\n    for DrgPoRepCompound<H, G>\nwhere\n    H: 'static + Hasher,\n    G::Key: AsRef<<H as Hasher>::Domain>,\n    G: 'a + Graph<H> + ParameterSetMetadata + Sync + Send,\n{\n    fn generate_public_inputs(\n        pub_in: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicParams,\n        // We can ignore k because challenges are generated by caller and included\n        // in PublicInputs.\n        _k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        let replica_id = pub_in.replica_id.context(\"missing replica id\")?;\n        let challenges = &pub_in.challenges;\n\n        ensure!(\n            pub_in.tau.is_none() == pub_params.private,\n            \"Public input parameter tau must be unset\"\n        );\n\n        let (comm_r, comm_d) = match pub_in.tau {\n            None => (None, None),\n            Some(tau) => (Some(tau.comm_r), Some(tau.comm_d)),\n        };\n\n        let leaves = pub_params.graph.size();\n\n        let por_pub_params = por::PublicParams {\n            leaves,\n            private: pub_params.private,\n        };\n\n        let mut input: Vec<Fr> = vec![replica_id.into()];\n\n        let mut parents = vec![0; pub_params.graph.degree()];\n        for challenge in challenges {\n            let mut por_nodes = vec![*challenge as u32];\n            pub_params.graph.parents(*challenge, &mut parents)?;\n            por_nodes.extend_from_slice(&parents);\n\n            for node in por_nodes {\n                let por_pub_inputs = por::PublicInputs {\n                    commitment: comm_r,\n         
           challenge: node as usize,\n                };\n                let por_inputs = PoRCompound::<BinaryMerkleTree<H>>::generate_public_inputs(\n                    &por_pub_inputs,\n                    &por_pub_params,\n                    None,\n                )?;\n\n                input.extend(por_inputs);\n            }\n\n            let por_pub_inputs = por::PublicInputs {\n                commitment: comm_d,\n                challenge: *challenge,\n            };\n\n            let por_inputs = PoRCompound::<BinaryMerkleTree<H>>::generate_public_inputs(\n                &por_pub_inputs,\n                &por_pub_params,\n                None,\n            )?;\n            input.extend(por_inputs);\n        }\n        Ok(input)\n    }\n\n    fn circuit(\n        public_inputs: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicInputs,\n        component_private_inputs: <DrgPoRepCircuit<'_, H> as CircuitComponent>::ComponentPrivateInputs,\n        proof: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::Proof,\n        public_params: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<DrgPoRepCircuit<'a, H>> {\n        let challenges = public_params.challenges_count;\n        let len = proof.nodes.len();\n\n        ensure!(len <= challenges, \"too many challenges\");\n        ensure!(\n            proof.replica_parents.len() == len,\n            \"Number of replica parents must match\"\n        );\n        ensure!(\n            proof.replica_nodes.len() == len,\n            \"Number of replica nodes must match\"\n        );\n\n        let replica_nodes: Vec<_> = proof\n            .replica_nodes\n            .iter()\n            .map(|node| Some(node.data.into()))\n            .collect();\n\n        let replica_nodes_paths: Vec<_> = proof\n            .replica_nodes\n            .iter()\n            .map(|node| node.proof.as_options())\n            .collect();\n\n        let is_private = public_params.private;\n\n        let (data_root, replica_root) = if is_private {\n            (\n                component_private_inputs.comm_d.context(\"is_private\")?,\n                component_private_inputs.comm_r.context(\"is_private\")?,\n            )\n        } else {\n            (\n                Root::Val(Some(proof.data_root.into())),\n                Root::Val(Some(proof.replica_root.into())),\n            )\n        };\n\n        let replica_id = public_inputs.replica_id;\n\n        let replica_parents: Vec<_> = proof\n            .replica_parents\n            .iter()\n            .map(|parents| {\n                parents\n                    .iter()\n                    .map(|(_, parent)| Some(parent.data.into()))\n                    .collect()\n            })\n            .collect();\n\n        let replica_parents_paths: Vec<Vec<_>> = proof\n            .replica_parents\n            .iter()\n            .map(|parents| {\n                let p: Vec<_> = parents\n                    .iter()\n                    .map(|(_, parent)| parent.proof.as_options())\n                    .collect();\n                p\n            })\n            .collect();\n\n        let data_nodes: Vec<_> = proof\n            .nodes\n            .iter()\n            .map(|node| Some(node.data.into()))\n            .collect();\n\n        let data_nodes_paths: Vec<_> = proof\n            .nodes\n            .iter()\n            .map(|node| node.proof.as_options())\n            .collect();\n\n        ensure!(\n            public_inputs.tau.is_none() == 
public_params.private,\n            \"inconsistent private state\"\n        );\n\n        Ok(DrgPoRepCircuit {\n            replica_nodes,\n            replica_nodes_paths,\n            replica_root,\n            replica_parents,\n            replica_parents_paths,\n            data_nodes,\n            data_nodes_paths,\n            data_root,\n            replica_id: replica_id.map(Into::into),\n            private: public_params.private,\n            _h: Default::default(),\n        })\n    }\n\n    fn blank_circuit(\n        public_params: &<DrgPoRep<'a, H, G> as ProofScheme<'a>>::PublicParams,\n    ) -> DrgPoRepCircuit<'a, H> {\n        let depth = public_params.graph.merkle_tree_depth::<U2>() as usize;\n        let degree = public_params.graph.degree();\n        let arity = 2;\n\n        let challenges_count = public_params.challenges_count;\n\n        let replica_nodes = vec![None; challenges_count];\n        let replica_nodes_paths =\n            vec![vec![(vec![None; arity - 1], None); depth - 1]; challenges_count];\n\n        let replica_root = Root::Val(None);\n        let replica_parents = vec![vec![None; degree]; challenges_count];\n        let replica_parents_paths =\n            vec![vec![vec![(vec![None; arity - 1], None); depth - 1]; degree]; challenges_count];\n        let data_nodes = vec![None; challenges_count];\n        let data_nodes_paths =\n            vec![vec![(vec![None; arity - 1], None); depth - 1]; challenges_count];\n        let data_root = Root::Val(None);\n\n        DrgPoRepCircuit {\n            replica_nodes,\n            replica_nodes_paths,\n            replica_root,\n            replica_parents,\n            replica_parents_paths,\n            data_nodes,\n            data_nodes_paths,\n            data_root,\n            replica_id: None,\n            private: public_params.private,\n            _h: Default::default(),\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/drg/mod.rs",
    "content": "mod circuit;\nmod compound;\nmod vanilla;\n\npub use circuit::*;\npub use compound::*;\npub use vanilla::*;\n"
  },
  {
    "path": "storage-proofs-porep/src/drg/vanilla.rs",
    "content": "use std::marker::PhantomData;\nuse std::path::PathBuf;\n\nuse anyhow::{ensure, Context};\nuse filecoin_hashers::{Domain, HashFunction, Hasher, PoseidonArity};\nuse fr32::bytes_into_fr_repr_safe;\nuse generic_array::typenum::U2;\nuse merkletree::store::{ReplicaConfig, StoreConfig};\nuse rayon::prelude::{IntoParallelIterator, ParallelIterator};\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    cache_key::CacheKey,\n    crypto::sloth,\n    drgraph::Graph,\n    error::Result,\n    merkle::{\n        create_base_lcmerkle_tree, create_base_merkle_tree, BinaryLCMerkleTree, BinaryMerkleTree,\n        LCMerkleTree, MerkleProof, MerkleProofTrait, MerkleTreeTrait,\n    },\n    parameter_cache::ParameterSetMetadata,\n    proof::{NoRequirements, ProofScheme},\n    util::{data_at_node, data_at_node_offset, NODE_SIZE},\n    Data,\n};\n\nuse crate::{encode, PoRep};\n\n#[derive(Debug, Clone, Copy, Serialize, Deserialize)]\npub struct Tau<T> {\n    pub comm_r: T,\n    pub comm_d: T,\n}\n\nimpl<T: Domain> Tau<T> {\n    pub fn new(comm_d: T, comm_r: T) -> Self {\n        Tau { comm_d, comm_r }\n    }\n}\n\n#[derive(Debug)]\npub struct ProverAux<H: Hasher> {\n    pub tree_d: BinaryMerkleTree<H>,\n    pub tree_r: BinaryLCMerkleTree<H>,\n}\n\nimpl<H: Hasher> ProverAux<H> {\n    pub fn new(tree_d: BinaryMerkleTree<H>, tree_r: BinaryLCMerkleTree<H>) -> Self {\n        ProverAux { tree_d, tree_r }\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct PublicInputs<T: Domain> {\n    #[serde(bound = \"\")]\n    pub replica_id: Option<T>,\n    pub challenges: Vec<usize>,\n    #[serde(bound = \"\")]\n    pub tau: Option<Tau<T>>,\n}\n\n#[derive(Debug)]\npub struct PrivateInputs<'a, H: Hasher> {\n    pub tree_d: &'a BinaryMerkleTree<H>,\n    pub tree_r: &'a BinaryLCMerkleTree<H>,\n    pub tree_r_config_rows_to_discard: usize,\n}\n\n#[derive(Clone, Debug)]\npub struct SetupParams {\n    pub drg: DrgParams,\n    pub private: bool,\n    pub challenges_count: usize,\n    pub api_version: ApiVersion,\n}\n\n#[derive(Debug, Clone)]\npub struct DrgParams {\n    // Number of nodes\n    pub nodes: usize,\n\n    // Base degree of DRG\n    pub degree: usize,\n\n    pub expansion_degree: usize,\n\n    pub porep_id: [u8; 32],\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata,\n{\n    pub graph: G,\n    pub private: bool,\n    pub challenges_count: usize,\n\n    _h: PhantomData<H>,\n}\n\nimpl<H, G> PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata,\n{\n    pub fn new(graph: G, private: bool, challenges_count: usize) -> Self {\n        PublicParams {\n            graph,\n            private,\n            challenges_count,\n            _h: PhantomData,\n        }\n    }\n}\n\nimpl<H, G> ParameterSetMetadata for PublicParams<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata,\n{\n    fn identifier(&self) -> String {\n        format!(\n            \"drgporep::PublicParams{{graph: {}}}\",\n            self.graph.identifier(),\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.graph.sector_size()\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct DataProof<H: Hasher, U: PoseidonArity> {\n    #[serde(bound(\n        serialize = \"MerkleProof<H, U>: Serialize\",\n        deserialize = \"MerkleProof<H, U>: Deserialize<'de>\"\n    ))]\n    pub proof: MerkleProof<H, U>,\n  
  pub data: H::Domain,\n}\n\nimpl<H: Hasher, U: 'static + PoseidonArity> DataProof<H, U> {\n    pub fn new(n: usize) -> Self {\n        DataProof {\n            proof: MerkleProof::new(n),\n            data: Default::default(),\n        }\n    }\n\n    /// proves_challenge returns true if this self.proof corresponds to challenge.\n    /// This is useful for verifying that a supplied proof is actually relevant to a given challenge.\n    pub fn proves_challenge(&self, challenge: usize) -> bool {\n        self.proof.proves_challenge(challenge)\n    }\n}\n\npub type ReplicaParents<H> = Vec<(u32, DataProof<H, U2>)>;\n\n#[derive(Default, Debug, Clone, Serialize, Deserialize)]\npub struct Proof<H: Hasher> {\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub data_root: H::Domain,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub replica_root: H::Domain,\n    #[serde(bound(\n        serialize = \"DataProof<H, U2>: Serialize\",\n        deserialize = \"DataProof<H, U2>: Deserialize<'de>\"\n    ))]\n    pub replica_nodes: Vec<DataProof<H, U2>>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub replica_parents: Vec<ReplicaParents<H>>,\n    #[serde(bound(\n        serialize = \"H::Domain: Serialize\",\n        deserialize = \"H::Domain: Deserialize<'de>\"\n    ))]\n    pub nodes: Vec<DataProof<H, U2>>,\n}\n\nimpl<H: Hasher> Proof<H> {\n    pub fn new_empty(height: usize, degree: usize, challenges: usize) -> Proof<H> {\n        Proof {\n            data_root: Default::default(),\n            replica_root: Default::default(),\n            replica_nodes: vec![DataProof::new(height); challenges],\n            replica_parents: vec![vec![(0, DataProof::new(height)); degree]; challenges],\n            nodes: vec![DataProof::new(height); challenges],\n        }\n    }\n\n    pub fn new(\n        replica_nodes: Vec<DataProof<H, U2>>,\n        replica_parents: Vec<ReplicaParents<H>>,\n        nodes: Vec<DataProof<H, U2>>,\n    ) -> Proof<H> {\n        Proof {\n            data_root: nodes[0].proof.root(),\n            replica_root: replica_nodes[0].proof.root(),\n            replica_nodes,\n            replica_parents,\n            nodes,\n        }\n    }\n}\n\nimpl<'a, H: Hasher> From<&'a Proof<H>> for Proof<H> {\n    fn from(p: &Proof<H>) -> Proof<H> {\n        Proof {\n            data_root: p.nodes[0].proof.root(),\n            replica_root: p.replica_nodes[0].proof.root(),\n            replica_nodes: p.replica_nodes.clone(),\n            replica_parents: p.replica_parents.clone(),\n            nodes: p.nodes.clone(),\n        }\n    }\n}\n\n#[derive(Default)]\npub struct DrgPoRep<'a, H, G>\nwhere\n    H: Hasher,\n    G: 'a + Graph<H>,\n{\n    _h: PhantomData<&'a H>,\n    _g: PhantomData<G>,\n}\n\nimpl<'a, H, G> ProofScheme<'a> for DrgPoRep<'a, H, G>\nwhere\n    H: 'static + Hasher,\n    G: 'a + Graph<H> + ParameterSetMetadata,\n{\n    type PublicParams = PublicParams<H, G>;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<H as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<'a, H>;\n    type Proof = Proof<H>;\n    type Requirements = NoRequirements;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        let graph = G::new(\n            sp.drg.nodes,\n            sp.drg.degree,\n   
         sp.drg.expansion_degree,\n            sp.drg.porep_id,\n            sp.api_version,\n        )?;\n\n        Ok(PublicParams::new(graph, sp.private, sp.challenges_count))\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        let len = pub_inputs.challenges.len();\n        ensure!(\n            len <= pub_params.challenges_count,\n            \"too many challenges {} > {}\",\n            len,\n            pub_params.challenges_count\n        );\n\n        let mut replica_nodes = Vec::with_capacity(len);\n        let mut replica_parents = Vec::with_capacity(len);\n        let mut data_nodes: Vec<DataProof<H, U2>> = Vec::with_capacity(len);\n\n        for i in 0..len {\n            let challenge = pub_inputs.challenges[i] % pub_params.graph.size();\n            ensure!(challenge != 0, \"cannot prove the first node\");\n\n            let tree_d = &priv_inputs.tree_d;\n            let tree_r = &priv_inputs.tree_r;\n            let tree_r_config_rows_to_discard = priv_inputs.tree_r_config_rows_to_discard;\n\n            let data = tree_r.read_at(challenge)?;\n            let tree_proof =\n                tree_r.gen_cached_proof(challenge, Some(tree_r_config_rows_to_discard))?;\n            replica_nodes.push(DataProof {\n                proof: tree_proof,\n                data,\n            });\n\n            let mut parents = vec![0; pub_params.graph.degree()];\n            pub_params.graph.parents(challenge, &mut parents)?;\n            let mut replica_parentsi = Vec::with_capacity(parents.len());\n\n            for p in &parents {\n                replica_parentsi.push((*p, {\n                    let proof = tree_r\n                        .gen_cached_proof(*p as usize, Some(tree_r_config_rows_to_discard))?;\n                    DataProof {\n                        proof,\n                        data: tree_r.read_at(*p as usize)?,\n                    }\n                }));\n            }\n\n            replica_parents.push(replica_parentsi);\n\n            let node_proof = tree_d.gen_proof(challenge)?;\n\n            {\n                // TODO: use this again, I can't make lifetimes work though atm and I do not know why\n                // let extracted = Self::extract(\n                //     pub_params,\n                //     &pub_inputs.replica_id.into_bytes(),\n                //     &replica,\n                //     challenge,\n                // )?;\n\n                let extracted = decode_domain_block::<H>(\n                    &pub_inputs.replica_id.context(\"missing replica_id\")?,\n                    tree_r,\n                    challenge,\n                    tree_r.read_at(challenge)?,\n                    &parents,\n                )?;\n                data_nodes.push(DataProof {\n                    data: extracted,\n                    proof: node_proof,\n                });\n            }\n        }\n\n        let proof = Proof::new(replica_nodes, replica_parents, data_nodes);\n\n        Ok(proof)\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        let mut hasher = Sha256::new();\n\n        for i in 0..pub_inputs.challenges.len() {\n            {\n                // This was verify_proof_meta.\n                if pub_inputs.challenges[i] >= pub_params.graph.size() {\n                    return 
Ok(false);\n                }\n\n                if !(proof.nodes[i].proves_challenge(pub_inputs.challenges[i])) {\n                    return Ok(false);\n                }\n\n                if !(proof.replica_nodes[i].proves_challenge(pub_inputs.challenges[i])) {\n                    return Ok(false);\n                }\n\n                let mut expected_parents = vec![0; pub_params.graph.degree()];\n                pub_params\n                    .graph\n                    .parents(pub_inputs.challenges[i], &mut expected_parents)?;\n                if proof.replica_parents[i].len() != expected_parents.len() {\n                    println!(\n                        \"proof parents were not the same length as in public parameters: {} != {}\",\n                        proof.replica_parents[i].len(),\n                        expected_parents.len()\n                    );\n                    return Ok(false);\n                }\n\n                let parents_as_expected = proof.replica_parents[i]\n                    .iter()\n                    .zip(&expected_parents)\n                    .all(|(actual, expected)| actual.0 == *expected);\n\n                if !parents_as_expected {\n                    println!(\"proof parents were not those provided in public parameters\");\n                    return Ok(false);\n                }\n            }\n\n            let challenge = pub_inputs.challenges[i] % pub_params.graph.size();\n            ensure!(challenge != 0, \"cannot prove the first node\");\n\n            if !proof.replica_nodes[i].proof.validate(challenge) {\n                return Ok(false);\n            }\n\n            for (parent_node, p) in &proof.replica_parents[i] {\n                if !p.proof.validate(*parent_node as usize) {\n                    return Ok(false);\n                }\n            }\n\n            let key = {\n                let prover_bytes = pub_inputs.replica_id.context(\"missing replica_id\")?;\n                hasher.update(AsRef::<[u8]>::as_ref(&prover_bytes));\n\n                for p in proof.replica_parents[i].iter() {\n                    hasher.update(AsRef::<[u8]>::as_ref(&p.1.data));\n                }\n\n                let hash = hasher.finalize_reset();\n                bytes_into_fr_repr_safe(hash.as_ref()).into()\n            };\n\n            let unsealed = encode::decode(key, proof.replica_nodes[i].data);\n\n            if unsealed != proof.nodes[i].data {\n                return Ok(false);\n            }\n\n            if !proof.nodes[i].proof.validate_data(unsealed) {\n                println!(\"invalid data for merkle path {:?}\", unsealed);\n                return Ok(false);\n            }\n        }\n\n        Ok(true)\n    }\n}\n\nimpl<'a, H, G> PoRep<'a, H, H> for DrgPoRep<'a, H, G>\nwhere\n    H: 'static + Hasher,\n    G::Key: AsRef<<H as Hasher>::Domain>,\n    G: 'a + Graph<H> + ParameterSetMetadata + Sync + Send,\n{\n    type Tau = Tau<<H as Hasher>::Domain>;\n    type ProverAux = ProverAux<H>;\n\n    fn replicate(\n        pp: &Self::PublicParams,\n        replica_id: &<H as Hasher>::Domain,\n        mut data: Data<'a>,\n        data_tree: Option<BinaryMerkleTree<H>>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<(Self::Tau, Self::ProverAux)> {\n        let tree_d = match data_tree {\n            Some(tree) => tree,\n            None => create_base_merkle_tree::<BinaryMerkleTree<H>>(\n                Some(config.clone()),\n                pp.graph.size(),\n                data.as_ref(),\n     
       )?,\n        };\n\n        let graph = &pp.graph;\n        // encode(&pp.graph, replica_id, data, None)?;\n        // Because a node always follows all of its parents in the data,\n        // the nodes are by definition already topologically sorted.\n        // Therefore, if we simply traverse the data in order, encoding each node in place,\n        // we can always get each parent's encodings with a simple lookup --\n        // since we will already have encoded the parent earlier in the traversal.\n\n        let mut parents = vec![0; graph.degree()];\n        for node in 0..graph.size() {\n            graph.parents(node, &mut parents)?;\n            let key = graph.create_key(replica_id, node, &parents, data.as_ref(), None)?;\n            let start = data_at_node_offset(node);\n            let end = start + NODE_SIZE;\n\n            let node_data = <H as Hasher>::Domain::try_from_bytes(&data.as_ref()[start..end])?;\n            let encoded: H::Domain = sloth_encode::<H>(key.as_ref(), &node_data)?;\n\n            encoded.write_bytes(&mut data.as_mut()[start..end])?;\n        }\n\n        let replica_config = ReplicaConfig {\n            path: replica_path,\n            offsets: vec![0],\n        };\n        let tree_r_last_config =\n            StoreConfig::from_config(&config, CacheKey::CommRLastTree.to_string(), None);\n        let tree_r =\n            create_base_lcmerkle_tree::<H, <BinaryLCMerkleTree<H> as MerkleTreeTrait>::Arity>(\n                tree_r_last_config,\n                pp.graph.size(),\n                &data.as_ref(),\n                &replica_config,\n            )?;\n\n        let comm_d = tree_d.root();\n        let comm_r = tree_r.root();\n\n        Ok((Tau::new(comm_d, comm_r), ProverAux::new(tree_d, tree_r)))\n    }\n\n    fn extract_all<'b>(\n        pp: &'b Self::PublicParams,\n        replica_id: &'b <H as Hasher>::Domain,\n        data: &'b mut [u8],\n        _config: Option<StoreConfig>,\n    ) -> Result<()> {\n        decode(&pp.graph, replica_id, data, None)\n    }\n\n    fn extract(\n        pp: &Self::PublicParams,\n        replica_id: &<H as Hasher>::Domain,\n        data: &mut [u8],\n        node: usize,\n        _config: Option<StoreConfig>,\n    ) -> Result<()> {\n        let block = decode_block(&pp.graph, replica_id, &data, None, node)?;\n        let start = node * NODE_SIZE;\n        let end = start + NODE_SIZE;\n        let dest = &mut data[start..end];\n        dest.copy_from_slice(AsRef::<[u8]>::as_ref(&block));\n\n        Ok(())\n    }\n}\n\npub fn decode<'a, H, G>(\n    graph: &'a G,\n    replica_id: &'a <H as Hasher>::Domain,\n    data: &'a mut [u8],\n    exp_parents_data: Option<&'a [u8]>,\n) -> Result<()>\nwhere\n    H: Hasher,\n    G::Key: AsRef<H::Domain>,\n    G: Graph<H> + Sync,\n{\n    // TODO: proper error handling\n    let result: Vec<u8> = (0..graph.size())\n        .into_par_iter()\n        .flat_map(|i| {\n            decode_block::<H, G>(graph, replica_id, data, exp_parents_data, i)\n                .expect(\"decode block failure\")\n                .into_bytes()\n        })\n        .collect();\n\n    data.copy_from_slice(&result);\n    Ok(())\n}\n\npub fn decode_block<'a, H, G>(\n    graph: &'a G,\n    replica_id: &'a <H as Hasher>::Domain,\n    data: &'a [u8],\n    exp_parents_data: Option<&'a [u8]>,\n    v: usize,\n) -> Result<<H as Hasher>::Domain>\nwhere\n    H: Hasher,\n    G::Key: AsRef<H::Domain>,\n    G: Graph<H>,\n{\n    let mut parents = vec![0; graph.degree()];\n    graph.parents(v, &mut parents)?;\n    let 
key = graph.create_key(replica_id, v, &parents, &data, exp_parents_data)?;\n    let node_data = <H as Hasher>::Domain::try_from_bytes(&data_at_node(data, v)?)?;\n\n    Ok(encode::decode(*key.as_ref(), node_data))\n}\n\npub fn decode_domain_block<H: Hasher>(\n    replica_id: &H::Domain,\n    tree: &BinaryLCMerkleTree<H>,\n    node: usize,\n    node_data: H::Domain,\n    parents: &[u32],\n) -> Result<H::Domain>\nwhere\n    H: Hasher,\n{\n    let key = create_key_from_tree::<H, _>(replica_id, node, parents, tree)?;\n\n    Ok(encode::decode(key, node_data))\n}\n\n/// Creates the encoding key from a `MerkleTree`.\n/// The algorithm for that is `Sha256(id | encodedParentNode1 | encodedParentNode2 | ...)`.\n/// It is only public so that it can be used for benchmarking.\npub fn create_key_from_tree<H: Hasher, U: 'static + PoseidonArity>(\n    id: &H::Domain,\n    node: usize,\n    parents: &[u32],\n    tree: &LCMerkleTree<H, U>,\n) -> Result<H::Domain> {\n    let mut hasher = Sha256::new();\n    hasher.update(AsRef::<[u8]>::as_ref(&id));\n\n    // The hash is about the parents, hence skip if a node doesn't have any parents\n    if node != parents[0] as usize {\n        let mut scratch: [u8; NODE_SIZE] = [0; NODE_SIZE];\n        for parent in parents.iter() {\n            tree.read_into(*parent as usize, &mut scratch)?;\n            hasher.update(&scratch);\n        }\n    }\n\n    let hash = hasher.finalize();\n    Ok(bytes_into_fr_repr_safe(hash.as_ref()).into())\n}\n\npub fn replica_id<H: Hasher>(prover_id: [u8; 32], sector_id: [u8; 32]) -> H::Domain {\n    let mut to_hash = [0; 64];\n    to_hash[..32].copy_from_slice(&prover_id);\n    to_hash[32..].copy_from_slice(&sector_id);\n\n    H::Function::hash_leaf(&to_hash)\n}\n\nfn sloth_encode<H: Hasher>(key: &H::Domain, ciphertext: &H::Domain) -> Result<H::Domain> {\n    // TODO: validate this is how sloth should work in this case\n    let k = (*key).into();\n    let c = (*ciphertext).into();\n\n    Ok(sloth::encode(&k, &c).into())\n}\n"
  },
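  {
    "path": "storage-proofs-porep/examples/replica_id_sketch.rs",
    "content": "//! A minimal sketch, not a file shipped with this crate: it exercises the\n//! public `replica_id` helper from `drg/vanilla.rs` above, which hashes\n//! `prover_id || sector_id` with the hasher's leaf function. The input\n//! values are placeholders.\nuse filecoin_hashers::sha256::Sha256Hasher;\nuse storage_proofs_porep::drg::replica_id;\n\nfn main() {\n    let prover_id = [1u8; 32];\n    let sector_id = [2u8; 32];\n    // Derive the replica id domain element for this (prover, sector) pair.\n    let id = replica_id::<Sha256Hasher>(prover_id, sector_id);\n    println!(\"replica id: {:?}\", id);\n}\n"
  },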
  {
    "path": "storage-proofs-porep/src/encode.rs",
    "content": "use bellperson::bls::Fr;\nuse ff::Field;\nuse filecoin_hashers::Domain;\n\npub fn encode<T: Domain>(key: T, value: T) -> T {\n    let mut result: Fr = value.into();\n    let key: Fr = key.into();\n\n    result.add_assign(&key);\n    result.into()\n}\n\npub fn decode<T: Domain>(key: T, value: T) -> T {\n    let mut result: Fr = value.into();\n    let key: Fr = key.into();\n\n    result.sub_assign(&key);\n    result.into()\n}\n"
  },
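  {
    "path": "storage-proofs-porep/examples/encode_roundtrip_sketch.rs",
    "content": "//! A minimal sketch, not a file shipped with this crate: `src/encode.rs`\n//! above is a private module, so its two one-line field operations are\n//! restated here directly over `Fr` to show that decoding inverts encoding.\nuse bellperson::bls::Fr;\nuse ff::Field;\nuse rand::thread_rng;\n\n// Encoding adds the key to the value in the field (mirrors `encode::encode`).\nfn encode(key: Fr, mut value: Fr) -> Fr {\n    value.add_assign(&key);\n    value\n}\n\n// Decoding subtracts the same key back out (mirrors `encode::decode`).\nfn decode(key: Fr, mut value: Fr) -> Fr {\n    value.sub_assign(&key);\n    value\n}\n\nfn main() {\n    let mut rng = thread_rng();\n    let key = Fr::random(&mut rng);\n    let value = Fr::random(&mut rng);\n    let encoded = encode(key, value);\n    assert_eq!(decode(key, encoded), value, \"decode must invert encode\");\n    println!(\"round trip ok\");\n}\n"
  },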
  {
    "path": "storage-proofs-porep/src/lib.rs",
    "content": "#![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)]\n#![warn(clippy::unwrap_used)]\n#![cfg_attr(target_arch = \"aarch64\", feature(stdsimd))]\n#![warn(clippy::unnecessary_wraps)]\n\nuse std::path::PathBuf;\n\nuse filecoin_hashers::Hasher;\nuse merkletree::store::StoreConfig;\nuse storage_proofs_core::{error::Result, merkle::BinaryMerkleTree, proof::ProofScheme, Data};\n\npub mod drg;\npub mod stacked;\n\nmod encode;\n\npub const MAX_LEGACY_POREP_REGISTERED_PROOF_ID: u64 = 4;\n\npub trait PoRep<'a, H: Hasher, G: Hasher>: ProofScheme<'a> {\n    type Tau;\n    type ProverAux;\n\n    fn replicate(\n        pub_params: &'a Self::PublicParams,\n        replica_id: &H::Domain,\n        data: Data<'a>,\n        data_tree: Option<BinaryMerkleTree<G>>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<(Self::Tau, Self::ProverAux)>;\n\n    fn extract_all(\n        pub_params: &'a Self::PublicParams,\n        replica_id: &H::Domain,\n        data: &mut [u8],\n        config: Option<StoreConfig>,\n    ) -> Result<()>;\n\n    fn extract(\n        pub_params: &'a Self::PublicParams,\n        replica_id: &H::Domain,\n        data: &mut [u8],\n        node: usize,\n        config: Option<StoreConfig>,\n    ) -> Result<()>;\n}\n"
  },
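  {
    "path": "storage-proofs-porep/examples/unseal_sketch.rs",
    "content": "//! A minimal sketch, not a file shipped with this crate: a generic helper\n//! bounded on the `PoRep` trait from `src/lib.rs` above, showing how\n//! `extract_all` decodes a whole replica in place for any implementing\n//! scheme. The helper name is an assumption for illustration; it is not\n//! called here because doing so needs a fully replicated sector.\nuse filecoin_hashers::Hasher;\nuse storage_proofs_core::error::Result;\nuse storage_proofs_porep::PoRep;\n\n#[allow(dead_code)]\nfn unseal_in_place<'a, H, G, P>(\n    pub_params: &'a P::PublicParams,\n    replica_id: &H::Domain,\n    data: &mut [u8],\n) -> Result<()>\nwhere\n    H: Hasher,\n    G: Hasher,\n    P: PoRep<'a, H, G>,\n{\n    // No cached merkle store config is supplied in this sketch.\n    P::extract_all(pub_params, replica_id, data, None)\n}\n\nfn main() {}\n"
  },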
  {
    "path": "storage-proofs-porep/src/stacked/circuit/column.rs",
    "content": "use bellperson::{\n    bls::{Bls12, Fr},\n    gadgets::num::AllocatedNum,\n    ConstraintSystem, SynthesisError,\n};\nuse filecoin_hashers::Hasher;\nuse storage_proofs_core::merkle::MerkleTreeTrait;\n\nuse crate::stacked::{circuit::hash::hash_single_column, Column as VanillaColumn, PublicParams};\n\n#[derive(Debug, Clone)]\npub struct Column {\n    rows: Vec<Option<Fr>>,\n}\n\n#[derive(Clone)]\npub struct AllocatedColumn {\n    rows: Vec<AllocatedNum<Bls12>>,\n}\n\nimpl<H: Hasher> From<VanillaColumn<H>> for Column {\n    fn from(other: VanillaColumn<H>) -> Self {\n        let VanillaColumn { rows, .. } = other;\n\n        Column {\n            rows: rows.into_iter().map(|r| Some(r.into())).collect(),\n        }\n    }\n}\n\nimpl Column {\n    /// Create an empty `Column`, used in `blank_circuit`s.\n    pub fn empty<Tree: MerkleTreeTrait>(params: &PublicParams<Tree>) -> Self {\n        Column {\n            rows: vec![None; params.layer_challenges.layers()],\n        }\n    }\n\n    /// Consume this column, and allocate its values in the circuit.\n    pub fn alloc<CS: ConstraintSystem<Bls12>>(\n        self,\n        mut cs: CS,\n    ) -> Result<AllocatedColumn, SynthesisError> {\n        let Self { rows } = self;\n\n        let rows = rows\n            .into_iter()\n            .enumerate()\n            .map(|(i, val)| {\n                AllocatedNum::alloc(cs.namespace(|| format!(\"column_num_row_{}\", i)), || {\n                    val.ok_or(SynthesisError::AssignmentMissing)\n                })\n            })\n            .collect::<Result<Vec<_>, _>>()?;\n\n        Ok(AllocatedColumn { rows })\n    }\n}\n\nimpl AllocatedColumn {\n    pub fn len(&self) -> usize {\n        self.rows.len()\n    }\n\n    /// Creates the column hash of this column.\n    pub fn hash<CS: ConstraintSystem<Bls12>>(\n        &self,\n        cs: CS,\n    ) -> Result<AllocatedNum<Bls12>, SynthesisError> {\n        hash_single_column(cs, &self.rows)\n    }\n\n    pub fn get_value(&self, layer: usize) -> &AllocatedNum<Bls12> {\n        assert!(layer > 0, \"layers are 1 indexed\");\n        assert!(\n            layer <= self.rows.len(),\n            \"layer {} out of range: 1..={}\",\n            layer,\n            self.rows.len()\n        );\n        &self.rows[layer - 1]\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/circuit/column_proof.rs",
    "content": "use bellperson::{bls::Bls12, ConstraintSystem, SynthesisError};\nuse filecoin_hashers::{Hasher, PoseidonArity};\nuse storage_proofs_core::{\n    drgraph::Graph,\n    gadgets::por::AuthPath,\n    merkle::{MerkleProofTrait, MerkleTreeTrait, Store},\n};\n\nuse crate::stacked::{\n    circuit::column::{AllocatedColumn, Column},\n    vanilla::{ColumnProof as VanillaColumnProof, PublicParams},\n};\n\n#[derive(Debug, Clone)]\npub struct ColumnProof<\n    H: Hasher,\n    U: 'static + PoseidonArity,\n    V: 'static + PoseidonArity,\n    W: 'static + PoseidonArity,\n> {\n    column: Column,\n    inclusion_path: AuthPath<H, U, V, W>,\n}\n\nimpl<\n        H: 'static + Hasher,\n        U: 'static + PoseidonArity,\n        V: 'static + PoseidonArity,\n        W: 'static + PoseidonArity,\n    > ColumnProof<H, U, V, W>\n{\n    /// Create an empty `ColumnProof`, used in `blank_circuit`s.\n    pub fn empty<\n        S: Store<H::Domain>,\n        Tree: MerkleTreeTrait<Hasher = H, Store = S, Arity = U, SubTreeArity = V, TopTreeArity = W>,\n    >(\n        params: &PublicParams<Tree>,\n    ) -> Self {\n        ColumnProof {\n            column: Column::empty(params),\n            inclusion_path: AuthPath::blank(params.graph.size()),\n        }\n    }\n\n    /// Allocate the private inputs for this column proof, and return the inclusion path for verification.\n    pub fn alloc<CS: ConstraintSystem<Bls12>>(\n        self,\n        mut cs: CS,\n    ) -> Result<(AllocatedColumn, AuthPath<H, U, V, W>), SynthesisError> {\n        let ColumnProof {\n            inclusion_path,\n            column,\n        } = self;\n\n        let column = column.alloc(cs.namespace(|| \"column\"))?;\n\n        Ok((column, inclusion_path))\n    }\n}\n\nimpl<Proof: MerkleProofTrait> From<VanillaColumnProof<Proof>>\n    for ColumnProof<Proof::Hasher, Proof::Arity, Proof::SubTreeArity, Proof::TopTreeArity>\n{\n    fn from(vanilla_proof: VanillaColumnProof<Proof>) -> Self {\n        let VanillaColumnProof {\n            column,\n            inclusion_proof,\n        } = vanilla_proof;\n\n        ColumnProof {\n            column: column.into(),\n            inclusion_path: inclusion_proof.as_options().into(),\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/circuit/create_label.rs",
    "content": "use bellperson::{\n    bls::Engine,\n    gadgets::{\n        boolean::Boolean, multipack, num::AllocatedNum, sha256::sha256 as sha256_circuit,\n        uint32::UInt32,\n    },\n    ConstraintSystem, SynthesisError,\n};\nuse ff::PrimeField;\nuse storage_proofs_core::{gadgets::uint64::UInt64, util::reverse_bit_numbering};\n\nuse crate::stacked::vanilla::TOTAL_PARENTS;\n\n/// Compute a single label.\npub fn create_label_circuit<E, CS>(\n    mut cs: CS,\n    replica_id: &[Boolean],\n    parents: Vec<Vec<Boolean>>,\n    layer_index: UInt32,\n    node: UInt64,\n) -> Result<AllocatedNum<E>, SynthesisError>\nwhere\n    E: Engine,\n    CS: ConstraintSystem<E>,\n{\n    assert!(replica_id.len() >= 32, \"replica id is too small\");\n    assert!(replica_id.len() <= 256, \"replica id is too large\");\n    assert_eq!(parents.len(), TOTAL_PARENTS, \"invalid sized parents\");\n\n    // ciphertexts will become a buffer of the layout\n    // id | node | parent_node_0 | parent_node_1 | ...\n\n    let mut ciphertexts = replica_id.to_vec();\n\n    // pad to 32 bytes\n    while ciphertexts.len() < 256 {\n        ciphertexts.push(Boolean::constant(false));\n    }\n\n    ciphertexts.extend_from_slice(&layer_index.into_bits_be());\n    ciphertexts.extend_from_slice(&node.to_bits_be());\n    // pad to 64 bytes\n    while ciphertexts.len() < 512 {\n        ciphertexts.push(Boolean::constant(false));\n    }\n\n    for parent in parents.iter() {\n        ciphertexts.extend_from_slice(parent);\n\n        // pad such that each parents take 32 bytes\n        while ciphertexts.len() % 256 != 0 {\n            ciphertexts.push(Boolean::constant(false));\n        }\n    }\n\n    // 32b replica id\n    // 32b layer_index + node\n    // 37 * 32b  = 1184b parents\n    assert_eq!(ciphertexts.len(), (1 + 1 + TOTAL_PARENTS) * 32 * 8);\n\n    // Compute Sha256\n    let alloc_bits = sha256_circuit(cs.namespace(|| \"hash\"), &ciphertexts[..])?;\n\n    // Convert the hash result into a single Fr.\n    let bits = reverse_bit_numbering(alloc_bits);\n    multipack::pack_bits(\n        cs.namespace(|| \"result_num\"),\n        &bits[0..(E::Fr::CAPACITY as usize)],\n    )\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::{\n        bls::{Bls12, Fr},\n        util_cs::test_cs::TestConstraintSystem,\n    };\n    use ff::Field;\n    use filecoin_hashers::sha256::Sha256Hasher;\n    use fr32::{bytes_into_fr, fr_into_bytes};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::{\n        api_version::ApiVersion,\n        drgraph::{Graph, BASE_DEGREE},\n        util::{bytes_into_boolean_vec_be, data_at_node, NODE_SIZE},\n        TEST_SEED,\n    };\n\n    use crate::stacked::vanilla::{create_label, StackedBucketGraph, EXP_DEGREE};\n\n    #[test]\n    fn test_create_label() {\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        let size = 64;\n        let porep_id = [32; 32];\n\n        let graph = StackedBucketGraph::<Sha256Hasher>::new_stacked(\n            size,\n            BASE_DEGREE,\n            EXP_DEGREE,\n            porep_id,\n            ApiVersion::V1_1_0,\n        )\n        .unwrap();\n\n        let id_fr = Fr::random(rng);\n        let id: Vec<u8> = fr_into_bytes(&id_fr);\n        let layer = 3;\n        let node = 22;\n\n        let mut data: Vec<u8> = (0..2 * size)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        let mut parents 
= vec![0; BASE_DEGREE + EXP_DEGREE];\n        graph.parents(node, &mut parents).unwrap();\n\n        let raw_parents_bytes: Vec<Vec<u8>> = parents\n            .iter()\n            .enumerate()\n            .map(|(i, p)| {\n                if i < BASE_DEGREE {\n                    // base\n                    data_at_node(&data[..size * NODE_SIZE], *p as usize)\n                        .unwrap()\n                        .to_vec()\n                } else {\n                    // exp\n                    data_at_node(&data[size * NODE_SIZE..], *p as usize)\n                        .unwrap()\n                        .to_vec()\n                }\n            })\n            .collect();\n\n        let mut parents_bytes = raw_parents_bytes.clone(); // 14\n        parents_bytes.extend_from_slice(&raw_parents_bytes); // 28\n        parents_bytes.extend_from_slice(&raw_parents_bytes[..9]); // 37\n\n        assert_eq!(parents_bytes.len(), TOTAL_PARENTS);\n        let parents_bits: Vec<Vec<Boolean>> = parents_bytes\n            .iter()\n            .enumerate()\n            .map(|(i, p)| {\n                let mut cs = cs.namespace(|| format!(\"parents {}\", i));\n                bytes_into_boolean_vec_be(&mut cs, Some(p), p.len()).unwrap()\n            })\n            .collect();\n\n        let id_bits: Vec<Boolean> = {\n            let mut cs = cs.namespace(|| \"id\");\n            bytes_into_boolean_vec_be(&mut cs, Some(id.as_slice()), id.len()).unwrap()\n        };\n\n        let layer_alloc = UInt32::constant(layer as u32);\n        let node_alloc = UInt64::constant(node as u64);\n\n        let out = create_label_circuit(\n            cs.namespace(|| \"create_label\"),\n            &id_bits,\n            parents_bits,\n            layer_alloc,\n            node_alloc,\n        )\n        .expect(\"key derivation function failed\");\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n        assert_eq!(cs.num_constraints(), 532_025);\n\n        let (l1, l2) = data.split_at_mut(size * NODE_SIZE);\n        create_label::single::create_label_exp(\n            &graph,\n            None,\n            fr_into_bytes(&id_fr),\n            &*l2,\n            l1,\n            layer,\n            node,\n        )\n        .unwrap();\n\n        let expected_raw = data_at_node(&l1, node).unwrap();\n        let expected = bytes_into_fr(expected_raw).unwrap();\n\n        assert_eq!(\n            expected,\n            out.get_value().unwrap(),\n            \"circuit and non circuit do not match\"\n        );\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/circuit/hash.rs",
    "content": "use bellperson::{bls::Bls12, gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError};\nuse filecoin_hashers::{POSEIDON_CONSTANTS_11, POSEIDON_CONSTANTS_2};\nuse generic_array::typenum::{U11, U2};\nuse neptune::circuit::poseidon_hash;\n\n/// Hash a list of bits.\npub fn hash_single_column<CS>(\n    cs: CS,\n    column: &[AllocatedNum<Bls12>],\n) -> Result<AllocatedNum<Bls12>, SynthesisError>\nwhere\n    CS: ConstraintSystem<Bls12>,\n{\n    match column.len() {\n        2 => poseidon_hash::<CS, Bls12, U2>(cs, column.to_vec(), &*POSEIDON_CONSTANTS_2),\n        11 => poseidon_hash::<CS, Bls12, U11>(cs, column.to_vec(), &*POSEIDON_CONSTANTS_11),\n        _ => panic!(\"unsupported column size: {}\", column.len()),\n    }\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::{bls::Fr, util_cs::test_cs::TestConstraintSystem};\n    use ff::Field;\n    use filecoin_hashers::{poseidon::PoseidonHasher, HashFunction, Hasher};\n    use rand::SeedableRng;\n    use rand_xorshift::XorShiftRng;\n    use storage_proofs_core::TEST_SEED;\n\n    use crate::stacked::vanilla::hash::hash_single_column as vanilla_hash_single_column;\n\n    #[test]\n    fn test_hash2_circuit() {\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        for _ in 0..10 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let a = Fr::random(rng);\n            let b = Fr::random(rng);\n\n            let a_num = {\n                let mut cs = cs.namespace(|| \"a\");\n                AllocatedNum::alloc(&mut cs, || Ok(a)).unwrap()\n            };\n\n            let b_num = {\n                let mut cs = cs.namespace(|| \"b\");\n                AllocatedNum::alloc(&mut cs, || Ok(b)).unwrap()\n            };\n\n            let out = <PoseidonHasher as Hasher>::Function::hash2_circuit(\n                cs.namespace(|| \"hash2\"),\n                &a_num,\n                &b_num,\n            )\n            .expect(\"hash2 function failed\");\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n            assert_eq!(cs.num_constraints(), 311);\n\n            let expected: Fr =\n                <PoseidonHasher as Hasher>::Function::hash2(&a.into(), &b.into()).into();\n\n            assert_eq!(\n                expected,\n                out.get_value().unwrap(),\n                \"circuit and non circuit do not match\"\n            );\n        }\n    }\n\n    #[test]\n    fn test_hash_single_column_circuit() {\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n        for _ in 0..1 {\n            let mut cs = TestConstraintSystem::<Bls12>::new();\n\n            let vals = vec![Fr::random(rng); 11];\n            let vals_opt = vals\n                .iter()\n                .enumerate()\n                .map(|(i, v)| {\n                    AllocatedNum::alloc(cs.namespace(|| format!(\"num_{}\", i)), || Ok(*v)).unwrap()\n                })\n                .collect::<Vec<_>>();\n\n            let out = hash_single_column(cs.namespace(|| \"hash_single_column\"), &vals_opt)\n                .expect(\"hash_single_column function failed\");\n\n            assert!(cs.is_satisfied(), \"constraints not satisfied\");\n            assert_eq!(cs.num_constraints(), 598);\n\n            let expected: Fr = vanilla_hash_single_column(&vals);\n\n            assert_eq!(\n                expected,\n                out.get_value().unwrap(),\n                \"circuit and non circuit do not match\"\n            );\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/circuit/mod.rs",
    "content": "mod column;\nmod column_proof;\nmod create_label;\nmod hash;\nmod params;\nmod proof;\n\npub use create_label::*;\npub use proof::{StackedCircuit, StackedCompound};\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/circuit/params.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::{\n    bls::{Bls12, Fr},\n    gadgets::{boolean::Boolean, num::AllocatedNum, uint32::UInt32},\n    ConstraintSystem, SynthesisError,\n};\nuse filecoin_hashers::{Hasher, PoseidonArity};\nuse generic_array::typenum::{U0, U2};\nuse storage_proofs_core::{\n    drgraph::Graph,\n    gadgets::por::{AuthPath, PoRCircuit},\n    gadgets::{encode::encode, uint64::UInt64, variables::Root},\n    merkle::{DiskStore, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper},\n    util::reverse_bit_numbering,\n};\n\nuse crate::stacked::{\n    circuit::{column_proof::ColumnProof, create_label_circuit, hash::hash_single_column},\n    vanilla::{\n        Proof as VanillaProof, PublicParams, ReplicaColumnProof as VanillaReplicaColumnProof,\n    },\n};\n\ntype TreeAuthPath<T> = AuthPath<\n    <T as MerkleTreeTrait>::Hasher,\n    <T as MerkleTreeTrait>::Arity,\n    <T as MerkleTreeTrait>::SubTreeArity,\n    <T as MerkleTreeTrait>::TopTreeArity,\n>;\n\ntype TreeColumnProof<T> = ColumnProof<\n    <T as MerkleTreeTrait>::Hasher,\n    <T as MerkleTreeTrait>::Arity,\n    <T as MerkleTreeTrait>::SubTreeArity,\n    <T as MerkleTreeTrait>::TopTreeArity,\n>;\n\n/// Proof for a single challenge.\n#[derive(Debug)]\npub struct Proof<Tree: MerkleTreeTrait, G: Hasher> {\n    /// Inclusion path for the challenged data node in tree D.\n    pub comm_d_path: AuthPath<G, U2, U0, U0>,\n    /// The value of the challenged data node.\n    pub data_leaf: Option<Fr>,\n    /// The index of the challenged node.\n    pub challenge: Option<u64>,\n    /// Inclusion path of the challenged replica node in tree R.\n    pub comm_r_last_path: TreeAuthPath<Tree>,\n    /// Inclusion path of the column hash of the challenged node  in tree C.\n    pub comm_c_path: TreeAuthPath<Tree>,\n    /// Column proofs for the drg parents.\n    pub drg_parents_proofs: Vec<TreeColumnProof<Tree>>,\n    /// Column proofs for the expander parents.\n    pub exp_parents_proofs: Vec<TreeColumnProof<Tree>>,\n    _t: PhantomData<Tree>,\n}\n\n// We must manually implement Clone for all types generic over MerkleTreeTrait (instead of using\n// #[derive(Clone)]) because derive(Clone) will only expand for MerkleTreeTrait types that also\n// implement Clone. 
Not every MerkleTreeTrait type is Clone-able because not all Merkle `Store`s are\n// Clone-able; deriving Clone would therefore implement Clone for fewer than all possible Tree types.\nimpl<Tree: MerkleTreeTrait, G: 'static + Hasher> Clone for Proof<Tree, G> {\n    fn clone(&self) -> Self {\n        Proof {\n            comm_d_path: self.comm_d_path.clone(),\n            data_leaf: self.data_leaf,\n            challenge: self.challenge,\n            comm_r_last_path: self.comm_r_last_path.clone(),\n            comm_c_path: self.comm_c_path.clone(),\n            drg_parents_proofs: self.drg_parents_proofs.clone(),\n            exp_parents_proofs: self.exp_parents_proofs.clone(),\n            _t: self._t,\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait, G: 'static + Hasher> Proof<Tree, G> {\n    /// Create an empty proof, used in `blank_circuit`s.\n    pub fn empty(params: &PublicParams<Tree>) -> Self {\n        Proof {\n            comm_d_path: AuthPath::blank(params.graph.size()),\n            data_leaf: None,\n            challenge: None,\n            comm_r_last_path: AuthPath::blank(params.graph.size()),\n            comm_c_path: AuthPath::blank(params.graph.size()),\n            drg_parents_proofs: vec![\n                ColumnProof::empty(params);\n                params.graph.base_graph().degree()\n            ],\n            exp_parents_proofs: vec![ColumnProof::empty(params); params.graph.expansion_degree()],\n            _t: PhantomData,\n        }\n    }\n\n    /// Circuit synthesis.\n    #[allow(clippy::too_many_arguments)]\n    pub fn synthesize<CS: ConstraintSystem<Bls12>>(\n        self,\n        mut cs: CS,\n        layers: usize,\n        comm_d: &AllocatedNum<Bls12>,\n        comm_c: &AllocatedNum<Bls12>,\n        comm_r_last: &AllocatedNum<Bls12>,\n        replica_id: &[Boolean],\n    ) -> Result<(), SynthesisError> {\n        let Proof {\n            comm_d_path,\n            data_leaf,\n            challenge,\n            comm_r_last_path,\n            comm_c_path,\n            drg_parents_proofs,\n            exp_parents_proofs,\n            ..\n        } = self;\n\n        assert!(!drg_parents_proofs.is_empty());\n        assert!(!exp_parents_proofs.is_empty());\n\n        // -- verify initial data layer\n\n        // PrivateInput: data_leaf\n        let data_leaf_num = AllocatedNum::alloc(cs.namespace(|| \"data_leaf\"), || {\n            data_leaf.ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        // enforce inclusion of the data leaf in the tree D\n        enforce_inclusion(\n            cs.namespace(|| \"comm_d_inclusion\"),\n            comm_d_path,\n            comm_d,\n            &data_leaf_num,\n        )?;\n\n        // -- verify replica column openings\n\n        // Private Inputs for the DRG parent nodes.\n        let mut drg_parents = Vec::with_capacity(layers);\n\n        for (i, parent) in drg_parents_proofs.into_iter().enumerate() {\n            let (parent_col, inclusion_path) =\n                parent.alloc(cs.namespace(|| format!(\"drg_parent_{}_num\", i)))?;\n            assert_eq!(layers, parent_col.len());\n\n            // calculate column hash\n            let val = parent_col.hash(cs.namespace(|| format!(\"drg_parent_{}_constraint\", i)))?;\n            // enforce inclusion of the column hash in the tree C\n            enforce_inclusion(\n                cs.namespace(|| format!(\"drg_parent_{}_inclusion\", i)),\n                inclusion_path,\n                comm_c,\n                &val,\n            )?;\n            
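// keep the allocated column; its per-layer labels are reused in the labeling checks below\n            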
drg_parents.push(parent_col);\n        }\n\n        // Private Inputs for the Expander parent nodes.\n        let mut exp_parents = Vec::new();\n\n        for (i, parent) in exp_parents_proofs.into_iter().enumerate() {\n            let (parent_col, inclusion_path) =\n                parent.alloc(cs.namespace(|| format!(\"exp_parent_{}_num\", i)))?;\n            assert_eq!(layers, parent_col.len());\n\n            // calculate column hash\n            let val = parent_col.hash(cs.namespace(|| format!(\"exp_parent_{}_constraint\", i)))?;\n            // enforce inclusion of the column hash in the tree C\n            enforce_inclusion(\n                cs.namespace(|| format!(\"exp_parent_{}_inclusion\", i)),\n                inclusion_path,\n                comm_c,\n                &val,\n            )?;\n            exp_parents.push(parent_col);\n        }\n\n        // -- Verify labeling and encoding\n\n        // stores the labels of the challenged column\n        let mut column_labels = Vec::new();\n\n        // PublicInput: challenge index\n        let challenge_num = UInt64::alloc(cs.namespace(|| \"challenge\"), challenge)?;\n        challenge_num.pack_into_input(cs.namespace(|| \"challenge input\"))?;\n\n        for layer in 1..=layers {\n            let layer_num = UInt32::constant(layer as u32);\n\n            let mut cs = cs.namespace(|| format!(\"labeling_{}\", layer));\n\n            // Collect the parents\n            let mut parents = Vec::new();\n\n            // all layers have drg parents\n            for parent_col in &drg_parents {\n                let parent_val_num = parent_col.get_value(layer);\n                let parent_val_bits =\n                    reverse_bit_numbering(parent_val_num.to_bits_le(\n                        cs.namespace(|| format!(\"drg_parent_{}_bits\", parents.len())),\n                    )?);\n                parents.push(parent_val_bits);\n            }\n\n            // the first layer does not contain expander parents\n            if layer > 1 {\n                for parent_col in &exp_parents {\n                    // Subtract 1 from the layer index: the exp parents are shifted by one, as they\n                    // do not store a value for the first layer.\n                    let parent_val_num = parent_col.get_value(layer - 1);\n                    let parent_val_bits = reverse_bit_numbering(parent_val_num.to_bits_le(\n                        cs.namespace(|| format!(\"exp_parent_{}_bits\", parents.len())),\n                    )?);\n                    parents.push(parent_val_bits);\n                }\n            }\n\n            // Duplicate parents, according to the hashing algorithm.\n            let mut expanded_parents = parents.clone();\n            if layer > 1 {\n                expanded_parents.extend_from_slice(&parents); // 28\n                expanded_parents.extend_from_slice(&parents[..9]); // 37\n            } else {\n                // layer 1 only has drg parents\n                expanded_parents.extend_from_slice(&parents); // 12\n                expanded_parents.extend_from_slice(&parents); // 18\n                expanded_parents.extend_from_slice(&parents); // 24\n                expanded_parents.extend_from_slice(&parents); // 30\n                expanded_parents.extend_from_slice(&parents); // 36\n                expanded_parents.push(parents[0].clone()); // 37\n            };\n\n            // Reconstruct the label\n            let label = create_label_circuit(\n                cs.namespace(|| \"create_label\"),\n 
               replica_id,\n                expanded_parents,\n                layer_num,\n                challenge_num.clone(),\n            )?;\n            column_labels.push(label);\n        }\n\n        // -- encoding node\n        {\n            // encode the node\n\n            // key is the last label\n            let key = &column_labels[column_labels.len() - 1];\n            let encoded_node = encode(cs.namespace(|| \"encode_node\"), key, &data_leaf_num)?;\n\n            // verify inclusion of the encoded node\n            enforce_inclusion(\n                cs.namespace(|| \"comm_r_last_data_inclusion\"),\n                comm_r_last_path,\n                comm_r_last,\n                &encoded_node,\n            )?;\n        }\n\n        // -- ensure the column hash of the labels is included\n        {\n            // calculate column_hash\n            let column_hash =\n                hash_single_column(cs.namespace(|| \"c_x_column_hash\"), &column_labels)?;\n\n            // enforce inclusion of the column hash in the tree C\n            enforce_inclusion(\n                cs.namespace(|| \"c_x_inclusion\"),\n                comm_c_path,\n                comm_c,\n                &column_hash,\n            )?;\n        }\n\n        Ok(())\n    }\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> From<VanillaProof<Tree, G>> for Proof<Tree, G>\nwhere\n    Tree::Hasher: 'static,\n{\n    fn from(vanilla_proof: VanillaProof<Tree, G>) -> Self {\n        let VanillaProof {\n            comm_d_proofs,\n            comm_r_last_proof,\n            replica_column_proofs,\n            labeling_proofs,\n            ..\n        } = vanilla_proof;\n        let VanillaReplicaColumnProof {\n            c_x,\n            drg_parents,\n            exp_parents,\n        } = replica_column_proofs;\n\n        let data_leaf = Some(comm_d_proofs.leaf().into());\n\n        Proof {\n            comm_d_path: comm_d_proofs.as_options().into(),\n            data_leaf,\n            challenge: Some(labeling_proofs[0].node),\n            comm_r_last_path: comm_r_last_proof.as_options().into(),\n            comm_c_path: c_x.inclusion_proof.as_options().into(),\n            drg_parents_proofs: drg_parents.into_iter().map(|p| p.into()).collect(),\n            exp_parents_proofs: exp_parents.into_iter().map(|p| p.into()).collect(),\n            _t: PhantomData,\n        }\n    }\n}\n\n/// Enforce the inclusion of the given path for the given leaf and root.\nfn enforce_inclusion<H, U, V, W, CS: ConstraintSystem<Bls12>>(\n    cs: CS,\n    path: AuthPath<H, U, V, W>,\n    root: &AllocatedNum<Bls12>,\n    leaf: &AllocatedNum<Bls12>,\n) -> Result<(), SynthesisError>\nwhere\n    H: 'static + Hasher,\n    U: 'static + PoseidonArity,\n    V: 'static + PoseidonArity,\n    W: 'static + PoseidonArity,\n{\n    let root = Root::from_allocated::<CS>(root.clone());\n    let leaf = Root::from_allocated::<CS>(leaf.clone());\n\n    PoRCircuit::<MerkleTreeWrapper<H, DiskStore<H::Domain>, U, V, W>>::synthesize(\n        cs, leaf, path, root, true,\n    )?;\n\n    Ok(())\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/circuit/proof.rs",
    "content": "use std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse bellperson::{\n    bls::{Bls12, Fr},\n    gadgets::num::AllocatedNum,\n    Circuit, ConstraintSystem, SynthesisError,\n};\nuse filecoin_hashers::{HashFunction, Hasher};\nuse fr32::u64_into_fr;\nuse storage_proofs_core::{\n    compound_proof::{CircuitComponent, CompoundProof},\n    drgraph::Graph,\n    error::Result,\n    gadgets::{constraint, por::PoRCompound},\n    merkle::{BinaryMerkleTree, MerkleTreeTrait},\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    por::{self, PoR},\n    proof::ProofScheme,\n    util::reverse_bit_numbering,\n};\n\nuse crate::stacked::{circuit::params::Proof, StackedDrg};\n\n/// Stacked DRG based Proof of Replication.\n///\n/// # Fields\n///\n/// * `params` - parameters for the curve\n///\npub struct StackedCircuit<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> {\n    public_params: <StackedDrg<'a, Tree, G> as ProofScheme<'a>>::PublicParams,\n    replica_id: Option<<Tree::Hasher as Hasher>::Domain>,\n    comm_d: Option<G::Domain>,\n    comm_r: Option<<Tree::Hasher as Hasher>::Domain>,\n    comm_r_last: Option<<Tree::Hasher as Hasher>::Domain>,\n    comm_c: Option<<Tree::Hasher as Hasher>::Domain>,\n\n    // one proof per challenge\n    proofs: Vec<Proof<Tree, G>>,\n}\n\n// We must manually implement Clone for all types generic over MerkleTreeTrait (instead of using\n// #[derive(Clone)]) because derive(Clone) will only expand for MerkleTreeTrait types that also\n// implement Clone. Not every MerkleTreeTrait type is Clone-able because not all merkel Store's are\n// Clone-able, therefore deriving Clone would impl Clone for less than all possible Tree types.\nimpl<'a, Tree: MerkleTreeTrait, G: Hasher> Clone for StackedCircuit<'a, Tree, G> {\n    fn clone(&self) -> Self {\n        StackedCircuit {\n            public_params: self.public_params.clone(),\n            replica_id: self.replica_id,\n            comm_d: self.comm_d,\n            comm_r: self.comm_r,\n            comm_r_last: self.comm_r_last,\n            comm_c: self.comm_c,\n            proofs: self.proofs.clone(),\n        }\n    }\n}\n\nimpl<'a, Tree: MerkleTreeTrait, G: Hasher> CircuitComponent for StackedCircuit<'a, Tree, G> {\n    type ComponentPrivateInputs = ();\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedCircuit<'a, Tree, G> {\n    #[allow(clippy::too_many_arguments)]\n    pub fn synthesize<CS>(\n        mut cs: CS,\n        public_params: <StackedDrg<'a, Tree, G> as ProofScheme<'a>>::PublicParams,\n        replica_id: Option<<Tree::Hasher as Hasher>::Domain>,\n        comm_d: Option<G::Domain>,\n        comm_r: Option<<Tree::Hasher as Hasher>::Domain>,\n        comm_r_last: Option<<Tree::Hasher as Hasher>::Domain>,\n        comm_c: Option<<Tree::Hasher as Hasher>::Domain>,\n        proofs: Vec<Proof<Tree, G>>,\n    ) -> Result<(), SynthesisError>\n    where\n        CS: ConstraintSystem<Bls12>,\n    {\n        let circuit = StackedCircuit::<'a, Tree, G> {\n            public_params,\n            replica_id,\n            comm_d,\n            comm_r,\n            comm_r_last,\n            comm_c,\n            proofs,\n        };\n\n        circuit.synthesize(&mut cs)\n    }\n}\n\nimpl<'a, Tree: MerkleTreeTrait, G: Hasher> Circuit<Bls12> for StackedCircuit<'a, Tree, G> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let StackedCircuit {\n            public_params,\n            proofs,\n            
replica_id,\n            comm_r,\n            comm_d,\n            comm_r_last,\n            comm_c,\n            ..\n        } = self;\n\n        // Allocate replica_id\n        let replica_id_num = AllocatedNum::alloc(cs.namespace(|| \"replica_id\"), || {\n            replica_id\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        // make replica_id a public input\n        replica_id_num.inputize(cs.namespace(|| \"replica_id_input\"))?;\n\n        let replica_id_bits =\n            reverse_bit_numbering(replica_id_num.to_bits_le(cs.namespace(|| \"replica_id_bits\"))?);\n\n        // Allocate comm_d as Fr\n        let comm_d_num = AllocatedNum::alloc(cs.namespace(|| \"comm_d\"), || {\n            comm_d\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        // make comm_d a public input\n        comm_d_num.inputize(cs.namespace(|| \"comm_d_input\"))?;\n\n        // Allocate comm_r as Fr\n        let comm_r_num = AllocatedNum::alloc(cs.namespace(|| \"comm_r\"), || {\n            comm_r\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        // make comm_r a public input\n        comm_r_num.inputize(cs.namespace(|| \"comm_r_input\"))?;\n\n        // Allocate comm_r_last as Fr\n        let comm_r_last_num = AllocatedNum::alloc(cs.namespace(|| \"comm_r_last\"), || {\n            comm_r_last\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        // Allocate comm_c as Fr\n        let comm_c_num = AllocatedNum::alloc(cs.namespace(|| \"comm_c\"), || {\n            comm_c\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        // Verify comm_r = H(comm_c || comm_r_last)\n        {\n            let hash_num = <Tree::Hasher as Hasher>::Function::hash2_circuit(\n                cs.namespace(|| \"H_comm_c_comm_r_last\"),\n                &comm_c_num,\n                &comm_r_last_num,\n            )?;\n\n            // Check actual equality\n            constraint::equal(\n                cs,\n                || \"enforce comm_r = H(comm_c || comm_r_last)\",\n                &comm_r_num,\n                &hash_num,\n            );\n        }\n\n        for (i, proof) in proofs.into_iter().enumerate() {\n            proof.synthesize(\n                &mut cs.namespace(|| format!(\"challenge_{}\", i)),\n                public_params.layer_challenges.layers(),\n                &comm_d_num,\n                &comm_c_num,\n                &comm_r_last_num,\n                &replica_id_bits,\n            )?;\n        }\n\n        Ok(())\n    }\n}\n\n#[allow(dead_code)]\npub struct StackedCompound<Tree: MerkleTreeTrait, G: Hasher> {\n    partitions: Option<usize>,\n    _t: PhantomData<Tree>,\n    _g: PhantomData<G>,\n}\n\nimpl<C: Circuit<Bls12>, P: ParameterSetMetadata, Tree: MerkleTreeTrait, G: Hasher>\n    CacheableParameters<C, P> for StackedCompound<Tree, G>\n{\n    fn cache_prefix() -> String {\n        format!(\n            \"stacked-proof-of-replication-{}-{}\",\n            Tree::display(),\n            G::name()\n        )\n    }\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher>\n    CompoundProof<'a, StackedDrg<'a, Tree, G>, StackedCircuit<'a, Tree, G>>\n    for StackedCompound<Tree, G>\n{\n    fn generate_public_inputs(\n        pub_in: &<StackedDrg<'_, Tree, G> as 
ProofScheme<'_>>::PublicInputs,\n        pub_params: &<StackedDrg<'_, Tree, G> as ProofScheme<'_>>::PublicParams,\n        k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        let graph = &pub_params.graph;\n\n        let mut inputs = Vec::new();\n\n        let replica_id = pub_in.replica_id;\n        inputs.push(replica_id.into());\n\n        let comm_d = pub_in.tau.as_ref().expect(\"missing tau\").comm_d;\n        inputs.push(comm_d.into());\n\n        let comm_r = pub_in.tau.as_ref().expect(\"missing tau\").comm_r;\n        inputs.push(comm_r.into());\n\n        let por_setup_params = por::SetupParams {\n            leaves: graph.size(),\n            private: true,\n        };\n\n        let por_params = PoR::<Tree>::setup(&por_setup_params)?;\n        let por_params_d = PoR::<BinaryMerkleTree<G>>::setup(&por_setup_params)?;\n\n        let all_challenges = pub_in.challenges(&pub_params.layer_challenges, graph.size(), k);\n\n        for challenge in all_challenges.into_iter() {\n            // comm_d inclusion proof for the data leaf\n            inputs.extend(generate_inclusion_inputs::<BinaryMerkleTree<G>>(\n                &por_params_d,\n                challenge,\n                k,\n            )?);\n\n            // drg parents\n            let mut drg_parents = vec![0; graph.base_graph().degree()];\n            graph.base_graph().parents(challenge, &mut drg_parents)?;\n\n            // Inclusion Proofs: drg parent node in comm_c\n            for parent in drg_parents.into_iter() {\n                inputs.extend(generate_inclusion_inputs::<Tree>(\n                    &por_params,\n                    parent as usize,\n                    k,\n                )?);\n            }\n\n            // exp parents\n            let mut exp_parents = vec![0; graph.expansion_degree()];\n            graph.expanded_parents(challenge, &mut exp_parents)?;\n\n            // Inclusion Proofs: expander parent node in comm_c\n            for parent in exp_parents.into_iter() {\n                inputs.extend(generate_inclusion_inputs::<Tree>(\n                    &por_params,\n                    parent as usize,\n                    k,\n                )?);\n            }\n\n            inputs.push(u64_into_fr(challenge as u64));\n\n            // Inclusion Proof: encoded node in comm_r_last\n            inputs.extend(generate_inclusion_inputs::<Tree>(\n                &por_params,\n                challenge,\n                k,\n            )?);\n\n            // Inclusion Proof: column hash of the challenged node in comm_c\n            inputs.extend(generate_inclusion_inputs::<Tree>(\n                &por_params,\n                challenge,\n                k,\n            )?);\n        }\n\n        Ok(inputs)\n    }\n\n    fn circuit<'b>(\n        public_inputs: &'b <StackedDrg<'_, Tree, G> as ProofScheme<'_>>::PublicInputs,\n        _component_private_inputs: <StackedCircuit<'a, Tree, G> as CircuitComponent>::ComponentPrivateInputs,\n        vanilla_proof: &'b <StackedDrg<'_, Tree, G> as ProofScheme<'_>>::Proof,\n        public_params: &'b <StackedDrg<'_, Tree, G> as ProofScheme<'_>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<StackedCircuit<'a, Tree, G>> {\n        ensure!(\n            !vanilla_proof.is_empty(),\n            \"Cannot create a circuit with no vanilla proofs\"\n        );\n\n        let comm_r_last = vanilla_proof[0].comm_r_last();\n        let comm_c = vanilla_proof[0].comm_c();\n\n        // ensure consistency\n        ensure!(\n            
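// every partition proof must commit to the same comm_r_last and comm_c\n            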
vanilla_proof.iter().all(|p| p.comm_r_last() == comm_r_last),\n            \"inconsistent comm_r_lasts\"\n        );\n        ensure!(\n            vanilla_proof.iter().all(|p| p.comm_c() == comm_c),\n            \"inconsistent comm_cs\"\n        );\n\n        Ok(StackedCircuit {\n            public_params: public_params.clone(),\n            replica_id: Some(public_inputs.replica_id),\n            comm_d: public_inputs.tau.as_ref().map(|t| t.comm_d),\n            comm_r: public_inputs.tau.as_ref().map(|t| t.comm_r),\n            comm_r_last: Some(comm_r_last),\n            comm_c: Some(comm_c),\n            proofs: vanilla_proof.iter().cloned().map(|p| p.into()).collect(),\n        })\n    }\n\n    fn blank_circuit(\n        public_params: &<StackedDrg<'_, Tree, G> as ProofScheme<'_>>::PublicParams,\n    ) -> StackedCircuit<'a, Tree, G> {\n        StackedCircuit {\n            public_params: public_params.clone(),\n            replica_id: None,\n            comm_d: None,\n            comm_r: None,\n            comm_r_last: None,\n            comm_c: None,\n            proofs: (0..public_params.layer_challenges.challenges_count_all())\n                .map(|_challenge_index| Proof::empty(public_params))\n                .collect(),\n        }\n    }\n}\n\n/// Helper to generate public inputs for inclusion proofs.\nfn generate_inclusion_inputs<Tree: 'static + MerkleTreeTrait>(\n    por_params: &por::PublicParams,\n    challenge: usize,\n    k: Option<usize>,\n) -> Result<Vec<Fr>> {\n    let pub_inputs = por::PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n        challenge,\n        commitment: None,\n    };\n\n    PoRCompound::<Tree>::generate_public_inputs(&pub_inputs, por_params, k)\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/mod.rs",
    "content": "mod circuit;\nmod vanilla;\n\npub use circuit::*;\npub use vanilla::*;\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/cache.rs",
    "content": "use std::collections::BTreeMap;\nuse std::fs::{remove_file, File};\nuse std::io;\nuse std::path::{Path, PathBuf};\n\nuse anyhow::{bail, ensure, Context};\nuse byteorder::{ByteOrder, LittleEndian};\nuse filecoin_hashers::Hasher;\nuse lazy_static::lazy_static;\nuse log::{info, trace};\nuse mapr::{Mmap, MmapOptions};\nuse rayon::prelude::{IndexedParallelIterator, ParallelIterator, ParallelSliceMut};\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\nuse storage_proofs_core::{\n    drgraph::{Graph, BASE_DEGREE},\n    error::Result,\n    parameter_cache::{with_exclusive_lock, LockedFile, ParameterSetMetadata, VERSION},\n    settings::SETTINGS,\n    util::NODE_SIZE,\n};\n\nuse crate::stacked::vanilla::graph::{StackedGraph, DEGREE};\n\n/// u32 = 4 bytes\nconst NODE_BYTES: usize = 4;\n\npub const PARENT_CACHE_DATA: &str = include_str!(\"../../../parent_cache.json\");\n\npub type ParentCacheDataMap = BTreeMap<String, ParentCacheData>;\n\n#[derive(Debug, Deserialize, Serialize)]\npub struct ParentCacheData {\n    pub digest: String,\n    pub sector_size: u64,\n}\n\nlazy_static! {\n    pub static ref PARENT_CACHE: ParentCacheDataMap =\n        serde_json::from_str(PARENT_CACHE_DATA).expect(\"Invalid parent_cache.json\");\n}\n\n// StackedGraph will hold two different (but related) `ParentCache`,\n#[derive(Debug)]\npub struct ParentCache {\n    /// Disk path for the cache.\n    pub path: PathBuf,\n    /// The total number of cache entries.\n    num_cache_entries: u32,\n    cache: CacheData,\n    pub sector_size: usize,\n    pub digest: String,\n}\n\n#[derive(Debug)]\nstruct CacheData {\n    /// This is a large list of fixed (parent) sized arrays.\n    data: Mmap,\n    /// Offset in nodes.\n    offset: u32,\n    /// Len in nodes.\n    len: u32,\n    /// The underlyling file.\n    file: LockedFile,\n}\n\nimpl CacheData {\n    /// Change the cache to point to the newly passed in offset.\n    ///\n    /// The `new_offset` must be set, such that `new_offset + len` does not\n    /// overflow the underlying data.\n    fn shift(&mut self, new_offset: u32) -> Result<()> {\n        if self.offset == new_offset {\n            return Ok(());\n        }\n\n        let offset = new_offset as usize * DEGREE * NODE_BYTES;\n        let len = self.len as usize * DEGREE * NODE_BYTES;\n\n        self.data = unsafe {\n            MmapOptions::new()\n                .offset(offset as u64)\n                .len(len)\n                .map(self.file.as_ref())\n                .context(\"could not shift mmap}\")?\n        };\n        self.offset = new_offset;\n\n        Ok(())\n    }\n\n    /// Returns true if this node is in the cached range.\n    fn contains(&self, node: u32) -> bool {\n        node >= self.offset && node < self.offset + self.len\n    }\n\n    /// Read the parents for the given node from cache.\n    ///\n    /// Panics if the `node` is not in the cache.\n    fn read(&self, node: u32) -> [u32; DEGREE] {\n        assert!(node >= self.offset, \"node not in cache\");\n        let start = (node - self.offset) as usize * DEGREE * NODE_BYTES;\n        let end = start + DEGREE * NODE_BYTES;\n\n        let mut res = [0u32; DEGREE];\n        LittleEndian::read_u32_into(&self.data[start..end], &mut res);\n        res\n    }\n\n    fn reset(&mut self) -> Result<()> {\n        if self.offset == 0 {\n            return Ok(());\n        }\n\n        self.shift(0)\n    }\n\n    fn open(offset: u32, len: u32, path: &Path) -> Result<Self> {\n        let min_cache_size = (offset + len) as usize 
* DEGREE * NODE_BYTES;\n\n        let file = LockedFile::open_shared_read(path)\n            .with_context(|| format!(\"could not open path={}\", path.display()))?;\n\n        let actual_len = file.as_ref().metadata()?.len();\n        if actual_len < min_cache_size as u64 {\n            bail!(\n                \"corrupted cache: {}, expected at least {}, got {} bytes\",\n                path.display(),\n                min_cache_size,\n                actual_len\n            );\n        }\n\n        let data = unsafe {\n            MmapOptions::new()\n                .offset((offset as usize * DEGREE * NODE_BYTES) as u64)\n                .len(len as usize * DEGREE * NODE_BYTES)\n                .map(file.as_ref())\n                .with_context(|| format!(\"could not mmap path={}\", path.display()))?\n        };\n\n        Ok(Self {\n            data,\n            file,\n            len,\n            offset,\n        })\n    }\n}\n\nimpl ParentCache {\n    pub fn new<H, G>(len: u32, cache_entries: u32, graph: &StackedGraph<H, G>) -> Result<Self>\n    where\n        H: Hasher,\n        G: Graph<H> + ParameterSetMetadata + Send + Sync,\n    {\n        let path = cache_path(cache_entries, graph);\n        if path.exists() {\n            Self::open(len, cache_entries, graph, &path)\n        } else {\n            match Self::generate(len, cache_entries, graph, &path) {\n                Ok(c) => Ok(c),\n                Err(err) => {\n                    match err.downcast::<io::Error>() {\n                        Ok(error) if error.kind() == io::ErrorKind::AlreadyExists => {\n                            // cache was written from another process, just read it\n                            Self::open(len, cache_entries, graph, &path)\n                        }\n                        Ok(error) => Err(error.into()),\n                        Err(error) => Err(error),\n                    }\n                }\n            }\n        }\n    }\n\n    /// Opens an existing cache from disk.  If the verify_cache option\n    /// is enabled, we rehash the data and compare with the persisted\n    /// hash file.  If the persisted hash file does not exist, we\n    /// re-generate the cache file, which will create it.\n    pub fn open<H, G>(\n        len: u32,\n        cache_entries: u32,\n        graph: &StackedGraph<H, G>,\n        path: &Path,\n    ) -> Result<Self>\n    where\n        H: Hasher,\n        G: Graph<H> + ParameterSetMetadata + Send + Sync,\n    {\n        // Check if current entry is part of the official parent cache manifest.  If not, we're\n        // dealing with some kind of test sector.  If verify has been requested but it's not a\n        // production entry in the manifest, we'll calculate the digest so that it can be returned,\n        // although we don't attempt to match it up to anything.  
This is useful for the case of\n        // generating new additions to the parent cache manifest since a valid digest is required.\n        let (parent_cache_data, verify_cache, is_production, mut digest_hex) =\n            match get_parent_cache_data(&path) {\n                None => {\n                    info!(\"[open] Parent cache data is not supported in production\");\n\n                    (\n                        None,\n                        SETTINGS.verify_cache,\n                        false, // not production since not in manifest\n                        \"\".to_string(),\n                    )\n                }\n                Some(pcd) => (\n                    Some(pcd),\n                    SETTINGS.verify_cache,\n                    true, // is_production since it exists in the manifest\n                    pcd.digest.clone(),\n                ),\n            };\n\n        info!(\n            \"parent cache: opening {}, verify enabled: {}\",\n            path.display(),\n            verify_cache\n        );\n\n        if verify_cache {\n            // Always check all of the data for integrity checks, even\n            // if we're only opening a portion of it.\n            let mut hasher = Sha256::new();\n            info!(\"[open] parent cache: calculating consistency digest\");\n            let file = File::open(&path)?;\n            let data = unsafe {\n                MmapOptions::new()\n                    .map(&file)\n                    .with_context(|| format!(\"could not mmap path={}\", path.display()))?\n            };\n            hasher.update(&data);\n            drop(data);\n\n            let hash = hasher.finalize();\n            digest_hex = hash.iter().map(|x| format!(\"{:01$x}\", x, 2)).collect();\n\n            info!(\n                \"[open] parent cache: calculated consistency digest: {:?}\",\n                digest_hex\n            );\n\n            if is_production {\n                let parent_cache_data = parent_cache_data.expect(\"parent_cache_data failure\");\n\n                trace!(\n                    \"[{}] Comparing {:?} to {:?}\",\n                    graph.size() * NODE_SIZE,\n                    digest_hex,\n                    parent_cache_data.digest\n                );\n\n                if digest_hex == parent_cache_data.digest {\n                    info!(\"[open] parent cache: cache is verified!\");\n                } else {\n                    info!(\n                        \"[!!!] Parent cache digest mismatch detected.  Regenerating {}\",\n                        path.display()\n                    );\n                    // delete invalid cache\n                    remove_file(path)?;\n                    ensure!(\n                        Self::generate(len, graph.size() as u32, graph, path).is_ok(),\n                        \"Failed to generate parent cache\"\n                    );\n\n                    // Note that if we wanted the user to manually terminate after repeated\n                    // generation attempts, we could recursively return Self::open(...) 
here.\n                }\n            }\n        }\n\n        Ok(ParentCache {\n            cache: CacheData::open(0, len, &path)?,\n            path: path.to_path_buf(),\n            num_cache_entries: cache_entries,\n            sector_size: graph.size() * NODE_SIZE,\n            digest: digest_hex,\n        })\n    }\n\n    /// Generates a new cache and stores it on disk.\n    pub fn generate<H, G>(\n        len: u32,\n        cache_entries: u32,\n        graph: &StackedGraph<H, G>,\n        path: &Path,\n    ) -> Result<Self>\n    where\n        H: Hasher,\n        G: Graph<H> + ParameterSetMetadata + Send + Sync,\n    {\n        info!(\"parent cache: generating {}\", path.display());\n        let mut digest_hex: String = \"\".to_string();\n        let sector_size = graph.size() * NODE_SIZE;\n\n        with_exclusive_lock(&path.to_path_buf(), |file| {\n            let cache_size = cache_entries as usize * NODE_BYTES * DEGREE;\n            file.as_ref()\n                .set_len(cache_size as u64)\n                .with_context(|| format!(\"failed to set length: {}\", cache_size))?;\n\n            let mut data = unsafe {\n                MmapOptions::new()\n                    .map_mut(file.as_ref())\n                    .with_context(|| format!(\"could not mmap path={}\", path.display()))?\n            };\n\n            data.par_chunks_mut(DEGREE * NODE_BYTES)\n                .enumerate()\n                .try_for_each(|(node, entry)| -> Result<()> {\n                    let mut parents = [0u32; DEGREE];\n                    graph\n                        .base_graph()\n                        .parents(node, &mut parents[..BASE_DEGREE])?;\n                    graph.generate_expanded_parents(node, &mut parents[BASE_DEGREE..]);\n\n                    LittleEndian::write_u32_into(&parents, entry);\n                    Ok(())\n                })?;\n\n            info!(\"parent cache: generated\");\n            data.flush().context(\"failed to flush parent cache\")?;\n\n            info!(\"[generate] parent cache: generating consistency digest\");\n            let mut hasher = Sha256::new();\n            hasher.update(&data);\n            let hash = hasher.finalize();\n            digest_hex = hash.iter().map(|x| format!(\"{:01$x}\", x, 2)).collect();\n            info!(\n                \"[generate] parent cache: generated consistency digest: {:?}\",\n                digest_hex\n            );\n\n            // Check if current entry is part of the official manifest and verify\n            // that what we just generated matches what we expect for this entry\n            // (if found). 
If not, we're dealing with some kind of test sector.\n            match get_parent_cache_data(&path) {\n                None => {\n                    info!(\"[generate] Parent cache data is not supported in production\");\n                }\n                Some(pcd) => {\n                    ensure!(\n                        digest_hex == pcd.digest,\n                        \"Newly generated parent cache is invalid\"\n                    );\n                }\n            };\n\n            drop(data);\n\n            info!(\"parent cache: written to disk\");\n            Ok(())\n        })?;\n\n        Ok(ParentCache {\n            cache: CacheData::open(0, len, &path)?,\n            path: path.to_path_buf(),\n            num_cache_entries: cache_entries,\n            sector_size,\n            digest: digest_hex,\n        })\n    }\n\n    /// Read a single cache element at position `node`.\n    pub fn read(&mut self, node: u32) -> Result<[u32; DEGREE]> {\n        if self.cache.contains(node) {\n            return Ok(self.cache.read(node));\n        }\n\n        // not in memory, shift cache\n        ensure!(\n            node >= self.cache.offset + self.cache.len,\n            \"cache must be read in ascending order {} < {} + {}\",\n            node,\n            self.cache.offset,\n            self.cache.len,\n        );\n\n        // Shift cache by its current size.\n        let new_offset =\n            (self.num_cache_entries - self.cache.len).min(self.cache.offset + self.cache.len);\n        self.cache.shift(new_offset)?;\n\n        Ok(self.cache.read(node))\n    }\n\n    /// Resets the partial cache to the beginning.\n    pub fn reset(&mut self) -> Result<()> {\n        self.cache.reset()\n    }\n}\n\nfn parent_cache_dir_name() -> String {\n    SETTINGS.parent_cache.clone()\n}\n\nfn parent_cache_id(path: &Path) -> String {\n    Path::new(&path)\n        .file_stem()\n        .expect(\"parent_cache_id file_stem failure\")\n        .to_str()\n        .expect(\"parent_cache_id to_str failure\")\n        .to_string()\n}\n\n/// Get the correct parent cache data for a given cache id.\nfn get_parent_cache_data(path: &Path) -> Option<&ParentCacheData> {\n    PARENT_CACHE.get(&parent_cache_id(path))\n}\n\nfn cache_path<H, G>(cache_entries: u32, graph: &StackedGraph<H, G>) -> PathBuf\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata + Send + Sync,\n{\n    let mut hasher = Sha256::default();\n\n    hasher.update(H::name());\n    hasher.update(graph.identifier());\n    for key in &graph.feistel_keys {\n        hasher.update(key.to_le_bytes());\n    }\n    hasher.update(cache_entries.to_le_bytes());\n    let h = hasher.finalize();\n    PathBuf::from(parent_cache_dir_name()).join(format!(\n        \"v{}-sdr-parent-{}.cache\",\n        VERSION,\n        hex::encode(h),\n    ))\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use filecoin_hashers::poseidon::PoseidonHasher;\n    use storage_proofs_core::api_version::ApiVersion;\n\n    use crate::stacked::vanilla::graph::{StackedBucketGraph, EXP_DEGREE};\n\n    #[test]\n    fn test_read_full_range() {\n        let nodes = 24u32;\n        let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(\n            nodes as usize,\n            BASE_DEGREE,\n            EXP_DEGREE,\n            [0u8; 32],\n            ApiVersion::V1_0_0,\n        )\n        .expect(\"new_stacked failure\");\n\n        let mut cache = ParentCache::new(nodes, nodes, &graph).expect(\"parent cache new failure\");\n\n        for node in 0..nodes {\n     
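       // every cached entry must match the parents computed directly from the graph\n     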
       let mut expected_parents = [0; DEGREE];\n            graph\n                .parents(node as usize, &mut expected_parents)\n                .expect(\"graph parents failure\");\n            let parents = cache.read(node).expect(\"cache read failure\");\n\n            assert_eq!(expected_parents, parents);\n        }\n    }\n\n    #[test]\n    fn test_read_partial_range() {\n        let nodes = 48u32;\n        let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(\n            nodes as usize,\n            BASE_DEGREE,\n            EXP_DEGREE,\n            [0u8; 32],\n            ApiVersion::V1_0_0,\n        )\n        .expect(\"new_stacked failure\");\n\n        let mut half_cache =\n            ParentCache::new(nodes / 2, nodes, &graph).expect(\"parent cache new failure\");\n        let mut quarter_cache =\n            ParentCache::new(nodes / 4, nodes, &graph).expect(\"parent cache new failure\");\n\n        for node in 0..nodes {\n            let mut expected_parents = [0; DEGREE];\n            graph\n                .parents(node as usize, &mut expected_parents)\n                .expect(\"graph parents failure\");\n\n            let parents = half_cache.read(node).expect(\"half cache read failure\");\n            assert_eq!(expected_parents, parents);\n\n            let parents = quarter_cache\n                .read(node)\n                .expect(\"quarter cache read failure\");\n            assert_eq!(expected_parents, parents);\n\n            // some internal checks to make sure the cache works as expected\n            assert_eq!(\n                half_cache.cache.data.len() / DEGREE / NODE_BYTES,\n                nodes as usize / 2\n            );\n            assert_eq!(\n                quarter_cache.cache.data.len() / DEGREE / NODE_BYTES,\n                nodes as usize / 4\n            );\n        }\n\n        half_cache.reset().expect(\"half cache reset failure\");\n        quarter_cache.reset().expect(\"quarter cache reset failure\");\n\n        for node in 0..nodes {\n            let mut expected_parents = [0; DEGREE];\n            graph\n                .parents(node as usize, &mut expected_parents)\n                .expect(\"graph parents failure\");\n\n            let parents = half_cache.read(node).expect(\"half cache read failure\");\n            assert_eq!(expected_parents, parents);\n\n            let parents = quarter_cache\n                .read(node)\n                .expect(\"quarter cache read failure\");\n            assert_eq!(expected_parents, parents);\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/challenges.rs",
    "content": "use filecoin_hashers::Domain;\nuse num_bigint::BigUint;\nuse num_traits::cast::ToPrimitive;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct LayerChallenges {\n    /// How many layers we are generating challenges for.\n    layers: usize,\n    /// The maximum count of challenges\n    max_count: usize,\n}\n\nimpl LayerChallenges {\n    pub const fn new(layers: usize, max_count: usize) -> Self {\n        LayerChallenges { layers, max_count }\n    }\n\n    pub fn layers(&self) -> usize {\n        self.layers\n    }\n\n    pub fn challenges_count_all(&self) -> usize {\n        self.max_count\n    }\n\n    /// Derive all challenges.\n    pub fn derive<D: Domain>(\n        &self,\n        leaves: usize,\n        replica_id: &D,\n        seed: &[u8; 32],\n        k: u8,\n    ) -> Vec<usize> {\n        self.derive_internal(self.challenges_count_all(), leaves, replica_id, seed, k)\n    }\n\n    pub fn derive_internal<D: Domain>(\n        &self,\n        challenges_count: usize,\n        leaves: usize,\n        replica_id: &D,\n        seed: &[u8; 32],\n        k: u8,\n    ) -> Vec<usize> {\n        assert!(leaves > 2, \"Too few leaves: {}\", leaves);\n\n        (0..challenges_count)\n            .map(|i| {\n                let j: u32 = ((challenges_count * k as usize) + i) as u32;\n\n                let hash = Sha256::new()\n                    .chain(replica_id.into_bytes())\n                    .chain(seed)\n                    .chain(&j.to_le_bytes())\n                    .finalize();\n\n                let big_challenge = BigUint::from_bytes_le(hash.as_ref());\n\n                // We cannot try to prove the first node, so make sure the challenge\n                // can never be 0.\n                let big_mod_challenge = big_challenge % (leaves - 1);\n                let big_mod_challenge = big_mod_challenge\n                    .to_usize()\n                    .expect(\"`big_mod_challenge` exceeds size of `usize`\");\n                big_mod_challenge + 1\n            })\n            .collect()\n    }\n}\n\n#[derive(Debug, Default)]\npub struct ChallengeRequirements {\n    pub minimum_challenges: usize,\n}\n\n#[cfg(test)]\nmod test {\n    use super::*;\n\n    use std::collections::HashMap;\n\n    use filecoin_hashers::sha256::Sha256Domain;\n    use rand::{thread_rng, Rng};\n\n    #[test]\n    fn test_calculate_fixed_challenges() {\n        let layer_challenges = LayerChallenges::new(10, 333);\n        let expected = 333;\n\n        let calculated_count = layer_challenges.challenges_count_all();\n        assert_eq!(expected as usize, calculated_count);\n    }\n\n    #[test]\n    fn challenge_derivation() {\n        let n = 200;\n        let layers = 100;\n\n        let challenges = LayerChallenges::new(layers, n);\n        let leaves = 1 << 30;\n        let rng = &mut thread_rng();\n        let replica_id: Sha256Domain = Sha256Domain::random(rng);\n        let seed: [u8; 32] = rng.gen();\n        let partitions = 5;\n        let total_challenges = partitions * n;\n\n        let mut layers_with_duplicates = 0;\n\n        for _layer in 1..=layers {\n            let mut histogram = HashMap::new();\n            for k in 0..partitions {\n                let challenges = challenges.derive(leaves, &replica_id, &seed, k as u8);\n\n                for challenge in challenges {\n                    let counter = histogram.entry(challenge).or_insert(0);\n                    *counter += 1;\n                }\n  
          }\n            let unique_challenges = histogram.len();\n            if unique_challenges < total_challenges {\n                layers_with_duplicates += 1;\n            }\n        }\n\n        // If we generate 100 layers with 1,000 challenges in each, at most two layers can contain\n        // any duplicates for this assertion to succeed.\n        //\n        // This test could randomly fail (anything's possible), but if it happens regularly something is wrong.\n        assert!(layers_with_duplicates < 3);\n    }\n\n    #[test]\n    // This test shows that partitioning (k = 0..partitions) generates the same challenges as\n    // generating the same number of challenges with only one partition (k = 0).\n    fn challenge_partition_equivalence() {\n        let n = 40;\n        let leaves = 1 << 30;\n        let rng = &mut thread_rng();\n        let replica_id: Sha256Domain = Sha256Domain::random(rng);\n        let seed: [u8; 32] = rng.gen();\n        let partitions = 5;\n        let layers = 100;\n        let total_challenges = n * partitions;\n\n        for _layer in 1..=layers {\n            let one_partition_challenges = LayerChallenges::new(layers, total_challenges).derive(\n                leaves,\n                &replica_id,\n                &seed,\n                0,\n            );\n            let many_partition_challenges = (0..partitions)\n                .flat_map(|k| {\n                    LayerChallenges::new(layers, n).derive(leaves, &replica_id, &seed, k as u8)\n                })\n                .collect::<Vec<_>>();\n\n            assert_eq!(one_partition_challenges, many_partition_challenges);\n        }\n    }\n}\n"
  },
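  {
    "path": "storage-proofs-porep/examples/derive_challenges.rs",
    "content": "// NOTE: illustrative example added during editing; it is not part of the original\n// crate. It sketches how `LayerChallenges::derive` is called, mirroring the unit\n// tests in challenges.rs. The example path and the assumption that `LayerChallenges`\n// is re-exported at `storage_proofs_porep::stacked` are the editor's.\n\nuse filecoin_hashers::{sha256::Sha256Domain, Domain};\nuse rand::{thread_rng, Rng};\nuse storage_proofs_porep::stacked::LayerChallenges;\n\nfn main() {\n    let rng = &mut thread_rng();\n\n    // Replica id and randomness seed, as they would come from the prover.\n    let replica_id = Sha256Domain::random(rng);\n    let seed: [u8; 32] = rng.gen();\n\n    // 11 layers, 18 challenges per partition (arbitrary example values).\n    let challenges = LayerChallenges::new(11, 18);\n\n    // Derive the challenge indices for partition k = 0 of a sector with 2^30 leaves.\n    // Indices are always >= 1, since node 0 can never be challenged.\n    let indices = challenges.derive(1 << 30, &replica_id, &seed, 0);\n    println!(\"derived {} challenges: {:?}\", indices.len(), indices);\n}\n"
  },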
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/column.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::bls::Fr;\nuse filecoin_hashers::Hasher;\nuse serde::{Deserialize, Serialize};\nuse storage_proofs_core::{\n    error::Result,\n    merkle::{MerkleTreeTrait, Store},\n};\n\nuse crate::stacked::vanilla::{column_proof::ColumnProof, hash::hash_single_column};\n\n#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]\npub struct Column<H: Hasher> {\n    pub(crate) index: u32,\n    pub(crate) rows: Vec<H::Domain>,\n    _h: PhantomData<H>,\n}\n\nimpl<H: Hasher> Column<H> {\n    pub fn new(index: u32, rows: Vec<H::Domain>) -> Result<Self> {\n        Ok(Column {\n            index,\n            rows,\n            _h: PhantomData,\n        })\n    }\n\n    pub fn with_capacity(index: u32, capacity: usize) -> Result<Self> {\n        Column::new(index, Vec::with_capacity(capacity))\n    }\n\n    pub fn rows(&self) -> &[H::Domain] {\n        &self.rows\n    }\n\n    pub fn index(&self) -> u32 {\n        self.index\n    }\n\n    /// Calculate the column hashes `C_i = H(E_i, O_i)` for the passed in column.\n    pub fn hash(&self) -> Fr {\n        hash_single_column(\n            &self\n                .rows\n                .iter()\n                .copied()\n                .map(Into::into)\n                .collect::<Vec<_>>(),\n        )\n    }\n\n    pub fn get_node_at_layer(&self, layer: usize) -> Result<&H::Domain> {\n        assert!(layer > 0, \"layer must be greater than 0\");\n        let row_index = layer - 1;\n\n        Ok(&self.rows[row_index])\n    }\n\n    /// Create a column proof for this column.\n    pub fn into_proof<S: Store<H::Domain>, Tree: MerkleTreeTrait<Hasher = H, Store = S>>(\n        self,\n        tree_c: &Tree,\n    ) -> Result<ColumnProof<Tree::Proof>> {\n        let inclusion_proof = tree_c.gen_proof(self.index() as usize)?;\n        ColumnProof::<Tree::Proof>::from_column(self, inclusion_proof)\n    }\n}\n"
  },
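  {
    "path": "storage-proofs-porep/examples/column_hash.rs",
    "content": "// NOTE: illustrative example added during editing; it is not part of the original\n// crate. It sketches how a `Column` of per-layer labels is hashed into the single\n// field element stored in tree C, via `Column::hash`. The example path and the\n// assumption that `Column` is re-exported at `storage_proofs_porep::stacked` are\n// the editor's.\n\nuse bellperson::bls::Fr;\nuse filecoin_hashers::{poseidon::PoseidonHasher, Domain, Hasher};\nuse rand::thread_rng;\nuse storage_proofs_porep::stacked::Column;\n\nfn main() {\n    let rng = &mut thread_rng();\n\n    // One label per layer; 11 rows is one of the supported Poseidon arities.\n    let rows: Vec<<PoseidonHasher as Hasher>::Domain> = (0..11)\n        .map(|_| <PoseidonHasher as Hasher>::Domain::random(rng))\n        .collect();\n\n    let column = Column::<PoseidonHasher>::new(0, rows).expect(\"failed to build column\");\n\n    // C_i = H(rows): the value whose inclusion in tree C the circuit later enforces.\n    let hash: Fr = column.hash();\n    println!(\"column hash: {:?}\", hash);\n}\n"
  },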
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/column_proof.rs",
    "content": "use bellperson::bls::Fr;\nuse filecoin_hashers::Hasher;\nuse log::trace;\nuse serde::{de::DeserializeOwned, Deserialize, Serialize};\nuse storage_proofs_core::{error::Result, merkle::MerkleProofTrait};\n\nuse crate::stacked::vanilla::Column;\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ColumnProof<Proof: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"Column<Proof::Hasher>: Serialize\",\n        deserialize = \"Column<Proof::Hasher>: Deserialize<'de>\"\n    ))]\n    pub(crate) column: Column<Proof::Hasher>,\n    #[serde(bound(\n        serialize = \"Proof: Serialize\",\n        deserialize = \"Proof: DeserializeOwned\"\n    ))]\n    pub(crate) inclusion_proof: Proof,\n}\n\nimpl<Proof: MerkleProofTrait> ColumnProof<Proof> {\n    pub fn from_column(column: Column<Proof::Hasher>, inclusion_proof: Proof) -> Result<Self> {\n        Ok(ColumnProof {\n            column,\n            inclusion_proof,\n        })\n    }\n\n    pub fn root(&self) -> <Proof::Hasher as Hasher>::Domain {\n        self.inclusion_proof.root()\n    }\n\n    fn column(&self) -> &Column<Proof::Hasher> {\n        &self.column\n    }\n\n    pub fn get_node_at_layer(&self, layer: usize) -> Result<&<Proof::Hasher as Hasher>::Domain> {\n        self.column().get_node_at_layer(layer)\n    }\n\n    pub fn column_hash(&self) -> Fr {\n        self.column.hash()\n    }\n\n    pub fn verify(\n        &self,\n        challenge: u32,\n        expected_root: &<Proof::Hasher as Hasher>::Domain,\n    ) -> bool {\n        let c_i = self.column_hash();\n\n        check_eq!(&self.inclusion_proof.root(), expected_root);\n        check!(self.inclusion_proof.validate_data(c_i.into()));\n        check!(self.inclusion_proof.validate(challenge as usize));\n\n        true\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/cores.rs",
    "content": "use std::sync::{Mutex, MutexGuard};\n\nuse anyhow::{format_err, Result};\nuse hwloc::{Bitmap, ObjectType, Topology, TopologyObject, CPUBIND_THREAD};\nuse lazy_static::lazy_static;\nuse log::{debug, info, warn};\nuse storage_proofs_core::settings::SETTINGS;\n\ntype CoreGroup = Vec<CoreIndex>;\nlazy_static! {\n    pub static ref TOPOLOGY: Mutex<Topology> = Mutex::new(Topology::new());\n    pub static ref CORE_GROUPS: Option<Vec<Mutex<CoreGroup>>> = {\n        let num_producers = &SETTINGS.multicore_sdr_producers;\n        let cores_per_unit = num_producers + 1;\n\n        core_groups(cores_per_unit)\n    };\n}\n\n#[derive(Clone, Copy, Debug, PartialEq)]\n/// `CoreIndex` is a simple wrapper type for indexes into the set of vixible cores. A `CoreIndex` should only ever be\n/// created with a value known to be less than the number of visible cores.\npub struct CoreIndex(usize);\n\npub fn checkout_core_group() -> Option<MutexGuard<'static, CoreGroup>> {\n    match &*CORE_GROUPS {\n        Some(groups) => {\n            for (i, group) in groups.iter().enumerate() {\n                match group.try_lock() {\n                    Ok(guard) => {\n                        debug!(\"checked out core group {}\", i);\n                        return Some(guard);\n                    }\n                    Err(_) => debug!(\"core group {} locked, could not checkout\", i),\n                }\n            }\n            None\n        }\n        None => None,\n    }\n}\n\n#[cfg(not(target_os = \"windows\"))]\npub type ThreadId = libc::pthread_t;\n\n#[cfg(target_os = \"windows\")]\npub type ThreadId = winapi::winnt::HANDLE;\n\n/// Helper method to get the thread id through libc, with current rust stable (1.5.0) its not\n/// possible otherwise I think.\n#[cfg(not(target_os = \"windows\"))]\nfn get_thread_id() -> ThreadId {\n    unsafe { libc::pthread_self() }\n}\n\n#[cfg(target_os = \"windows\")]\nfn get_thread_id() -> ThreadId {\n    unsafe { kernel32::GetCurrentThread() }\n}\n\npub struct Cleanup {\n    tid: ThreadId,\n    prior_state: Option<Bitmap>,\n}\n\nimpl Drop for Cleanup {\n    fn drop(&mut self) {\n        if let Some(prior) = self.prior_state.take() {\n            let child_topo = &TOPOLOGY;\n            let mut locked_topo = child_topo.lock().expect(\"poisded lock\");\n            let _ = locked_topo.set_cpubind_for_thread(self.tid, prior, CPUBIND_THREAD);\n        }\n    }\n}\n\npub fn bind_core(core_index: CoreIndex) -> Result<Cleanup> {\n    let child_topo = &TOPOLOGY;\n    let tid = get_thread_id();\n    let mut locked_topo = child_topo.lock().expect(\"poisoned lock\");\n    let core = get_core_by_index(&locked_topo, core_index)\n        .map_err(|err| format_err!(\"failed to get core at index {}: {:?}\", core_index.0, err))?;\n\n    let cpuset = core\n        .allowed_cpuset()\n        .ok_or_else(|| format_err!(\"no allowed cpuset for core at index {}\", core_index.0,))?;\n    debug!(\"allowed cpuset: {:?}\", cpuset);\n    let mut bind_to = cpuset;\n\n    // Get only one logical processor (in case the core is SMT/hyper-threaded).\n    bind_to.singlify();\n\n    // Thread binding before explicit set.\n    let before = locked_topo.get_cpubind_for_thread(tid, CPUBIND_THREAD);\n\n    debug!(\"binding to {:?}\", bind_to);\n    // Set the binding.\n    let result = locked_topo\n        .set_cpubind_for_thread(tid, bind_to, CPUBIND_THREAD)\n        .map_err(|err| format_err!(\"failed to bind CPU: {:?}\", err));\n\n    if result.is_err() {\n        warn!(\"error in bind_core, {:?}\", 
result);\n    }\n\n    Ok(Cleanup {\n        tid,\n        prior_state: before,\n    })\n}\n\nfn get_core_by_index(topo: &Topology, index: CoreIndex) -> Result<&TopologyObject> {\n    let idx = index.0;\n\n    match topo.objects_with_type(&ObjectType::Core) {\n        Ok(all_cores) if idx < all_cores.len() => Ok(all_cores[idx]),\n        Ok(all_cores) => Err(format_err!(\n            \"idx ({}) out of range for {} cores\",\n            idx,\n            all_cores.len()\n        )),\n        _e => Err(format_err!(\"failed to get core by index {}\", idx,)),\n    }\n}\n\nfn core_groups(cores_per_unit: usize) -> Option<Vec<Mutex<Vec<CoreIndex>>>> {\n    let topo = TOPOLOGY.lock().expect(\"poisoned lock\");\n\n    let core_depth = match topo.depth_or_below_for_type(&ObjectType::Core) {\n        Ok(depth) => depth,\n        Err(_) => return None,\n    };\n    let all_cores = topo.objects_with_type(&ObjectType::Core).unwrap();\n    let core_count = all_cores.len();\n\n    let mut cache_depth = core_depth;\n    let mut cache_count = 1;\n\n    while cache_depth > 0 {\n        let objs = topo.objects_at_depth(cache_depth);\n        let obj_count = objs.len();\n        if obj_count < core_count {\n            cache_count = obj_count;\n            break;\n        }\n\n        cache_depth -= 1;\n    }\n\n    assert_eq!(0, core_count % cache_count);\n    let mut group_size = core_count / cache_count;\n    let mut group_count = cache_count;\n\n    if cache_count <= 1 {\n        // If there is no more than one shared cache, there is no benefit in trying to group cores by cache.\n        // In that case, prefer more groups so we can still bind cores and also get some parallelism.\n        // Create as many full groups as possible; any leftover cores remain ungrouped.\n        group_count = core_count / cores_per_unit;\n        group_size = cores_per_unit;\n\n        info!(\n            \"found only {} shared cache(s), heuristically grouping cores into {} groups\",\n            cache_count, group_count\n        );\n    } else {\n        debug!(\n            \"Cores: {}, Shared Caches: {}, cores per cache (group_size): {}\",\n            core_count, cache_count, group_size\n        );\n    }\n\n    let core_groups = (0..group_count)\n        .map(|i| {\n            (0..group_size)\n                .map(|j| {\n                    let core_index = i * group_size + j;\n                    assert!(core_index < core_count);\n                    CoreIndex(core_index)\n                })\n                .collect::<Vec<_>>()\n        })\n        .collect::<Vec<_>>();\n\n    Some(\n        core_groups\n            .iter()\n            .map(|group| Mutex::new(group.clone()))\n            .collect::<Vec<_>>(),\n    )\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    #[test]\n    fn test_cores() {\n        core_groups(2);\n    }\n\n    #[test]\n    #[cfg(feature = \"single-threaded\")]\n    fn test_checkout_cores() {\n        let checkout1 = checkout_core_group();\n        dbg!(&checkout1);\n        let checkout2 = checkout_core_group();\n        dbg!(&checkout2);\n\n        // This test might fail if run on a machine with fewer than four cores.\n        match (checkout1, checkout2) {\n            (Some(c1), Some(c2)) => assert!(*c1 != *c2),\n            _ => panic!(\"failed to get two checkouts\"),\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/create_label/mod.rs",
    "content": "use std::fs::{self, create_dir_all, remove_file, rename, File};\nuse std::io::{self, BufReader};\n\nuse anyhow::Context;\nuse filecoin_hashers::Hasher;\nuse log::{info, warn};\nuse merkletree::{merkle::Element, store::StoreConfig};\nuse storage_proofs_core::{\n    cache_key::CacheKey, drgraph::Graph, error::Result, merkle::MerkleTreeTrait,\n};\n\nuse crate::stacked::vanilla::{proof::LayerState, StackedBucketGraph};\n\npub mod multi;\npub mod single;\n\n/// Prepares the necessary `StoreConfig`s with which the layers are stored.\n/// Also checks for already existing layers and marks them as such.\npub fn prepare_layers<Tree: 'static + MerkleTreeTrait>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    config: &StoreConfig,\n    layers: usize,\n) -> Vec<LayerState> {\n    let label_configs = (1..=layers).map(|layer| {\n        StoreConfig::from_config(&config, CacheKey::label_layer(layer), Some(graph.size()))\n    });\n\n    let mut states = Vec::with_capacity(layers);\n    for (layer, label_config) in (1..=layers).zip(label_configs) {\n        // Clear possible left over tmp files\n        remove_tmp_layer(&label_config);\n\n        // Check if this layer is already on disk\n        let generated = is_layer_written::<Tree>(graph, &label_config).unwrap_or_default();\n        if generated {\n            // succesful load\n            info!(\"found valid labels for layer {}\", layer);\n        }\n\n        states.push(LayerState {\n            config: label_config,\n            generated,\n        });\n    }\n\n    states\n}\n\n/// Stores a layer atomically on disk, by writing first to `.tmp` and then renaming.\npub fn write_layer(data: &[u8], config: &StoreConfig) -> Result<()> {\n    let data_path = StoreConfig::data_path(&config.path, &config.id);\n    let tmp_data_path = data_path.with_extension(\".tmp\");\n\n    if let Some(parent) = data_path.parent() {\n        create_dir_all(parent).context(\"failed to create parent directories\")?;\n    }\n    fs::write(&tmp_data_path, data).context(\"failed to write layer data\")?;\n    rename(tmp_data_path, data_path).context(\"failed to rename tmp data\")?;\n\n    Ok(())\n}\n\n/// Reads a layer from disk, into the provided slice.\npub fn read_layer(config: &StoreConfig, mut data: &mut [u8]) -> Result<()> {\n    let data_path = StoreConfig::data_path(&config.path, &config.id);\n    let file = File::open(data_path).context(\"failed to open layer\")?;\n    let mut buffered = BufReader::new(file);\n    io::copy(&mut buffered, &mut data).context(\"failed to read layer\")?;\n\n    Ok(())\n}\n\npub fn remove_tmp_layer(config: &StoreConfig) {\n    let data_path = StoreConfig::data_path(&config.path, &config.id);\n    let tmp_data_path = data_path.with_extension(\".tmp\");\n    if tmp_data_path.exists() {\n        if let Err(err) = remove_file(tmp_data_path) {\n            warn!(\"failed to delete tmp file: {}\", err);\n        }\n    }\n}\n\n/// Checks if the given layer is already written and of the right size.\npub fn is_layer_written<Tree: 'static + MerkleTreeTrait>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    config: &StoreConfig,\n) -> Result<bool> {\n    let data_path = StoreConfig::data_path(&config.path, &config.id);\n    if !data_path.exists() {\n        return Ok(false);\n    }\n\n    let file = File::open(&data_path)?;\n    let metadata = file.metadata()?;\n    let file_size = metadata.len() as usize;\n\n    if file_size != graph.size() * <Tree::Hasher as Hasher>::Domain::byte_len() {\n        return Ok(false);\n    
}\n\n    Ok(true)\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/create_label/multi.rs",
    "content": "use std::convert::TryInto;\nuse std::marker::PhantomData;\nuse std::mem::{self, size_of};\nuse std::sync::{\n    atomic::{AtomicU64, Ordering::SeqCst},\n    Arc, MutexGuard,\n};\nuse std::thread;\nuse std::time::Duration;\n\nuse anyhow::{Context, Result};\nuse byte_slice_cast::{AsByteSlice, AsMutSliceOf};\nuse filecoin_hashers::Hasher;\nuse generic_array::{\n    typenum::{Unsigned, U64},\n    GenericArray,\n};\nuse log::{debug, info};\nuse mapr::MmapMut;\nuse merkletree::store::{DiskStore, Store, StoreConfig};\nuse storage_proofs_core::{\n    cache_key::CacheKey,\n    drgraph::{Graph, BASE_DEGREE},\n    merkle::MerkleTreeTrait,\n    settings::SETTINGS,\n    util::NODE_SIZE,\n};\n\nuse crate::stacked::vanilla::{\n    cache::ParentCache,\n    cores::{bind_core, checkout_core_group, CoreIndex},\n    create_label::{prepare_layers, read_layer, write_layer},\n    graph::{StackedBucketGraph, DEGREE, EXP_DEGREE},\n    memory_handling::{setup_create_label_memory, CacheReader},\n    params::{Labels, LabelsCache},\n    proof::LayerState,\n    utils::{memset, prepare_block, BitMask, RingBuf, UnsafeSlice},\n};\n\nconst MIN_BASE_PARENT_NODE: u64 = 2000;\n\nconst NODE_WORDS: usize = NODE_SIZE / size_of::<u32>();\nconst SHA_BLOCK_SIZE: usize = 64;\n\nconst SHA256_INITIAL_DIGEST: [u32; 8] = [\n    0x6a09_e667,\n    0xbb67_ae85,\n    0x3c6e_f372,\n    0xa54f_f53a,\n    0x510e_527f,\n    0x9b05_688c,\n    0x1f83_d9ab,\n    0x5be0_cd19,\n];\n\n#[inline]\nfn fill_buffer(\n    cur_node: u64,\n    parents_cache: &CacheReader<u32>,\n    mut cur_parent: &[u32], // parents for this node\n    layer_labels: &UnsafeSlice<'_, u32>,\n    exp_labels: Option<&UnsafeSlice<'_, u32>>, // None for layer0\n    buf: &mut [u8],\n    base_parent_missing: &mut BitMask,\n) {\n    let cur_node_swap = cur_node.to_be_bytes(); // Note switch to big endian\n    buf[36..44].copy_from_slice(&cur_node_swap); // update buf with current node\n\n    // Perform the first hash\n    let cur_node_ptr =\n        unsafe { &mut layer_labels.as_mut_slice()[cur_node as usize * NODE_WORDS as usize..] 
};\n\n    cur_node_ptr[..8].copy_from_slice(&SHA256_INITIAL_DIGEST);\n    compress256!(cur_node_ptr, buf, 1);\n\n    // Fill in the base parents\n    // Node 5 (prev node) will always be missing, and there tend to be\n    // frequent close references.\n    if cur_node > MIN_BASE_PARENT_NODE {\n        // Mark base parent 5 as missing\n        // base_parent_missing.set_all(0x20);\n        base_parent_missing.set(5);\n\n        // Skip the last base parent - it always points to the preceding node,\n        // which we know is not ready and will be filled in the main loop\n        for k in 0..BASE_DEGREE - 1 {\n            unsafe {\n                if cur_parent[0] as u64 >= parents_cache.get_consumer() {\n                    // Node is not ready\n                    base_parent_missing.set(k);\n                } else {\n                    let parent_data = {\n                        let offset = cur_parent[0] as usize * NODE_WORDS;\n                        &layer_labels.as_slice()[offset..offset + NODE_WORDS]\n                    };\n                    let a = SHA_BLOCK_SIZE + (NODE_SIZE * k);\n                    buf[a..a + NODE_SIZE].copy_from_slice(parent_data.as_byte_slice());\n                };\n\n                // Advance to the next base parent\n                cur_parent = &cur_parent[1..];\n            }\n        }\n        // Advance pointer past the last base parent\n        cur_parent = &cur_parent[1..];\n    } else {\n        base_parent_missing.set_upto(BASE_DEGREE as u8);\n        cur_parent = &cur_parent[BASE_DEGREE..];\n    }\n\n    if let Some(exp_labels) = exp_labels {\n        // Read from each of the expander parent nodes\n        for k in BASE_DEGREE..DEGREE {\n            let parent_data = unsafe {\n                let offset = cur_parent[0] as usize * NODE_WORDS;\n                &exp_labels.as_slice()[offset..offset + NODE_WORDS]\n            };\n            let a = SHA_BLOCK_SIZE + (NODE_SIZE * k);\n            buf[a..a + NODE_SIZE].copy_from_slice(parent_data.as_byte_slice());\n            cur_parent = &cur_parent[1..];\n        }\n    }\n}\n\n// This implements a producer, i.e. a thread that pre-fills the buffer\n// with parent node data.\n// - cur_consumer - The node currently being processed (consumed) by the\n//                  hashing thread\n// - cur_producer - The next node to be filled in by producer threads. The\n//                  hashing thread can not yet work on this node.\n// - cur_awaiting - The first node not currently being filled by any producer\n//                  thread.\n// - stride       - Each producer fills in this many nodes at a time. Setting\n//                  this too small will cause a lot of time to be spent in\n//                  thread synchronization\n// - lookahead    - ring_buf size, in nodes\n// - base_parent_missing - Bit mask of any base parent nodes that could not\n//                         be filled in. 
This is an array of size lookahead.\n// - is_layer0    - Indicates first (no expander parents) or subsequent layer\n#[allow(clippy::too_many_arguments)]\nfn create_label_runner(\n    parents_cache: &CacheReader<u32>,\n    layer_labels: &UnsafeSlice<'_, u32>,\n    exp_labels: Option<&UnsafeSlice<'_, u32>>, // None for layer 0\n    num_nodes: u64,\n    cur_producer: &AtomicU64,\n    cur_awaiting: &AtomicU64,\n    stride: u64,\n    lookahead: u64,\n    ring_buf: &RingBuf,\n    base_parent_missing: &UnsafeSlice<'_, BitMask>,\n) -> Result<()> {\n    info!(\"created label runner\");\n    // Label data bytes per node\n    loop {\n        // Get next work items\n        let work = cur_awaiting.fetch_add(stride, SeqCst);\n        if work >= num_nodes {\n            break;\n        }\n        let count = if work + stride > num_nodes {\n            num_nodes - work\n        } else {\n            stride\n        };\n\n        // Do the work of filling the buffers\n        for cur_node in work..work + count {\n            // Determine which node slot in the ring_buffer to use\n            // Note that node 0 does not use a buffer slot\n            let cur_slot = (cur_node - 1) % lookahead;\n\n            // Don't overrun the buffer\n            while cur_node > (parents_cache.get_consumer() + lookahead - 1) {\n                thread::sleep(Duration::from_micros(10));\n            }\n\n            let buf = unsafe { ring_buf.slot_mut(cur_slot as usize) };\n            let bpm = unsafe { base_parent_missing.get_mut(cur_slot as usize) };\n\n            let pc = unsafe { parents_cache.slice_at(cur_node as usize * DEGREE as usize) };\n            fill_buffer(\n                cur_node,\n                parents_cache,\n                pc,\n                &layer_labels,\n                exp_labels,\n                buf,\n                bpm,\n            );\n        }\n\n        // Wait for the previous node to finish\n        while work > (cur_producer.load(SeqCst) + 1) {\n            thread::sleep(Duration::from_micros(10));\n        }\n\n        // Mark our work as done\n        cur_producer.fetch_add(count, SeqCst);\n    }\n\n    Ok(())\n}\n\nfn create_layer_labels(\n    parents_cache: &CacheReader<u32>,\n    replica_id: &[u8],\n    layer_labels: &mut MmapMut,\n    exp_labels: Option<&mut MmapMut>,\n    num_nodes: u64,\n    cur_layer: u32,\n    core_group: Arc<Option<MutexGuard<'_, Vec<CoreIndex>>>>,\n) -> Result<()> {\n    info!(\"Creating labels for layer {}\", cur_layer);\n    // num_producers is the number of producer threads\n    let (lookahead, num_producers, producer_stride) = {\n        let settings = &SETTINGS;\n        let lookahead = settings.multicore_sdr_lookahead;\n        let num_producers = settings.multicore_sdr_producers;\n        // NOTE: Stride must not exceed the number of nodes in parents_cache's window. 
If it does, the process will deadlock\n        // with producers and consumers waiting for each other.\n        let producer_stride = settings\n            .multicore_sdr_producer_stride\n            .min(parents_cache.window_nodes() as u64);\n\n        (lookahead, num_producers, producer_stride)\n    };\n\n    const BYTES_PER_NODE: usize = (NODE_SIZE * DEGREE) + SHA_BLOCK_SIZE;\n\n    let mut ring_buf = RingBuf::new(BYTES_PER_NODE, lookahead);\n    let mut base_parent_missing = vec![BitMask::default(); lookahead];\n\n    // Fill in the fixed portion of all buffers\n    for buf in ring_buf.iter_slot_mut() {\n        prepare_block(replica_id, cur_layer, buf);\n    }\n\n    // Highest node that is ready from the producer\n    let cur_producer = AtomicU64::new(0);\n    // Next node to be filled\n    let cur_awaiting = AtomicU64::new(1);\n\n    // These UnsafeSlices are coordinated through the two atomics above (plus the consumer\n    // counter inside `parents_cache`), to minimize any locking overhead.\n    let layer_labels = UnsafeSlice::from_slice(\n        layer_labels\n            .as_mut_slice_of::<u32>()\n            .expect(\"failed as mut slice of\"),\n    );\n    let exp_labels = exp_labels.map(|m| {\n        UnsafeSlice::from_slice(m.as_mut_slice_of::<u32>().expect(\"failed as mut slice of\"))\n    });\n    let base_parent_missing = UnsafeSlice::from_slice(&mut base_parent_missing);\n\n    crossbeam::thread::scope(|s| {\n        let mut runners = Vec::with_capacity(num_producers);\n\n        for i in 0..num_producers {\n            let layer_labels = &layer_labels;\n            let exp_labels = exp_labels.as_ref();\n            let cur_producer = &cur_producer;\n            let cur_awaiting = &cur_awaiting;\n            let ring_buf = &ring_buf;\n            let base_parent_missing = &base_parent_missing;\n\n            let core_index = if let Some(cg) = &*core_group {\n                cg.get(i + 1)\n            } else {\n                None\n            };\n            runners.push(s.spawn(move |_| {\n                // This could fail, but we will ignore the error if so.\n                // It will be logged as a warning by `bind_core`.\n                debug!(\"binding core in producer thread {}\", i);\n                // When `_cleanup_handle` is dropped, the previous thread binding will be restored.\n                let _cleanup_handle = core_index.map(|c| bind_core(*c));\n\n                create_label_runner(\n                    parents_cache,\n                    layer_labels,\n                    exp_labels,\n                    num_nodes,\n                    cur_producer,\n                    cur_awaiting,\n                    producer_stride,\n                    lookahead as u64,\n                    ring_buf,\n                    base_parent_missing,\n                )\n            }));\n        }\n\n        let mut cur_node_ptr = unsafe { layer_labels.as_mut_slice() };\n        let mut cur_parent_ptr = unsafe { parents_cache.consumer_slice_at(DEGREE) };\n        let mut cur_parent_ptr_offset = DEGREE;\n\n        // Calculate node 0 (special case with no parents)\n        // Which is replica_id || cur_layer || 0\n        // TODO - Hash and save intermediate result: replica_id || cur_layer\n        let mut buf = [0u8; (NODE_SIZE * DEGREE) + 64];\n        prepare_block(replica_id, cur_layer, &mut buf);\n\n        cur_node_ptr[..8].copy_from_slice(&SHA256_INITIAL_DIGEST);\n        compress256!(cur_node_ptr, buf, 2);\n\n        // Fix endianness\n        cur_node_ptr[..8].iter_mut().for_each(|x| *x = x.to_be());\n\n        
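// Note: clearing the two most significant bits keeps the 256-bit label strictly\n        // below the BLS12-381 scalar field modulus (which lies between 2^254 and 2^255).\n        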
cur_node_ptr[7] &= 0x3FFF_FFFF; // Strip last two bits to ensure in Fr\n\n        // Keep track of which node slot in the ring_buffer to use\n        let mut cur_slot = 0;\n        let mut _count_not_ready = 0;\n\n        // Calculate nodes 1 to n\n\n        // Skip first node.\n        parents_cache.store_consumer(1);\n        let mut i = 1;\n        while i < num_nodes {\n            // Ensure next buffer is ready\n            let mut printed = false;\n            let mut producer_val = cur_producer.load(SeqCst);\n\n            while producer_val < i {\n                if !printed {\n                    debug!(\"PRODUCER NOT READY! {}\", i);\n                    printed = true;\n                    _count_not_ready += 1;\n                }\n                thread::sleep(Duration::from_micros(10));\n                producer_val = cur_producer.load(SeqCst);\n            }\n\n            // Process as many nodes as are ready\n            let ready_count = producer_val - i + 1;\n            for _count in 0..ready_count {\n                // If we have used up the last cache window's parent data, get some more.\n                if cur_parent_ptr.is_empty() {\n                    // Safety: values read from `cur_parent_ptr` before calling `increment_consumer`\n                    // must not be read again after.\n                    unsafe {\n                        cur_parent_ptr = parents_cache.consumer_slice_at(cur_parent_ptr_offset);\n                    }\n                }\n\n                cur_node_ptr = &mut cur_node_ptr[8..];\n                // Grab the current slot of the ring_buf\n                let buf = unsafe { ring_buf.slot_mut(cur_slot) };\n                // Fill in the base parents\n                for k in 0..BASE_DEGREE {\n                    let bpm = unsafe { base_parent_missing.get(cur_slot) };\n                    if bpm.get(k) {\n                        let source = unsafe {\n                            let start = cur_parent_ptr[0] as usize * NODE_WORDS;\n                            let end = start + NODE_WORDS;\n                            &layer_labels.as_slice()[start..end]\n                        };\n\n                        buf[64 + (NODE_SIZE * k)..64 + (NODE_SIZE * (k + 1))]\n                            .copy_from_slice(source.as_byte_slice());\n                    }\n                    cur_parent_ptr = &cur_parent_ptr[1..];\n                    cur_parent_ptr_offset += 1;\n                }\n\n                // Expanders are already all filled in (layer 1 doesn't use expanders)\n                cur_parent_ptr = &cur_parent_ptr[EXP_DEGREE..];\n                cur_parent_ptr_offset += EXP_DEGREE;\n\n                if cur_layer == 1 {\n                    // Six rounds of all base parents\n                    for _j in 0..6 {\n                        compress256!(cur_node_ptr, &buf[64..], 3);\n                    }\n\n                    // round 7 is only first parent\n                    memset(&mut buf[96..128], 0); // Zero out upper half of last block\n                    buf[96] = 0x80; // Padding\n                    buf[126] = 0x27; // Length (0x2700 = 9984 bits -> 1248 bytes)\n                    compress256!(cur_node_ptr, &buf[64..], 1);\n                } else {\n                    // Two rounds of all parents\n                    let blocks = [\n                        *GenericArray::<u8, U64>::from_slice(&buf[64..128]),\n                        *GenericArray::<u8, U64>::from_slice(&buf[128..192]),\n                        *GenericArray::<u8, 
U64>::from_slice(&buf[192..256]),\n                        *GenericArray::<u8, U64>::from_slice(&buf[256..320]),\n                        *GenericArray::<u8, U64>::from_slice(&buf[320..384]),\n                        *GenericArray::<u8, U64>::from_slice(&buf[384..448]),\n                        *GenericArray::<u8, U64>::from_slice(&buf[448..512]),\n                    ];\n                    sha2::compress256((&mut cur_node_ptr[..8]).try_into().unwrap(), &blocks);\n                    sha2::compress256((&mut cur_node_ptr[..8]).try_into().unwrap(), &blocks);\n\n                    // Final round is only nine parents\n                    memset(&mut buf[352..384], 0); // Zero out upper half of last block\n                    buf[352] = 0x80; // Padding\n                    buf[382] = 0x27; // Length (0x2700 = 9984 bits -> 1248 bytes)\n                    compress256!(cur_node_ptr, &buf[64..], 5);\n                }\n\n                // Fix endianness\n                cur_node_ptr[..8].iter_mut().for_each(|x| *x = x.to_be());\n\n                cur_node_ptr[7] &= 0x3FFF_FFFF; // Strip last two bits to fit in Fr\n\n                // Safety:\n                // It's possible that this increment will trigger moving the cache window.\n                // In that case, we must not access `parents_cache` again but instead replace it.\n                // This will happen above because `parents_cache` will now be empty, if we have\n                // correctly advanced it so far.\n                unsafe {\n                    parents_cache.increment_consumer();\n                }\n                i += 1;\n                cur_slot = (cur_slot + 1) % lookahead;\n            }\n        }\n\n        for runner in runners {\n            runner.join().unwrap().unwrap();\n        }\n    })\n    .unwrap();\n\n    Ok(())\n}\n\n#[allow(clippy::type_complexity)]\npub fn create_labels_for_encoding<Tree: 'static + MerkleTreeTrait, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    parents_cache: &ParentCache,\n    layers: usize,\n    replica_id: T,\n    config: StoreConfig,\n) -> Result<(Labels<Tree>, Vec<LayerState>)> {\n    info!(\"create labels\");\n\n    let layer_states = prepare_layers::<Tree>(graph, &config, layers);\n\n    let sector_size = graph.size() * NODE_SIZE;\n    let node_count = graph.size() as u64;\n    let cache_window_nodes = SETTINGS.sdr_parents_cache_size as usize;\n\n    let default_cache_size = DEGREE * 4 * cache_window_nodes;\n\n    let core_group = Arc::new(checkout_core_group());\n\n    // When `_cleanup_handle` is dropped, the previous thread binding will be restored.\n    let _cleanup_handle = (*core_group).as_ref().map(|group| {\n        // This could fail, but we will ignore the error if so.\n        // It will be logged as a warning by `bind_core`.\n        debug!(\"binding core in main thread\");\n        group.get(0).map(|core_index| bind_core(*core_index))\n    });\n\n    // NOTE: this means we currently keep 2x sector size around, to improve speed\n    let (parents_cache, mut layer_labels, mut exp_labels) = setup_create_label_memory(\n        sector_size,\n        DEGREE,\n        Some(default_cache_size as usize),\n        &parents_cache.path,\n    )?;\n\n    for (layer, layer_state) in (1..=layers).zip(layer_states.iter()) {\n        info!(\"Layer {}\", layer);\n\n        if layer_state.generated {\n            info!(\"skipping layer {}, already generated\", layer);\n\n            // load the already generated layer into exp_labels\n            
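// so that the next layer can use it as its expander-parent data.\n            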
read_layer(&layer_state.config, &mut exp_labels)?;\n            continue;\n        }\n\n        // Cache reset happens in two parts.\n        // The second part (the finish) happens before each layer but the first.\n        if layers != 1 {\n            parents_cache.finish_reset()?;\n        }\n\n        create_layer_labels(\n            &parents_cache,\n            &replica_id.as_ref(),\n            &mut layer_labels,\n            if layer == 1 {\n                None\n            } else {\n                Some(&mut exp_labels)\n            },\n            node_count,\n            layer as u32,\n            core_group.clone(),\n        )?;\n\n        // Cache reset happens in two parts.\n        // The first part (the start) happens after each layer but the last.\n        if layer != layers {\n            parents_cache.start_reset()?;\n        }\n\n        mem::swap(&mut layer_labels, &mut exp_labels);\n        {\n            let layer_config = &layer_state.config;\n\n            info!(\"  storing labels on disk\");\n            write_layer(&exp_labels, layer_config).context(\"failed to store labels\")?;\n\n            info!(\n                \"  generated layer {} store with id {}\",\n                layer, layer_config.id\n            );\n        }\n    }\n\n    Ok((\n        Labels::<Tree> {\n            labels: layer_states.iter().map(|s| s.config.clone()).collect(),\n            _h: PhantomData,\n        },\n        layer_states,\n    ))\n}\n\n#[allow(clippy::type_complexity)]\npub fn create_labels_for_decoding<Tree: 'static + MerkleTreeTrait, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    parents_cache: &ParentCache,\n    layers: usize,\n    replica_id: T,\n    config: StoreConfig,\n) -> Result<LabelsCache<Tree>> {\n    info!(\"create labels\");\n\n    // For now, we require it due to changes in the encoding structure.\n    let mut labels: Vec<DiskStore<<Tree::Hasher as Hasher>::Domain>> = Vec::with_capacity(layers);\n    let mut label_configs: Vec<StoreConfig> = Vec::with_capacity(layers);\n\n    let sector_size = graph.size() * NODE_SIZE;\n    let node_count = graph.size() as u64;\n    let cache_window_nodes = (&SETTINGS.sdr_parents_cache_size / 2) as usize;\n\n    let default_cache_size = DEGREE * 4 * cache_window_nodes;\n\n    let core_group = Arc::new(checkout_core_group());\n\n    // When `_cleanup_handle` is dropped, the previous thread binding will be restored.\n    let _cleanup_handle = (*core_group).as_ref().map(|group| {\n        // This could fail, but we will ignore the error if so.\n        // It will be logged as a warning by `bind_core`.\n        debug!(\"binding core in main thread\");\n        group.get(0).map(|core_index| bind_core(*core_index))\n    });\n\n    // NOTE: this means we currently keep 2x sector size around, to improve speed\n    let (parents_cache, mut layer_labels, mut exp_labels) = setup_create_label_memory(\n        sector_size,\n        DEGREE,\n        Some(default_cache_size as usize),\n        &parents_cache.path,\n    )?;\n\n    for layer in 1..=layers {\n        info!(\"Layer {}\", layer);\n\n        // Cache reset happens in two parts.\n        // The second part (the finish) happens before each layer but the first.\n        if layers != 1 {\n            parents_cache.finish_reset()?;\n        }\n\n        create_layer_labels(\n            &parents_cache,\n            &replica_id.as_ref(),\n            &mut layer_labels,\n            if layer == 1 {\n                None\n            } else {\n                
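// Layers after the first read their expander parents from the previous layer's labels.\n                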
Some(&mut exp_labels)\n            },\n            node_count,\n            layer as u32,\n            core_group.clone(),\n        )?;\n\n        // Cache reset happens in two parts.\n        // The first part (the start) happens after each layer but the last.\n        if layer != layers {\n            parents_cache.start_reset()?;\n        }\n\n        {\n            let layer_config =\n                StoreConfig::from_config(&config, CacheKey::label_layer(layer), Some(graph.size()));\n\n            info!(\"  storing labels on disk\");\n            // Construct and persist the layer data.\n            let layer_store: DiskStore<<Tree::Hasher as Hasher>::Domain> =\n                DiskStore::new_from_slice_with_config(\n                    graph.size(),\n                    Tree::Arity::to_usize(),\n                    &layer_labels,\n                    layer_config.clone(),\n                )?;\n            info!(\n                \"  generated layer {} store with id {}\",\n                layer, layer_config.id\n            );\n\n            mem::swap(&mut layer_labels, &mut exp_labels);\n\n            // Track the layer specific store and StoreConfig for later retrieval.\n            labels.push(layer_store);\n            label_configs.push(layer_config);\n        }\n    }\n    assert_eq!(\n        labels.len(),\n        layers,\n        \"invalid number of layers encoded\"\n    );\n\n    Ok(LabelsCache::<Tree> { labels })\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use bellperson::bls::{Fr, FrRepr};\n    use ff::PrimeField;\n    use filecoin_hashers::poseidon::PoseidonHasher;\n    use generic_array::typenum::{U0, U2, U8};\n    use storage_proofs_core::{api_version::ApiVersion, merkle::LCTree};\n    use tempfile::tempdir;\n\n    #[test]\n    fn test_create_labels() {\n        let layers = 11;\n        let nodes_2k = 1 << 11;\n        let nodes_4k = 1 << 12;\n        let replica_id = [9u8; 32];\n\n        // These PoRepIDs are only useful to distinguish legacy/new sectors.\n        // They do not correspond to registered proofs of the sizes used here.\n        let legacy_porep_id = [0; 32];\n        let new_porep_id = [123; 32];\n        test_create_labels_aux(\n            nodes_2k,\n            layers,\n            replica_id,\n            legacy_porep_id,\n            ApiVersion::V1_0_0,\n            Fr::from_repr(FrRepr([\n                0xd3faa96b9a0fba04,\n                0xea81a283d106485e,\n                0xe3d51b9afa5ac2b3,\n                0x0462f4f4f1a68d37,\n            ]))\n            .unwrap(),\n        );\n        test_create_labels_aux(\n            nodes_4k,\n            layers,\n            replica_id,\n            legacy_porep_id,\n            ApiVersion::V1_0_0,\n            Fr::from_repr(FrRepr([\n                0x7e191e52c4a8da86,\n                0x5ae8a1c9e6fac148,\n                0xce239f3b88a894b8,\n                0x234c00d1dc1d53be,\n            ]))\n            .unwrap(),\n        );\n\n        test_create_labels_aux(\n            nodes_2k,\n            layers,\n            replica_id,\n            new_porep_id,\n            ApiVersion::V1_1_0,\n            Fr::from_repr(FrRepr([\n                0xabb3f38bb70defcf,\n                0x777a2e4d7769119f,\n                0x3448959d495490bc,\n                0x06021188c7a71cb5,\n            ]))\n            .unwrap(),\n        );\n\n        test_create_labels_aux(\n            nodes_4k,\n            layers,\n            replica_id,\n            new_porep_id,\n            
ApiVersion::V1_1_0,\n            Fr::from_repr(FrRepr([\n                0x22ab81cf68c4676d,\n                0x7a77a82fc7c9c189,\n                0xc6c03d32c1e42d23,\n                0x0f777c18cc2c55bd,\n            ]))\n            .unwrap(),\n        );\n    }\n\n    fn test_create_labels_aux(\n        sector_size: usize,\n        layers: usize,\n        replica_id: [u8; 32],\n        porep_id: [u8; 32],\n        api_version: ApiVersion,\n        expected_last_label: Fr,\n    ) {\n        let nodes = sector_size / NODE_SIZE;\n\n        let cache_dir = tempdir().expect(\"tempdir failure\");\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            nodes.trailing_zeros() as usize,\n        );\n\n        let graph = StackedBucketGraph::<PoseidonHasher>::new(\n            None,\n            nodes,\n            BASE_DEGREE,\n            EXP_DEGREE,\n            porep_id,\n            api_version,\n        )\n        .unwrap();\n        let cache = graph.parent_cache().unwrap();\n\n        let labels = create_labels_for_decoding::<LCTree<PoseidonHasher, U8, U0, U2>, _>(\n            &graph, &cache, layers, replica_id, config,\n        )\n        .unwrap();\n\n        let final_labels = labels.labels_for_last_layer().unwrap();\n        let last_label = final_labels.read_at(final_labels.len() - 1).unwrap();\n        dbg!(&last_label);\n        assert_eq!(expected_last_label.into_repr(), last_label.0);\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/create_label/single.rs",
    "content": "use std::marker::PhantomData;\nuse std::mem;\n\nuse anyhow::{Context, Result};\nuse filecoin_hashers::Hasher;\nuse generic_array::typenum::Unsigned;\nuse log::info;\nuse merkletree::store::{DiskStore, Store, StoreConfig};\nuse sha2raw::Sha256;\nuse storage_proofs_core::{\n    drgraph::Graph,\n    merkle::MerkleTreeTrait,\n    util::{data_at_node_offset, NODE_SIZE},\n};\n\nuse crate::stacked::vanilla::{\n    cache::ParentCache,\n    create_label::{prepare_layers, read_layer, write_layer},\n    proof::LayerState,\n    Labels, LabelsCache, StackedBucketGraph,\n};\n\n#[allow(clippy::type_complexity)]\npub fn create_labels_for_encoding<Tree: 'static + MerkleTreeTrait, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    parents_cache: &mut ParentCache,\n    layers: usize,\n    replica_id: T,\n    config: StoreConfig,\n) -> Result<(Labels<Tree>, Vec<LayerState>)> {\n    info!(\"generate labels\");\n\n    let layer_states = prepare_layers::<Tree>(graph, &config, layers);\n\n    let layer_size = graph.size() * NODE_SIZE;\n    // NOTE: this means we currently keep 2x sector size around, to improve speed.\n    let mut layer_labels = vec![0u8; layer_size]; // Buffer for labels of the current layer\n    let mut exp_labels = vec![0u8; layer_size]; // Buffer for labels of the previous layer, needed for expander parents\n\n    for (layer, layer_state) in (1..=layers).zip(layer_states.iter()) {\n        info!(\"generating layer: {}\", layer);\n        if layer_state.generated {\n            info!(\"skipping layer {}, already generated\", layer);\n\n            // load the already generated layer into exp_labels\n            read_layer(&layer_state.config, &mut exp_labels)?;\n            continue;\n        }\n\n        parents_cache.reset()?;\n\n        if layer == 1 {\n            for node in 0..graph.size() {\n                create_label(\n                    graph,\n                    Some(parents_cache),\n                    &replica_id,\n                    &mut layer_labels,\n                    layer,\n                    node,\n                )?;\n            }\n        } else {\n            for node in 0..graph.size() {\n                create_label_exp(\n                    graph,\n                    Some(parents_cache),\n                    &replica_id,\n                    &exp_labels,\n                    &mut layer_labels,\n                    layer,\n                    node,\n                )?;\n            }\n        }\n\n        // Write the result to disk to avoid keeping it in memory all the time.\n        let layer_config = &layer_state.config;\n\n        info!(\"  storing labels on disk\");\n        write_layer(&layer_labels, layer_config).context(\"failed to store labels\")?;\n\n        info!(\n            \"  generated layer {} store with id {}\",\n            layer, layer_config.id\n        );\n\n        info!(\"  setting exp parents\");\n        mem::swap(&mut layer_labels, &mut exp_labels);\n    }\n\n    Ok((\n        Labels::<Tree> {\n            labels: layer_states.iter().map(|s| s.config.clone()).collect(),\n            _h: PhantomData,\n        },\n        layer_states,\n    ))\n}\n\n#[allow(clippy::type_complexity)]\npub fn create_labels_for_decoding<Tree: 'static + MerkleTreeTrait, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<Tree::Hasher>,\n    parents_cache: &mut ParentCache,\n    layers: usize,\n    replica_id: T,\n    config: StoreConfig,\n) -> Result<LabelsCache<Tree>> {\n    info!(\"generate labels\");\n\n    // For now, we 
require it due to changes in the encoding structure.\n    let mut labels: Vec<DiskStore<<Tree::Hasher as Hasher>::Domain>> = Vec::with_capacity(layers);\n\n    let layer_size = graph.size() * NODE_SIZE;\n    // NOTE: this means we currently keep 2x sector size around, to improve speed.\n    let mut layer_labels = vec![0u8; layer_size]; // Buffer for labels of the current layer\n    let mut exp_labels = vec![0u8; layer_size]; // Buffer for labels of the previous layer, needed for expander parents\n\n    for layer in 1..=layers {\n        info!(\"generating layer: {}\", layer);\n\n        parents_cache.reset()?;\n\n        if layer == 1 {\n            for node in 0..graph.size() {\n                create_label(\n                    graph,\n                    Some(parents_cache),\n                    &replica_id,\n                    &mut layer_labels,\n                    layer,\n                    node,\n                )?;\n            }\n        } else {\n            for node in 0..graph.size() {\n                create_label_exp(\n                    graph,\n                    Some(parents_cache),\n                    &replica_id,\n                    &exp_labels,\n                    &mut layer_labels,\n                    layer,\n                    node,\n                )?;\n            }\n        }\n\n        // Write the result to disk to avoid keeping it in memory all the time.\n        info!(\"  storing labels on disk\");\n        write_layer(&layer_labels, &config)?;\n\n        let layer_store: DiskStore<<Tree::Hasher as Hasher>::Domain> =\n            DiskStore::new_from_disk(graph.size(), Tree::Arity::to_usize(), &config)?;\n        info!(\"  generated layer {} store with id {}\", layer, config.id);\n\n        info!(\"  setting exp parents\");\n        mem::swap(&mut layer_labels, &mut exp_labels);\n\n        // Track the layer specific store and StoreConfig for later retrieval.\n        labels.push(layer_store);\n    }\n\n    assert_eq!(\n        labels.len(),\n        layers,\n        \"invalid number of layers encoded\"\n    );\n\n    Ok(LabelsCache::<Tree> { labels })\n}\n\npub fn create_label<H: Hasher, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<H>,\n    cache: Option<&mut ParentCache>,\n    replica_id: T,\n    layer_labels: &mut [u8],\n    layer_index: usize,\n    node: usize,\n) -> Result<()> {\n    let mut hasher = Sha256::new();\n    let mut buffer = [0u8; 32];\n\n    buffer[..4].copy_from_slice(&(layer_index as u32).to_be_bytes());\n    buffer[4..12].copy_from_slice(&(node as u64).to_be_bytes());\n    hasher.input(&[replica_id.as_ref(), &buffer[..]][..]);\n\n    // hash parents for all non-zero nodes\n    let hash = if node > 0 {\n        // prefetch previous node, which is always a parent\n        let prev = &layer_labels[(node - 1) * NODE_SIZE..node * NODE_SIZE];\n        prefetch!(prev.as_ptr() as *const i8);\n\n        graph.copy_parents_data(node as u32, &*layer_labels, hasher, cache)?\n    } else {\n        hasher.finish()\n    };\n\n    // store the newly generated key\n    let start = data_at_node_offset(node);\n    let end = start + NODE_SIZE;\n    layer_labels[start..end].copy_from_slice(&hash[..]);\n\n    // strip last two bits, to ensure result is in Fr.\n    layer_labels[end - 1] &= 0b0011_1111;\n\n    Ok(())\n}\n\npub fn create_label_exp<H: Hasher, T: AsRef<[u8]>>(\n    graph: &StackedBucketGraph<H>,\n    cache: Option<&mut ParentCache>,\n    replica_id: T,\n    exp_parents_data: &[u8],\n    layer_labels: &mut [u8],\n    layer_index: 
usize,\n    node: usize,\n) -> Result<()> {\n    let mut hasher = Sha256::new();\n    let mut buffer = [0u8; 32];\n\n    buffer[0..4].copy_from_slice(&(layer_index as u32).to_be_bytes());\n    buffer[4..12].copy_from_slice(&(node as u64).to_be_bytes());\n    hasher.input(&[replica_id.as_ref(), &buffer[..]][..]);\n\n    // hash parents for all non-zero nodes\n    let hash = if node > 0 {\n        // prefetch previous node, which is always a parent\n        let prev = &layer_labels[(node - 1) * NODE_SIZE..node * NODE_SIZE];\n        prefetch!(prev.as_ptr() as *const i8);\n\n        graph.copy_parents_data_exp(node as u32, &*layer_labels, exp_parents_data, hasher, cache)?\n    } else {\n        hasher.finish()\n    };\n\n    // store the newly generated key\n    let start = data_at_node_offset(node);\n    let end = start + NODE_SIZE;\n    layer_labels[start..end].copy_from_slice(&hash[..]);\n\n    // strip last two bits, to ensure result is in Fr.\n    layer_labels[end - 1] &= 0b0011_1111;\n\n    Ok(())\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/encoding_proof.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::bls::Fr;\nuse filecoin_hashers::Hasher;\nuse fr32::bytes_into_fr_repr_safe;\nuse log::trace;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\n\nuse crate::encode::encode;\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct EncodingProof<H: Hasher> {\n    pub(crate) parents: Vec<H::Domain>,\n    pub(crate) layer_index: u32,\n    pub(crate) node: u64,\n    #[serde(skip)]\n    _h: PhantomData<H>,\n}\n\nimpl<H: Hasher> EncodingProof<H> {\n    pub fn new(layer_index: u32, node: u64, parents: Vec<H::Domain>) -> Self {\n        EncodingProof {\n            layer_index,\n            node,\n            parents,\n            _h: PhantomData,\n        }\n    }\n\n    fn create_key(&self, replica_id: &H::Domain) -> H::Domain {\n        let mut hasher = Sha256::new();\n        let mut buffer = [0u8; 64];\n\n        // replica_id\n        buffer[..32].copy_from_slice(AsRef::<[u8]>::as_ref(replica_id));\n\n        // layer index\n        buffer[32..36].copy_from_slice(&(self.layer_index as u32).to_be_bytes());\n        // node id\n        buffer[36..44].copy_from_slice(&(self.node as u64).to_be_bytes());\n\n        hasher.update(&buffer[..]);\n\n        // parents\n        for parent in &self.parents {\n            hasher.update(AsRef::<[u8]>::as_ref(parent));\n        }\n\n        bytes_into_fr_repr_safe(hasher.finalize().as_ref()).into()\n    }\n\n    pub fn verify<G: Hasher>(\n        &self,\n        replica_id: &H::Domain,\n        exp_encoded_node: &H::Domain,\n        decoded_node: &G::Domain,\n    ) -> bool {\n        let key = self.create_key(replica_id);\n\n        let fr: Fr = (*decoded_node).into();\n        let encoded_node = encode(key, fr.into());\n\n        check_eq!(exp_encoded_node, &encoded_node);\n\n        true\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/graph.rs",
    "content": "use std::convert::{TryFrom, TryInto};\nuse std::fmt::{self, Debug, Formatter};\nuse std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse filecoin_hashers::Hasher;\nuse log::info;\nuse sha2raw::Sha256;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    crypto::{\n        derive_porep_domain_seed,\n        feistel::{self, FeistelPrecomputed},\n        FEISTEL_DST,\n    },\n    drgraph::{BucketGraph, Graph, BASE_DEGREE},\n    error::Result,\n    parameter_cache::ParameterSetMetadata,\n    settings::SETTINGS,\n    util::NODE_SIZE,\n    PoRepID,\n};\n\nuse crate::stacked::vanilla::cache::ParentCache;\n\n/// The expansion degree used for Stacked Graphs.\npub const EXP_DEGREE: usize = 8;\n\npub(crate) const DEGREE: usize = BASE_DEGREE + EXP_DEGREE;\n\n#[derive(Clone)]\npub struct StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + 'static,\n{\n    expansion_degree: usize,\n    base_graph: G,\n    pub(crate) feistel_keys: [feistel::Index; 4],\n    feistel_precomputed: FeistelPrecomputed,\n    api_version: ApiVersion,\n    id: String,\n    _h: PhantomData<H>,\n}\n\nimpl<H, G> Debug for StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + 'static,\n{\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"StackedGraph\")\n            .field(\"expansion_degree\", &self.expansion_degree)\n            .field(\"base_graph\", &self.base_graph)\n            .field(\"feistel_precomputed\", &self.feistel_precomputed)\n            .field(\"id\", &self.id)\n            .finish()\n    }\n}\n\npub type StackedBucketGraph<H> = StackedGraph<H, BucketGraph<H>>;\n\n#[inline]\nfn prefetch(parents: &[u32], data: &[u8]) {\n    for parent in parents {\n        let start = *parent as usize * NODE_SIZE;\n        let end = start + NODE_SIZE;\n\n        prefetch!(data[start..end].as_ptr() as *const i8);\n    }\n}\n\n#[inline]\nfn read_node<'a>(i: usize, parents: &[u32], data: &'a [u8]) -> &'a [u8] {\n    let start = parents[i] as usize * NODE_SIZE;\n    let end = start + NODE_SIZE;\n    &data[start..end]\n}\n\npub fn derive_feistel_keys(porep_id: PoRepID) -> [u64; 4] {\n    let mut feistel_keys = [0u64; 4];\n    let raw_seed = derive_porep_domain_seed(FEISTEL_DST, porep_id);\n    feistel_keys[0] = u64::from_le_bytes(raw_seed[0..8].try_into().expect(\"from_le_bytes failure\"));\n    feistel_keys[1] =\n        u64::from_le_bytes(raw_seed[8..16].try_into().expect(\"from_le_bytes failure\"));\n    feistel_keys[2] =\n        u64::from_le_bytes(raw_seed[16..24].try_into().expect(\"from_le_bytes failure\"));\n    feistel_keys[3] =\n        u64::from_le_bytes(raw_seed[24..32].try_into().expect(\"from_le_bytes failure\"));\n    feistel_keys\n}\n\nimpl<H, G> StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata + Sync + Send,\n{\n    pub fn new(\n        base_graph: Option<G>,\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        porep_id: PoRepID,\n        api_version: ApiVersion,\n    ) -> Result<Self> {\n        assert_eq!(base_degree, BASE_DEGREE);\n        assert_eq!(expansion_degree, EXP_DEGREE);\n        ensure!(nodes <= u32::MAX as usize, \"too many nodes\");\n\n        let base_graph = match base_graph {\n            Some(graph) => graph,\n            None => G::new(nodes, base_degree, 0, porep_id, api_version)?,\n        };\n\n        let bg_id = base_graph.identifier();\n\n        let feistel_keys = derive_feistel_keys(porep_id);\n\n        let res = StackedGraph {\n            
base_graph,\n            id: format!(\n                \"stacked_graph::StackedGraph{{expansion_degree: {} base_graph: {} }}\",\n                expansion_degree, bg_id,\n            ),\n            expansion_degree,\n            feistel_keys,\n            feistel_precomputed: feistel::precompute((expansion_degree * nodes) as feistel::Index),\n            api_version,\n            _h: PhantomData,\n        };\n\n        Ok(res)\n    }\n\n    /// Returns a reference to the parent cache.\n    pub fn parent_cache(&self) -> Result<ParentCache> {\n        // Number of nodes to be cached in memory\n        let default_cache_size = SETTINGS.sdr_parents_cache_size;\n        let cache_entries = self.size() as u32;\n        let cache_size = cache_entries.min(default_cache_size);\n\n        info!(\"using parent_cache[{} / {}]\", cache_size, cache_entries);\n\n        ParentCache::new(cache_size, cache_entries, self)\n    }\n    pub fn copy_parents_data_exp(\n        &self,\n        node: u32,\n        base_data: &[u8],\n        exp_data: &[u8],\n        hasher: Sha256,\n        mut cache: Option<&mut ParentCache>,\n    ) -> Result<[u8; 32]> {\n        if let Some(ref mut cache) = cache {\n            let cache_parents = cache.read(node as u32)?;\n            Ok(self.copy_parents_data_inner_exp(&cache_parents, base_data, exp_data, hasher))\n        } else {\n            let mut cache_parents = [0u32; DEGREE];\n\n            self.parents(node as usize, &mut cache_parents[..])\n                .expect(\"parents failure\");\n            Ok(self.copy_parents_data_inner_exp(&cache_parents, base_data, exp_data, hasher))\n        }\n    }\n\n    pub fn copy_parents_data(\n        &self,\n        node: u32,\n        base_data: &[u8],\n        hasher: Sha256,\n        mut cache: Option<&mut ParentCache>,\n    ) -> Result<[u8; 32]> {\n        if let Some(ref mut cache) = cache {\n            let cache_parents = cache.read(node as u32)?;\n            Ok(self.copy_parents_data_inner(&cache_parents, base_data, hasher))\n        } else {\n            let mut cache_parents = [0u32; DEGREE];\n\n            self.parents(node as usize, &mut cache_parents[..])\n                .expect(\"parents failure\");\n            Ok(self.copy_parents_data_inner(&cache_parents, base_data, hasher))\n        }\n    }\n\n    fn copy_parents_data_inner_exp(\n        &self,\n        cache_parents: &[u32],\n        base_data: &[u8],\n        exp_data: &[u8],\n        mut hasher: Sha256,\n    ) -> [u8; 32] {\n        prefetch(&cache_parents[..BASE_DEGREE], base_data);\n        prefetch(&cache_parents[BASE_DEGREE..], exp_data);\n\n        // fill buffer\n        let parents = [\n            read_node(0, cache_parents, base_data),\n            read_node(1, cache_parents, base_data),\n            read_node(2, cache_parents, base_data),\n            read_node(3, cache_parents, base_data),\n            read_node(4, cache_parents, base_data),\n            read_node(5, cache_parents, base_data),\n            read_node(6, cache_parents, exp_data),\n            read_node(7, cache_parents, exp_data),\n            read_node(8, cache_parents, exp_data),\n            read_node(9, cache_parents, exp_data),\n            read_node(10, cache_parents, exp_data),\n            read_node(11, cache_parents, exp_data),\n            read_node(12, cache_parents, exp_data),\n            read_node(13, cache_parents, exp_data),\n        ];\n\n        // round 1 (14)\n        hasher.input(&parents);\n\n        // round 2 (14)\n        hasher.input(&parents);\n\n      
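  // Note: each input() call absorbs the fourteen 32-byte parent labels (seven\n        // 64-byte SHA-256 blocks); rounds 1-3 feed 14 + 14 + 9 = 37 labels in total\n        // before finish_with pads and finalizes the digest.\n      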
  // round 3 (9)\n        hasher.input(&parents[..8]);\n        hasher.finish_with(&parents[8])\n    }\n\n    fn copy_parents_data_inner(\n        &self,\n        cache_parents: &[u32],\n        base_data: &[u8],\n        mut hasher: Sha256,\n    ) -> [u8; 32] {\n        prefetch(&cache_parents[..BASE_DEGREE], base_data);\n\n        // fill buffer\n        let parents = [\n            read_node(0, cache_parents, base_data),\n            read_node(1, cache_parents, base_data),\n            read_node(2, cache_parents, base_data),\n            read_node(3, cache_parents, base_data),\n            read_node(4, cache_parents, base_data),\n            read_node(5, cache_parents, base_data),\n        ];\n\n        // round 1 (0..6)\n        hasher.input(&parents);\n\n        // round 2 (6..12)\n        hasher.input(&parents);\n\n        // round 3 (12..18)\n        hasher.input(&parents);\n\n        // round 4 (18..24)\n        hasher.input(&parents);\n\n        // round 5 (24..30)\n        hasher.input(&parents);\n\n        // round 6 (30..36)\n        hasher.input(&parents);\n\n        // round 7 (36..37)\n        hasher.finish_with(parents[0])\n    }\n}\n\nimpl<H, G> ParameterSetMetadata for StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata,\n{\n    fn identifier(&self) -> String {\n        self.id.clone()\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.base_graph.sector_size()\n    }\n}\n\nimpl<H, G> Graph<H> for StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata + Sync + Send,\n{\n    type Key = Vec<u8>;\n\n    fn size(&self) -> usize {\n        self.base_graph().size()\n    }\n\n    fn degree(&self) -> usize {\n        self.base_graph.degree() + self.expansion_degree\n    }\n\n    #[inline]\n    fn parents(&self, node: usize, parents: &mut [u32]) -> Result<()> {\n        self.base_parents(node, &mut parents[..self.base_graph().degree()])?;\n\n        // expanded_parents takes raw_node\n        self.expanded_parents(\n            node,\n            &mut parents\n                [self.base_graph().degree()..self.base_graph().degree() + self.expansion_degree()],\n        )?;\n\n        Ok(())\n    }\n\n    fn seed(&self) -> [u8; 28] {\n        self.base_graph().seed()\n    }\n\n    fn new(\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        porep_id: PoRepID,\n        api_version: ApiVersion,\n    ) -> Result<Self> {\n        Self::new_stacked(nodes, base_degree, expansion_degree, porep_id, api_version)\n    }\n\n    fn create_key(\n        &self,\n        _id: &H::Domain,\n        _node: usize,\n        _parents: &[u32],\n        _base_parents_data: &[u8],\n        _exp_parents_data: Option<&[u8]>,\n    ) -> Result<Self::Key> {\n        unimplemented!(\"not used\");\n    }\n}\n\nimpl<'a, H, G> StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H> + ParameterSetMetadata + Sync + Send,\n{\n    /// Assign one parent to `node` using Chung's construction with a reversible\n    /// permutation function from a Feistel cipher (controlled by `invert_permutation`).\n    fn correspondent(&self, node: usize, i: usize) -> u32 {\n        // We can't just generate random values between `[0, size())`, we need to\n        // expand the search space (domain) to accommodate every unique parent assignment\n        // generated here. 
This can be visualized more clearly as a matrix where each\n        // new parent of each new node is assigned a unique `index`:\n        //\n        //\n        //          | Parent 1 | Parent 2 | Parent 3 |\n        //\n        // | Node 1 |     0    |     1    |     2    |\n        //\n        // | Node 2 |     3    |     4    |     5    |\n        //\n        // | Node 3 |     6    |     7    |     8    |\n        //\n        // | Node 4 |     9    |     A    |     B    |\n        //\n        // This starting `index` will be shuffled to another position to generate a\n        // parent-child relationship, e.g., if generating the parents for the second node,\n        // `permute` would be called with values `[3; 4; 5]` that would be mapped to other\n        // indexes in the search space of `[0, B]`, say, values `[A; 0; 4]`, that would\n        // correspond to nodes numbered `[4; 1; 2]` which will become the parents of the\n        // second node. In a later pass, invalid parents (like 2, which is self-referencing) and\n        // parents with indexes bigger than 2 (if in the `forward` direction, smaller than 2 if the\n        // inverse), will be removed.\n        let a = (node * self.expansion_degree) as feistel::Index + i as feistel::Index;\n\n        let transformed = feistel::permute(\n            self.size() as feistel::Index * self.expansion_degree as feistel::Index,\n            a,\n            &self.feistel_keys,\n            self.feistel_precomputed,\n        );\n\n        match self.api_version {\n            ApiVersion::V1_0_0 => transformed as u32 / self.expansion_degree as u32,\n            ApiVersion::V1_1_0 => u32::try_from(transformed as u64 / self.expansion_degree as u64)\n                .expect(\"invalid transformation\"),\n        }\n\n        // Collapse the output in the matrix search space to the row of the corresponding\n        // node (losing the column information, that will be regenerated later when calling\n        // back this function in the `reversed` direction).\n    }\n\n    pub fn generate_expanded_parents(&self, node: usize, expanded_parents: &mut [u32]) {\n        debug_assert_eq!(expanded_parents.len(), self.expansion_degree);\n        for (i, el) in expanded_parents.iter_mut().enumerate() {\n            *el = self.correspondent(node, i);\n        }\n    }\n\n    pub fn new_stacked(\n        nodes: usize,\n        base_degree: usize,\n        expansion_degree: usize,\n        porep_id: PoRepID,\n        api_version: ApiVersion,\n    ) -> Result<Self> {\n        Self::new(\n            None,\n            nodes,\n            base_degree,\n            expansion_degree,\n            porep_id,\n            api_version,\n        )\n    }\n\n    pub fn base_graph(&self) -> &G {\n        &self.base_graph\n    }\n\n    pub fn expansion_degree(&self) -> usize {\n        self.expansion_degree\n    }\n\n    pub fn base_parents(&self, node: usize, parents: &mut [u32]) -> Result<()> {\n        // No cache usage, generate on demand.\n        self.base_graph().parents(node, parents)\n    }\n\n    /// Assign `self.expansion_degree` parents to `node` using an invertible permutation\n    /// that is applied one way for the forward layers and the other way for the reversed\n    /// ones.\n    #[inline]\n    pub fn expanded_parents(&self, node: usize, parents: &mut [u32]) -> Result<()> {\n        // No cache usage, generate on demand.\n        self.generate_expanded_parents(node, parents);\n        Ok(())\n    }\n}\n\nimpl<H, G> PartialEq for StackedGraph<H, G>\nwhere\n    H: 
Hasher,\n    G: Graph<H>,\n{\n    fn eq(&self, other: &StackedGraph<H, G>) -> bool {\n        self.base_graph == other.base_graph && self.expansion_degree == other.expansion_degree\n    }\n}\n\nimpl<H, G> Eq for StackedGraph<H, G>\nwhere\n    H: Hasher,\n    G: Graph<H>,\n{\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::collections::HashSet;\n\n    use filecoin_hashers::poseidon::PoseidonHasher;\n\n    // Test that 3 (or more) rounds of the Feistel cipher can be used\n    // as a pseudorandom permutation, that is, each input will be mapped\n    // to a unique output (and though not tested here, since the cipher\n    // is symmetric, the decryption rounds also work as the inverse\n    // permutation), for more details see:\n    // https://en.wikipedia.org/wiki/Feistel_cipher#Theoretical_work.\n    #[test]\n    fn test_shuffle() {\n        let n = 2_u64.pow(10);\n        let d = EXP_DEGREE as u64;\n        // Use a relatively small value of `n` as Feistel is expensive (but big\n        // enough that `n >> d`).\n\n        let mut shuffled: HashSet<u64> = HashSet::with_capacity((n * d) as usize);\n\n        let feistel_keys = &[1, 2, 3, 4];\n        let feistel_precomputed = feistel::precompute((n * d) as feistel::Index);\n\n        for i in 0..n {\n            for k in 0..d {\n                let permuted =\n                    feistel::permute(n * d, i * d + k, feistel_keys, feistel_precomputed);\n\n                // Since the permutation implies a one-to-one correspondence,\n                // traversing the entire input space should generate the entire\n                // output space (in `shuffled`) without repetitions (since a duplicate\n                // output would imply there is another output that wasn't generated\n                // and the permutation would be incomplete).\n                assert!(shuffled.insert(permuted));\n            }\n        }\n\n        // Actually implied by the previous `assert!`, this is left in place as an\n        // extra safety check that indeed the permutation preserved all the output\n        // space (of `n * d` nodes) without repetitions (which the `HashSet` would\n        // have skipped as duplicates).\n        assert_eq!(shuffled.len(), (n * d) as usize);\n    }\n\n    #[test]\n    /// The initial implementation had a bug which prevented parents from ever falling in the latter half of a sector.\n    /// In fact, it is even worse than that, in the case of 64GiB sectors.\n    /// This test demonstrates conclusively that non-legacy graphs do not suffer from this pathology.\n    /// It also suggests, inconclusively, that legacy graphs do suffer from it (which we already know).\n    fn test_graph_distribution_pathology() {\n        let sector32_nodes: u32 = 1 << 30;\n        let sector64_nodes: u32 = 1 << 31;\n\n        let porep_id = |id: u8| {\n            let mut porep_id = [0u8; 32];\n            porep_id[0] = id;\n\n            porep_id\n        };\n\n        test_pathology_aux(porep_id(3), sector32_nodes, ApiVersion::V1_0_0);\n        test_pathology_aux(porep_id(4), sector64_nodes, ApiVersion::V1_0_0);\n\n        test_pathology_aux(porep_id(8), sector32_nodes, ApiVersion::V1_1_0);\n        test_pathology_aux(porep_id(9), sector64_nodes, ApiVersion::V1_1_0);\n    }\n\n    fn test_pathology_aux(porep_id: PoRepID, nodes: u32, api_version: ApiVersion) {\n        // In point of fact, the concrete graphs expected to be non-pathological\n        // appear to demonstrate this immediately (i.e. in the first node). 
We\n        // test more than that just to make the tentative diagnosis of pathology\n        // more convincing in the cases where we expect it. In the interest of\n        // keeping the tests brief, we keep this fairly small, though, since we\n        // already know the previous porep_ids exhibit the problem. The main\n        // reason to test those cases at all is to convince ourselves the test\n        // is sound.\n        let test_n = 1_000;\n\n        let expect_pathological = match api_version {\n            ApiVersion::V1_0_0 => true,\n            ApiVersion::V1_1_0 => false,\n        };\n\n        let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(\n            nodes as usize,\n            BASE_DEGREE,\n            EXP_DEGREE,\n            porep_id,\n            api_version,\n        )\n        .unwrap();\n\n        // If a parent index is not less than half the total node count, then\n        // the parent falls in the second half of the previous layer. By the\n        // definition of 'pathology' used here, that means the graph producing\n        // this parent is not pathological.\n        let demonstrably_large_enough = |p: &u32| *p >= (nodes / 2);\n\n        dbg!(&porep_id, &nodes, &expect_pathological);\n        for i in 0..test_n {\n            let mut expanded_parents = [0u32; EXP_DEGREE];\n            graph.expanded_parents(i, &mut expanded_parents).unwrap();\n\n            if expect_pathological {\n                // If we ever see a large-enough parent, then this graph is not\n                // pathological, so the test fails.\n                assert!(\n                    !expanded_parents.iter().any(demonstrably_large_enough),\n                    \"Expected pathological graph but found large-enough parent.\"\n                );\n            } else {\n                if expanded_parents.iter().any(demonstrably_large_enough) {\n                    // If we ever see a large-enough parent, then this graph is\n                    // not pathological, and the test succeeds. This is the only\n                    // way for a test expecting a non-pathological graph to\n                    // succeed, so there is no risk of false negatives (i.e.\n                    // failure to identify pathological graphs when unexpected).\n                    return;\n                }\n            }\n        }\n\n        // If we get here, we did not observe a parent large enough to conclude\n        // that the graph is not pathological. In that case, the test fails if we\n        // expected a non-pathological graph and succeeds otherwise. NOTE: this\n        // could lead us to conclude that an actually non-pathological graph is\n        // pathological, if `test_n` is set too low. 
Since the primary purpose\n        // of this test is to assure us that newer graphs are not pathological,\n        // it suffices to set `test_n` high enough to detect that.\n        assert!(expect_pathological, \"Did not expect pathological graph, but did not see large-enough parent to prove otherwise.\");\n    }\n\n    // Tests that the set of expander edges has not been truncated.\n    #[test]\n    fn test_high_parent_bits() {\n        // 64GiB sectors have 2^31 nodes.\n        const N_NODES: usize = 1 << 31;\n\n        // `u32` truncation would reduce the expander edge bit-length from 34 bits to 32 bits, thus\n        // the first parent truncated would be the node at index `2^32 / EXP_DEGREE = 2^29`.\n        const FIRST_TRUNCATED_PARENT: u32 = 1 << 29;\n\n        // The number of child nodes to test before failing. This value was chosen arbitrarily and\n        // can be changed.\n        const N_CHILDREN_SAMPLED: usize = 3;\n\n        // Non-legacy porep-id.\n        let mut porep_id = [0u8; 32];\n        porep_id[..8].copy_from_slice(&5u64.to_le_bytes());\n\n        let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(\n            N_NODES,\n            BASE_DEGREE,\n            EXP_DEGREE,\n            porep_id,\n            ApiVersion::V1_1_0,\n        )\n        .unwrap();\n\n        let mut exp_parents = [0u32; EXP_DEGREE];\n        for v in 0..N_CHILDREN_SAMPLED {\n            graph.expanded_parents(v, &mut exp_parents[..]).unwrap();\n            if exp_parents.iter().any(|u| *u >= FIRST_TRUNCATED_PARENT) {\n                return;\n            }\n        }\n        assert!(false);\n    }\n\n    // Checks that the distribution of parent node indexes within a sector is within a set bound.\n    #[test]\n    fn test_exp_parent_histogram() {\n        // 64GiB sectors have 2^31 nodes.\n        const N_NODES: usize = 1 << 31;\n\n        // The number of children used to construct the histogram. This value is chosen\n        // arbitrarily and can be changed.\n        const N_CHILDREN_SAMPLED: usize = 10000;\n\n        // The number of bins used to partition the set of sector nodes. 
This value was chosen\n        // arbitrarily and can be changed to any integer that is a multiple of `EXP_DEGREE` and\n        // evenly divides `N_NODES`.\n        const N_BINS: usize = 32;\n        const N_NODES_PER_BIN: u32 = (N_NODES / N_BINS) as u32;\n        const PARENT_COUNT_PER_BIN_UNIFORM: usize = N_CHILDREN_SAMPLED * EXP_DEGREE / N_BINS;\n\n        // This test will pass if every bin's parent count is within the bounds:\n        // `(1 +/- FAILURE_THRESHOLD) * PARENT_COUNT_PER_BIN_UNIFORM`.\n        const FAILURE_THRESHOLD: f32 = 0.4;\n        const MAX_PARENT_COUNT_ALLOWED: usize =\n            ((1.0 + FAILURE_THRESHOLD) * PARENT_COUNT_PER_BIN_UNIFORM as f32) as usize - 1;\n        const MIN_PARENT_COUNT_ALLOWED: usize =\n            ((1.0 - FAILURE_THRESHOLD) * PARENT_COUNT_PER_BIN_UNIFORM as f32) as usize + 1;\n\n        // Non-legacy porep-id.\n        let mut porep_id = [0u8; 32];\n        porep_id[..8].copy_from_slice(&5u64.to_le_bytes());\n\n        let graph = StackedBucketGraph::<PoseidonHasher>::new_stacked(\n            N_NODES,\n            BASE_DEGREE,\n            EXP_DEGREE,\n            porep_id,\n            ApiVersion::V1_1_0,\n        )\n        .unwrap();\n\n        // Count the number of parents in each bin.\n        let mut hist = [0usize; N_BINS];\n        let mut exp_parents = [0u32; EXP_DEGREE];\n        for sample_index in 0..N_CHILDREN_SAMPLED {\n            let v = sample_index * N_NODES / N_CHILDREN_SAMPLED;\n            graph.expanded_parents(v, &mut exp_parents[..]).unwrap();\n            for u in exp_parents.iter() {\n                let bin_index = (u / N_NODES_PER_BIN) as usize;\n                hist[bin_index] += 1;\n            }\n        }\n\n        let success = hist.iter().all(|&n_parents| {\n            n_parents >= MIN_PARENT_COUNT_ALLOWED && n_parents <= MAX_PARENT_COUNT_ALLOWED\n        });\n\n        assert!(success);\n    }\n}\n"
  },
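The Feistel-based expander construction in `graph.rs` above is easier to see in isolation. Below is a minimal, self-contained sketch, under stated assumptions: `expander_parent` mirrors `correspondent`, but the round function is an arbitrary stand-in mixer rather than the crate's `feistel` module, and all names here (`expander_parent`, `round_fn`, etc.) are illustrative only. It demonstrates the two ideas the comments describe: a keyed Feistel network acting as a pseudorandom permutation on a padded power-of-four domain, and cycle-walking to keep the permutation closed over `[0, nodes * exp_degree)`.

```rust
const FEISTEL_ROUNDS: usize = 3;

/// Find the smallest even-width Feistel domain `2^(2 * half_bits)` covering
/// `num_elements`, and the mask for one half.
fn setup(num_elements: u64) -> (u32, u64) {
    let mut half_bits = 1u32;
    while 1u64 << (2 * half_bits) < num_elements {
        half_bits += 1;
    }
    (half_bits, (1u64 << half_bits) - 1)
}

/// Arbitrary keyed mixer for this sketch; the real `feistel` module uses a
/// cryptographic round function here.
fn round_fn(right: u64, key: u64, right_mask: u64) -> u64 {
    (right ^ key).wrapping_mul(0x9E37_79B9_7F4A_7C15) & right_mask
}

/// One full Feistel pass: a bijection on the padded domain for any round_fn.
fn encode(index: u64, keys: &[u64], half_bits: u32, right_mask: u64) -> u64 {
    let (mut left, mut right) = (index >> half_bits, index & right_mask);
    for key in keys.iter().take(FEISTEL_ROUNDS) {
        let new_right = left ^ round_fn(right, *key, right_mask);
        left = right;
        right = new_right;
    }
    (left << half_bits) | right
}

/// Permute `index` within `[0, num_elements)`: cycle-walk until the image
/// lands back inside the (possibly smaller) real domain.
fn permute(num_elements: u64, index: u64, keys: &[u64]) -> u64 {
    let (half_bits, right_mask) = setup(num_elements);
    let mut u = encode(index, keys, half_bits, right_mask);
    while u >= num_elements {
        u = encode(u, keys, half_bits, right_mask);
    }
    u
}

/// Mirror of `correspondent`: cell `(node, i)` of the matrix is shuffled
/// within the whole search space, then collapsed to a row (node) index.
fn expander_parent(node: u64, i: u64, nodes: u64, exp_degree: u64, keys: &[u64]) -> u64 {
    permute(nodes * exp_degree, node * exp_degree + i, keys) / exp_degree
}

fn main() {
    let keys = [1u64, 2, 3, 4];
    for i in 0..8 {
        println!("expander parent {}: {}", i, expander_parent(5, i, 1 << 10, 8, &keys));
    }
}
```

Because `encode` is a bijection on the padded domain, the cycle-walking loop always terminates and the restriction to `[0, num_elements)` is itself a permutation, which is exactly the property `test_shuffle` above exercises.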
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/hash.rs",
    "content": "use bellperson::bls::Fr;\nuse filecoin_hashers::{POSEIDON_CONSTANTS_11, POSEIDON_CONSTANTS_2};\nuse neptune::poseidon::Poseidon;\n\n/// Hash all elements in the given column.\npub fn hash_single_column(column: &[Fr]) -> Fr {\n    match column.len() {\n        2 => {\n            let mut hasher = Poseidon::new_with_preimage(column, &*POSEIDON_CONSTANTS_2);\n            hasher.hash()\n        }\n        11 => {\n            let mut hasher = Poseidon::new_with_preimage(column, &*POSEIDON_CONSTANTS_11);\n            hasher.hash()\n        }\n        _ => panic!(\"unsupported column size: {}\", column.len()),\n    }\n}\n"
  },
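For context, a hypothetical caller of `hash_single_column` might look like the sketch below. It assumes this crate's `bellperson` and `ff` dependencies are available; `tree_c_leaf_for_node` is an illustrative name, not an API in this file. The point is the dispatch by width: an 11-layer SDR column of labels collapses to the single field element stored as that node's tree_c leaf, and only widths 2 and 11 are supported.

```rust
use bellperson::bls::Fr;
use ff::Field; // assumption: the `ff` Field trait, as provided by this crate's deps

// Illustrative helper: hash one node's column of per-layer labels.
fn tree_c_leaf_for_node(column_labels: &[Fr]) -> Fr {
    // Only widths 2 and 11 have Poseidon constants wired up; others panic.
    assert_eq!(column_labels.len(), 11);
    hash_single_column(column_labels)
}

fn main() {
    let column = vec![Fr::zero(); 11];
    let _leaf = tree_c_leaf_for_node(&column);
}
```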
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/labeling_proof.rs",
    "content": "use std::marker::PhantomData;\n\nuse filecoin_hashers::Hasher;\nuse fr32::bytes_into_fr_repr_safe;\nuse log::trace;\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct LabelingProof<H: Hasher> {\n    pub(crate) parents: Vec<H::Domain>,\n    pub(crate) layer_index: u32,\n    pub(crate) node: u64,\n    #[serde(skip)]\n    _h: PhantomData<H>,\n}\n\nimpl<H: Hasher> LabelingProof<H> {\n    pub fn new(layer_index: u32, node: u64, parents: Vec<H::Domain>) -> Self {\n        LabelingProof {\n            node,\n            layer_index,\n            parents,\n            _h: PhantomData,\n        }\n    }\n\n    fn create_label(&self, replica_id: &H::Domain) -> H::Domain {\n        let mut hasher = Sha256::new();\n        let mut buffer = [0u8; 64];\n\n        // replica_id\n        buffer[..32].copy_from_slice(AsRef::<[u8]>::as_ref(replica_id));\n\n        // layer index\n        buffer[32..36].copy_from_slice(&(self.layer_index as u32).to_be_bytes());\n\n        // node id\n        buffer[36..44].copy_from_slice(&(self.node as u64).to_be_bytes());\n\n        hasher.update(&buffer[..]);\n\n        // parents\n        for parent in &self.parents {\n            let data = AsRef::<[u8]>::as_ref(parent);\n            hasher.update(data);\n        }\n\n        bytes_into_fr_repr_safe(hasher.finalize().as_ref()).into()\n    }\n\n    pub fn verify(&self, replica_id: &H::Domain, expected_label: &H::Domain) -> bool {\n        let label = self.create_label(replica_id);\n        check_eq!(expected_label, &label);\n\n        true\n    }\n}\n"
  },
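The byte layout hashed by `create_label` above is worth spelling out. The following is a dependency-light sketch (only the `sha2` crate; `label_preimage_digest` is an illustrative name) of the preimage: a 64-byte header of `replica_id | layer_index (BE u32) | node (BE u64) | zero padding`, followed by the raw bytes of each parent label, with the final mapping into Fr omitted.

```rust
use sha2::{Digest, Sha256};

/// Sketch of the labeling preimage; the header layout matches `create_label`.
fn label_preimage_digest(
    replica_id: &[u8; 32],
    layer_index: u32,
    node: u64,
    parents: &[[u8; 32]],
) -> [u8; 32] {
    let mut hasher = Sha256::new();
    let mut buffer = [0u8; 64];
    buffer[..32].copy_from_slice(replica_id); // bytes 0..32: replica id
    buffer[32..36].copy_from_slice(&layer_index.to_be_bytes()); // 32..36: layer index
    buffer[36..44].copy_from_slice(&node.to_be_bytes()); // 36..44: node id
    // Note: the whole 64-byte buffer is hashed, so bytes 44..64 are zero padding.
    hasher.update(&buffer[..]);
    for parent in parents {
        hasher.update(parent);
    }
    // `create_label` additionally maps this digest into Fr via
    // `bytes_into_fr_repr_safe`.
    hasher.finalize().into()
}

fn main() {
    let digest = label_preimage_digest(&[0u8; 32], 1, 0, &[[0u8; 32]; 14]);
    println!("{:x?}", digest);
}
```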
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/macros.rs",
    "content": "/// Checks that the two passed values are equal. If they are not equal it prints a trace and returns `false`.\nmacro_rules! check_eq {\n    ($left:expr , $right:expr,) => ({\n        check_eq!($left, $right)\n    });\n    ($left:expr , $right:expr) => ({\n        match (&($left), &($right)) {\n            (left_val, right_val) => {\n                if !(*left_val == *right_val) {\n                    trace!(\"check failed: `(left == right)`\\\n                          \\n\\\n                          \\n{}\\\n                          \\n\",\n                           pretty_assertions::Comparison::new(left_val, right_val));\n                    return false;\n                }\n            }\n        }\n    });\n    ($left:expr , $right:expr, $($arg:tt)*) => ({\n        match (&($left), &($right)) {\n            (left_val, right_val) => {\n                if !(*left_val == *right_val) {\n                    trace!(\"check failed: `(left == right)`: {}\\\n                          \\n\\\n                          \\n{}\\\n                          \\n\",\n                           format_args!($($arg)*),\n                           pretty_assertions::Comparison::new(left_val, right_val));\n                    return false;\n                }\n            }\n        }\n    });\n}\n\n/// Checks that the passed in value is true. If they are not equal it prints a trace and returns `false`.\nmacro_rules! check {\n    ($val:expr) => {\n        if !$val {\n            trace!(\"expected {:?} to be true\", dbg!($val));\n            return false;\n        }\n    };\n}\n\nmacro_rules! prefetch {\n    ($val:expr) => {\n        #[cfg(any(target_arch = \"x86_64\", target_arch = \"x86\"))]\n        unsafe {\n            #[cfg(target_arch = \"x86\")]\n            use std::arch::x86;\n            #[cfg(target_arch = \"x86_64\")]\n            use std::arch::x86_64 as x86;\n\n            x86::_mm_prefetch($val, x86::_MM_HINT_T0);\n        }\n        #[cfg(target_arch = \"aarch64\")]\n        unsafe {\n            use std::arch::aarch64::{_prefetch, _PREFETCH_LOCALITY3, _PREFETCH_READ};\n            _prefetch($val, _PREFETCH_READ, _PREFETCH_LOCALITY3);\n        }\n    };\n}\n\nmacro_rules! 
compress256 {\n    ($state:expr, $buf:expr, 1) => {\n        let blocks = [*GenericArray::<u8, U64>::from_slice(&$buf[..64])];\n        sha2::compress256((&mut $state[..8]).try_into().unwrap(), &blocks[..]);\n    };\n    ($state:expr, $buf:expr, 2) => {\n        let blocks = [\n            *GenericArray::<u8, U64>::from_slice(&$buf[..64]),\n            *GenericArray::<u8, U64>::from_slice(&$buf[64..128]),\n        ];\n        sha2::compress256((&mut $state[..8]).try_into().unwrap(), &blocks[..]);\n    };\n    ($state:expr, $buf:expr, 3) => {\n        let blocks = [\n            *GenericArray::<u8, U64>::from_slice(&$buf[..64]),\n            *GenericArray::<u8, U64>::from_slice(&$buf[64..128]),\n            *GenericArray::<u8, U64>::from_slice(&$buf[128..192]),\n        ];\n        sha2::compress256((&mut $state[..8]).try_into().unwrap(), &blocks[..]);\n    };\n    ($state:expr, $buf:expr, 5) => {\n        let blocks = [\n            *GenericArray::<u8, U64>::from_slice(&$buf[..64]),\n            *GenericArray::<u8, U64>::from_slice(&$buf[64..128]),\n            *GenericArray::<u8, U64>::from_slice(&$buf[128..192]),\n            *GenericArray::<u8, U64>::from_slice(&$buf[192..256]),\n            *GenericArray::<u8, U64>::from_slice(&$buf[256..320]),\n        ];\n        sha2::compress256((&mut $state[..8]).try_into().unwrap(), &blocks[..]);\n    };\n}\n"
  },
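The `check!`/`check_eq!` macros exist so that verification code can fail soft: unlike `assert!`, a failed check logs a trace and makes the enclosing `-> bool` verifier return `false` instead of panicking. A standalone sketch of the pattern (using `eprintln!` in place of `trace!` so it runs without a logger; `verify` here is an illustrative function, not one from this crate):

```rust
// Early-return-on-failure check, as used by the vanilla proof verifiers.
macro_rules! check {
    ($val:expr) => {
        if !$val {
            // The real macro logs via `trace!`; eprintln! keeps this runnable.
            eprintln!("check failed: {}", stringify!($val));
            return false;
        }
    };
}

fn verify(challenge: usize, size: usize, root_matches: bool) -> bool {
    check!(challenge < size);
    check!(root_matches);
    true
}

fn main() {
    assert!(verify(3, 8, true));
    assert!(!verify(9, 8, true)); // fails the first check, returns false
}
```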
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/memory_handling.rs",
    "content": "use std::cell::UnsafeCell;\nuse std::fs::File;\nuse std::hint::spin_loop;\nuse std::marker::{PhantomData, Sync};\nuse std::mem::size_of;\nuse std::path::Path;\nuse std::slice;\nuse std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};\n\nuse anyhow::Result;\nuse byte_slice_cast::{AsSliceOf, FromByteSlice};\nuse log::{info, warn};\nuse mapr::{Mmap, MmapMut, MmapOptions};\n\npub struct CacheReader<T> {\n    file: File,\n    bufs: UnsafeCell<[Mmap; 2]>,\n    size: usize,\n    degree: usize,\n    window_size: usize,\n    cursor: IncrementingCursor,\n    consumer: AtomicU64,\n    _t: PhantomData<T>,\n}\n\nunsafe impl<T> Sync for CacheReader<T> {}\n\nstruct IncrementingCursor {\n    cur: AtomicUsize,\n    cur_safe: AtomicUsize,\n}\n\nfn compare_and_swap(atomic: &AtomicUsize, before: usize, after: usize) -> usize {\n    match atomic.compare_exchange_weak(before, after, Ordering::SeqCst, Ordering::SeqCst) {\n        Ok(x) => {\n            assert_eq!(x, before);\n            before\n        }\n        _ => after,\n    }\n}\n\n/// IncrementingCursor provides an atomic variable which can be incremented such that only one thread attempting the\n/// increment is selected to perform actions required to effect the transition. Unselected threads wait until the\n/// transition has completed. Transition and wait condition are both specified by closures supplied by the caller.\nimpl IncrementingCursor {\n    fn new(val: usize) -> Self {\n        Self {\n            cur: AtomicUsize::new(val),\n            cur_safe: AtomicUsize::new(val),\n        }\n    }\n\n    fn store(&self, val: usize) {\n        self.cur.store(val, Ordering::SeqCst);\n        self.cur_safe.store(val, Ordering::SeqCst);\n    }\n\n    fn compare_and_swap(&self, before: usize, after: usize) {\n        compare_and_swap(&self.cur, before, after);\n        compare_and_swap(&self.cur_safe, before, after);\n    }\n\n    fn increment<F: Fn() -> bool, G: Fn()>(&self, target: usize, wait_fn: F, advance_fn: G) {\n        // Check using `cur_safe`, to ensure we wait until the current cursor value is safe to use.\n        // If we were to instead check `cur`, it could have been incremented but not yet safe.\n        let cur = self.cur_safe.load(Ordering::SeqCst);\n        if target > cur {\n            // Only one producer will successfully increment `cur`. We need this second atomic because we cannot\n            // increment `cur_safe` until after the underlying resource has been advanced.\n            let instant_cur = compare_and_swap(&self.cur, cur, cur + 1);\n            if instant_cur == cur {\n                // We successfully incremented `self.cur`, so we are responsible for advancing the resource.\n                {\n                    while wait_fn() {\n                        spin_loop()\n                    }\n                }\n\n                advance_fn();\n\n                // Now it is safe to use the new window.\n                self.cur_safe.fetch_add(1, Ordering::SeqCst);\n            } else {\n                // We failed to increment `self.cur_window`, so we must wait for the window to be advanced before\n                // continuing. 
Wait until it is safe to use the new current window.\n                while self.cur_safe.load(Ordering::SeqCst) != cur + 1 {\n                    spin_loop()\n                }\n            }\n        }\n    }\n}\n\nimpl<T: FromByteSlice> CacheReader<T> {\n    pub fn new(filename: &Path, window_size: Option<usize>, degree: usize) -> Result<Self> {\n        info!(\"initializing cache\");\n        let file = File::open(filename)?;\n        let size = File::metadata(&file)?.len() as usize;\n        let window_size = match window_size {\n            Some(s) => {\n                if s < size {\n                    assert_eq!(\n                        0,\n                        s % (degree * size_of::<T>()),\n                        \"window size is not a multiple of degree * element size\"\n                    );\n                };\n                s\n            }\n            None => {\n                let num_windows = 8;\n                assert_eq!(0, size % num_windows);\n                size / num_windows\n            }\n        };\n\n        let buf0 = Self::map_buf(0, window_size, &file)?;\n        let buf1 = Self::map_buf(window_size as u64, window_size, &file)?;\n        Ok(Self {\n            file,\n            bufs: UnsafeCell::new([buf0, buf1]),\n            size,\n            degree,\n            window_size,\n            // The furthest window from which the cache has yet been read.\n            cursor: IncrementingCursor::new(0),\n            consumer: AtomicU64::new(0),\n            _t: PhantomData::<T>,\n        })\n    }\n\n    pub fn size(&self) -> usize {\n        self.size\n    }\n\n    pub fn window_nodes(&self) -> usize {\n        self.size() / (size_of::<T>() * self.degree)\n    }\n\n    /// Safety: incrementing the consumer at the end of a window will unblock the producer waiting to remap the\n    /// consumer's previous buffer. 
The buffer must not be accessed once this has happened.\n    pub unsafe fn increment_consumer(&self) {\n        self.consumer.fetch_add(1, Ordering::SeqCst);\n    }\n\n    pub fn store_consumer(&self, val: u64) {\n        self.consumer.store(val, Ordering::SeqCst);\n    }\n\n    pub fn get_consumer(&self) -> u64 {\n        self.consumer.load(Ordering::SeqCst)\n    }\n\n    #[inline]\n    fn get_bufs(&self) -> &[Mmap] {\n        unsafe { &std::slice::from_raw_parts((*self.bufs.get()).as_ptr(), 2) }\n    }\n\n    #[inline]\n    #[allow(clippy::mut_from_ref)]\n    unsafe fn get_mut_bufs(&self) -> &mut [Mmap] {\n        slice::from_raw_parts_mut((*self.bufs.get()).as_mut_ptr(), 2)\n    }\n\n    #[allow(dead_code)]\n    // This is unused, but included to document the meaning of its components.\n    // This allows splitting the reset in order to avoid a pause.\n    pub fn reset(&self) -> Result<()> {\n        self.start_reset()?;\n        self.finish_reset()\n    }\n\n    pub fn start_reset(&self) -> Result<()> {\n        let buf0 = Self::map_buf(0, self.window_size, &self.file)?;\n        let bufs = unsafe { self.get_mut_bufs() };\n        bufs[0] = buf0;\n        Ok(())\n    }\n\n    pub fn finish_reset(&self) -> Result<()> {\n        let buf1 = Self::map_buf(self.window_size as u64, self.window_size, &self.file)?;\n        let bufs = unsafe { self.get_mut_bufs() };\n        bufs[1] = buf1;\n        self.cursor.store(0);\n        Ok(())\n    }\n\n    fn map_buf(offset: u64, len: usize, file: &File) -> Result<Mmap> {\n        unsafe {\n            MmapOptions::new()\n                .offset(offset)\n                .len(len)\n                .private()\n                .map(file)\n                .map_err(|e| e.into())\n        }\n    }\n\n    #[inline]\n    fn window_element_count(&self) -> usize {\n        self.window_size / size_of::<T>()\n    }\n\n    /// `pos` is in units of `T`.\n    #[inline]\n    /// Safety: A returned slice must not be accessed once the buffer from which it has been derived is remapped. A\n    /// buffer will never be remapped until the `consumer` atomic contained in `self` has been advanced past the end of\n    /// the window. NOTE: each time `consumer` is incremented, `self.degree` elements of the cache are invalidated.\n    /// This means callers should only access slice elements sequentially. They should only call `increment_consumer`\n    /// once the next `self.degree` elements of the cache will never be accessed again.\n    pub unsafe fn consumer_slice_at(&self, pos: usize) -> &[T] {\n        assert!(\n            pos < self.size,\n            \"pos {} out of range for buffer of size {}\",\n            pos,\n            self.size\n        );\n        let window = pos / self.window_element_count();\n        let pos = pos % self.window_element_count();\n        let targeted_buf = &self.get_bufs()[window % 2];\n\n        &targeted_buf.as_slice_of::<T>().unwrap()[pos..]\n    }\n\n    /// `pos` is in units of `T`.\n    #[inline]\n    /// Safety: This call may advance the rear buffer, making it unsafe to access slices derived from that buffer again.\n    /// It is the caller's responsibility to ensure such illegal access is not attempted. This can be prevented if users\n    /// never access values past which the cache's `consumer` atomic has been incremented. 
NOTE: each time `consumer` is\n    /// incremented, `self.degree` elements of the cache are invalidated.\n    pub unsafe fn slice_at(&self, pos: usize) -> &[T] {\n        assert!(\n            pos < self.size,\n            \"pos {} out of range for buffer of size {}\",\n            pos,\n            self.size\n        );\n        let window = pos / self.window_element_count();\n        if window == 1 {\n            self.cursor.compare_and_swap(0, 1);\n        }\n\n        let pos = pos % self.window_element_count();\n\n        let wait_fn = || {\n            let safe_consumer = (window - 1) * (self.window_element_count() / self.degree);\n            (self.consumer.load(Ordering::SeqCst) as usize) < safe_consumer\n        };\n\n        self.cursor\n            .increment(window, &wait_fn, &|| self.advance_rear_window(window));\n\n        let targeted_buf = &self.get_bufs()[window % 2];\n\n        &targeted_buf.as_slice_of::<T>().unwrap()[pos..]\n    }\n\n    fn advance_rear_window(&self, new_window: usize) {\n        assert!(new_window as usize * self.window_size < self.size);\n\n        let replace_idx = (new_window % 2) as usize;\n\n        let new_buf = Self::map_buf(\n            (new_window * self.window_size) as u64,\n            self.window_size as usize,\n            &self.file,\n        )\n        .unwrap();\n\n        unsafe {\n            self.get_mut_bufs()[replace_idx] = new_buf;\n        }\n    }\n}\n\nfn allocate_layer(sector_size: usize) -> Result<MmapMut> {\n    match MmapOptions::new()\n        .len(sector_size)\n        .private()\n        .clone()\n        .lock()\n        .map_anon()\n        .and_then(|mut layer| {\n            layer.mlock()?;\n            Ok(layer)\n        }) {\n        Ok(layer) => Ok(layer),\n        Err(err) => {\n            // fallback to not locked if permissions are not available\n            warn!(\"failed to lock map {:?}, falling back\", err);\n            let layer = MmapOptions::new().len(sector_size).private().map_anon()?;\n            Ok(layer)\n        }\n    }\n}\n\npub fn setup_create_label_memory(\n    sector_size: usize,\n    degree: usize,\n    window_size: Option<usize>,\n    cache_path: &Path,\n) -> Result<(CacheReader<u32>, MmapMut, MmapMut)> {\n    let parents_cache = CacheReader::new(cache_path, window_size, degree)?;\n    let layer_labels = allocate_layer(sector_size)?;\n    let exp_labels = allocate_layer(sector_size)?;\n\n    Ok((parents_cache, layer_labels, exp_labels))\n}\n"
  },
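The heart of `memory_handling.rs` is the two-atomic window handoff in `IncrementingCursor::increment`. The sketch below isolates that protocol (an illustrative `Cursor` type with no mmap handling; a simplification of the code above, not a drop-in replacement): many reader threads race to advance a shared window, exactly one wins the `compare_exchange` and performs the slow transition, and everyone else spins until `cur_safe` publishes the result.

```rust
use std::hint::spin_loop;
use std::sync::atomic::{AtomicUsize, Ordering};

struct Cursor {
    cur: AtomicUsize,      // claimed window: only one thread wins each transition
    cur_safe: AtomicUsize, // published window: actually safe to read from
}

impl Cursor {
    fn advance_to<F: Fn()>(&self, target: usize, advance_fn: F) {
        // Check `cur_safe`: `cur` may already be claimed but not yet usable.
        let cur = self.cur_safe.load(Ordering::SeqCst);
        if target <= cur {
            return; // window already available
        }
        if self
            .cur
            .compare_exchange(cur, cur + 1, Ordering::SeqCst, Ordering::SeqCst)
            .is_ok()
        {
            // We won the race: perform the transition, then publish it.
            advance_fn();
            self.cur_safe.fetch_add(1, Ordering::SeqCst);
        } else {
            // Another thread is advancing; wait for the publication.
            while self.cur_safe.load(Ordering::SeqCst) != cur + 1 {
                spin_loop();
            }
        }
    }
}

fn main() {
    let cursor = Cursor {
        cur: AtomicUsize::new(0),
        cur_safe: AtomicUsize::new(0),
    };
    cursor.advance_to(1, || println!("remapping window 1"));
    assert_eq!(cursor.cur_safe.load(Ordering::SeqCst), 1);
}
```

The real code adds a `wait_fn` before the transition so the winning thread does not remap a buffer that the consumer is still reading, which is why `CacheReader::slice_at` derives a `safe_consumer` threshold from the window index.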
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/mod.rs",
    "content": "#[macro_use]\nmod macros;\n\npub mod create_label;\npub(crate) mod hash;\n\nmod cache;\nmod challenges;\nmod column;\nmod column_proof;\nmod cores;\nmod encoding_proof;\nmod graph;\nmod labeling_proof;\nmod memory_handling;\nmod params;\nmod porep;\nmod proof;\nmod proof_scheme;\nmod utils;\n\npub use challenges::{ChallengeRequirements, LayerChallenges};\npub use column::Column;\npub use column_proof::ColumnProof;\npub use encoding_proof::EncodingProof;\npub use graph::{StackedBucketGraph, StackedGraph, EXP_DEGREE};\npub use labeling_proof::LabelingProof;\npub use params::*;\npub use proof::{StackedDrg, TOTAL_PARENTS};\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/params.rs",
    "content": "use std::fs::remove_file;\nuse std::marker::PhantomData;\nuse std::path::{Path, PathBuf};\n\nuse anyhow::Context;\nuse filecoin_hashers::{Domain, Hasher};\nuse fr32::bytes_into_fr_repr_safe;\nuse generic_array::typenum::{Unsigned, U2};\nuse log::trace;\nuse merkletree::{\n    merkle::get_merkle_tree_leafs,\n    store::{DiskStore, Store, StoreConfig},\n};\nuse serde::{Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    drgraph::Graph,\n    error::Result,\n    merkle::{\n        create_disk_tree, create_lc_tree, get_base_tree_count, split_config,\n        split_config_and_replica, BinaryMerkleTree, DiskTree, LCTree, MerkleProof,\n        MerkleProofTrait, MerkleTreeTrait,\n    },\n    parameter_cache::ParameterSetMetadata,\n    util::data_at_node,\n};\n\nuse crate::stacked::vanilla::{\n    Column, ColumnProof, EncodingProof, LabelingProof, LayerChallenges, StackedBucketGraph,\n};\n\npub const BINARY_ARITY: usize = 2;\npub const QUAD_ARITY: usize = 4;\npub const OCT_ARITY: usize = 8;\n\n#[derive(Debug, Clone)]\npub struct SetupParams {\n    // Number of nodes\n    pub nodes: usize,\n\n    // Base degree of DRG\n    pub degree: usize,\n\n    pub expansion_degree: usize,\n\n    pub porep_id: [u8; 32],\n    pub layer_challenges: LayerChallenges,\n    pub api_version: ApiVersion,\n}\n\n#[derive(Debug)]\npub struct PublicParams<Tree>\nwhere\n    Tree: 'static + MerkleTreeTrait,\n{\n    pub graph: StackedBucketGraph<Tree::Hasher>,\n    pub layer_challenges: LayerChallenges,\n    _t: PhantomData<Tree>,\n}\n\nimpl<Tree> Clone for PublicParams<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    fn clone(&self) -> Self {\n        Self {\n            graph: self.graph.clone(),\n            layer_challenges: self.layer_challenges.clone(),\n            _t: Default::default(),\n        }\n    }\n}\n\nimpl<Tree> PublicParams<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    pub fn new(graph: StackedBucketGraph<Tree::Hasher>, layer_challenges: LayerChallenges) -> Self {\n        PublicParams {\n            graph,\n            layer_challenges,\n            _t: PhantomData,\n        }\n    }\n}\n\nimpl<Tree> ParameterSetMetadata for PublicParams<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    fn identifier(&self) -> String {\n        format!(\n            \"layered_drgporep::PublicParams{{ graph: {}, challenges: {:?}, tree: {} }}\",\n            self.graph.identifier(),\n            self.layer_challenges,\n            Tree::display()\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.graph.sector_size()\n    }\n}\n\nimpl<'a, Tree> From<&'a PublicParams<Tree>> for PublicParams<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    fn from(other: &PublicParams<Tree>) -> PublicParams<Tree> {\n        PublicParams::new(other.graph.clone(), other.layer_challenges.clone())\n    }\n}\n\n#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]\npub struct PublicInputs<T: Domain, S: Domain> {\n    #[serde(bound = \"\")]\n    pub replica_id: T,\n    pub seed: [u8; 32],\n    #[serde(bound = \"\")]\n    pub tau: Option<Tau<T, S>>,\n    /// Partition index\n    pub k: Option<usize>,\n}\n\nimpl<T: Domain, S: Domain> PublicInputs<T, S> {\n    pub fn challenges(\n        &self,\n        layer_challenges: &LayerChallenges,\n        leaves: usize,\n        partition_k: Option<usize>,\n    ) -> Vec<usize> {\n        let k = partition_k.unwrap_or(0);\n\n        layer_challenges.derive::<T>(leaves, &self.replica_id, &self.seed, k as u8)\n 
   }\n}\n\n#[derive(Debug)]\npub struct PrivateInputs<Tree: MerkleTreeTrait, G: Hasher> {\n    pub p_aux: PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n    pub t_aux: TemporaryAuxCache<Tree, G>,\n}\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct Proof<Tree: MerkleTreeTrait, G: Hasher> {\n    #[serde(bound(\n        serialize = \"MerkleProof<G, U2>: Serialize\",\n        deserialize = \"MerkleProof<G, U2>: Deserialize<'de>\"\n    ))]\n    pub comm_d_proofs: MerkleProof<G, U2>,\n    #[serde(bound(\n        serialize = \"MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>: Serialize\",\n        deserialize = \"MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>: Deserialize<'de>\"\n    ))]\n    pub comm_r_last_proof:\n        MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n    #[serde(bound(\n        serialize = \"ReplicaColumnProof<MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,>: Serialize\",\n        deserialize = \"ReplicaColumnProof<MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>: Deserialize<'de>\"\n    ))]\n    pub replica_column_proofs: ReplicaColumnProof<\n        MerkleProof<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n    >,\n    #[serde(bound(\n        serialize = \"LabelingProof<Tree::Hasher>: Serialize\",\n        deserialize = \"LabelingProof<Tree::Hasher>: Deserialize<'de>\"\n    ))]\n    /// Indexed by layer in 1..layers.\n    pub labeling_proofs: Vec<LabelingProof<Tree::Hasher>>,\n    #[serde(bound(\n        serialize = \"EncodingProof<Tree::Hasher>: Serialize\",\n        deserialize = \"EncodingProof<Tree::Hasher>: Deserialize<'de>\"\n    ))]\n    pub encoding_proof: EncodingProof<Tree::Hasher>,\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> Clone for Proof<Tree, G> {\n    fn clone(&self) -> Self {\n        Self {\n            comm_d_proofs: self.comm_d_proofs.clone(),\n            comm_r_last_proof: self.comm_r_last_proof.clone(),\n            replica_column_proofs: self.replica_column_proofs.clone(),\n            labeling_proofs: self.labeling_proofs.clone(),\n            encoding_proof: self.encoding_proof.clone(),\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> Proof<Tree, G> {\n    pub fn comm_r_last(&self) -> <Tree::Hasher as Hasher>::Domain {\n        self.comm_r_last_proof.root()\n    }\n\n    pub fn comm_c(&self) -> <Tree::Hasher as Hasher>::Domain {\n        self.replica_column_proofs.c_x.root()\n    }\n\n    /// Verify the full proof.\n    pub fn verify(\n        &self,\n        pub_params: &PublicParams<Tree>,\n        pub_inputs: &PublicInputs<<Tree::Hasher as Hasher>::Domain, <G as Hasher>::Domain>,\n        challenge: usize,\n        graph: &StackedBucketGraph<Tree::Hasher>,\n    ) -> bool {\n        let replica_id = &pub_inputs.replica_id;\n\n        check!(challenge < graph.size());\n        check!(pub_inputs.tau.is_some());\n\n        // Verify initial data layer\n        trace!(\"verify initial data layer\");\n\n        check!(self.comm_d_proofs.proves_challenge(challenge));\n\n        if let Some(ref tau) = pub_inputs.tau {\n            check_eq!(&self.comm_d_proofs.root(), &tau.comm_d);\n        } else {\n            return false;\n        }\n\n        // Verify replica column openings\n        trace!(\"verify replica column openings\");\n        let mut parents = vec![0; graph.degree()];\n        graph\n            .parents(challenge, &mut parents)\n       
     .expect(\"graph parents failure\"); // FIXME: error handling\n        check!(self.replica_column_proofs.verify(challenge, &parents));\n\n        check!(self.verify_final_replica_layer(challenge));\n\n        check!(self.verify_labels(replica_id, &pub_params.layer_challenges));\n\n        trace!(\"verify encoding\");\n\n        check!(self.encoding_proof.verify::<G>(\n            replica_id,\n            &self.comm_r_last_proof.leaf(),\n            &self.comm_d_proofs.leaf()\n        ));\n\n        true\n    }\n\n    /// Verify all labels.\n    fn verify_labels(\n        &self,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        layer_challenges: &LayerChallenges,\n    ) -> bool {\n        // Verify Labels Layer 1..layers\n        for layer in 1..=layer_challenges.layers() {\n            trace!(\"verify labeling (layer: {})\", layer,);\n\n            check!(self.labeling_proofs.get(layer - 1).is_some());\n            let labeling_proof = &self\n                .labeling_proofs\n                .get(layer - 1)\n                .expect(\"labeling proofs get failure\");\n            let labeled_node = self\n                .replica_column_proofs\n                .c_x\n                .get_node_at_layer(layer)\n                .expect(\"get_node_at_layer failure\"); // FIXME: error handling\n            check!(labeling_proof.verify(replica_id, labeled_node));\n        }\n\n        true\n    }\n\n    /// Verify final replica layer openings\n    fn verify_final_replica_layer(&self, challenge: usize) -> bool {\n        trace!(\"verify final replica layer openings\");\n        check!(self.comm_r_last_proof.proves_challenge(challenge));\n\n        true\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct ReplicaColumnProof<Proof: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"ColumnProof<Proof>: Serialize\",\n        deserialize = \"ColumnProof<Proof>: Deserialize<'de>\"\n    ))]\n    pub c_x: ColumnProof<Proof>,\n    #[serde(bound(\n        serialize = \"ColumnProof<Proof>: Serialize\",\n        deserialize = \"ColumnProof<Proof>: Deserialize<'de>\"\n    ))]\n    pub drg_parents: Vec<ColumnProof<Proof>>,\n    #[serde(bound(\n        serialize = \"ColumnProof<Proof>: Serialize\",\n        deserialize = \"ColumnProof<Proof>: Deserialize<'de>\"\n    ))]\n    pub exp_parents: Vec<ColumnProof<Proof>>,\n}\n\nimpl<Proof: MerkleProofTrait> ReplicaColumnProof<Proof> {\n    pub fn verify(&self, challenge: usize, parents: &[u32]) -> bool {\n        let expected_comm_c = self.c_x.root();\n\n        trace!(\"  verify c_x\");\n        check!(self.c_x.verify(challenge as u32, &expected_comm_c));\n\n        trace!(\"  verify drg_parents\");\n        for (proof, parent) in self.drg_parents.iter().zip(parents.iter()) {\n            check!(proof.verify(*parent, &expected_comm_c));\n        }\n\n        trace!(\"  verify exp_parents\");\n        for (proof, parent) in self\n            .exp_parents\n            .iter()\n            .zip(parents.iter().skip(self.drg_parents.len()))\n        {\n            check!(proof.verify(*parent, &expected_comm_c));\n        }\n\n        true\n    }\n}\n\npub type TransformedLayers<Tree, G> = (\n    Tau<<<Tree as MerkleTreeTrait>::Hasher as Hasher>::Domain, <G as Hasher>::Domain>,\n    PersistentAux<<<Tree as MerkleTreeTrait>::Hasher as Hasher>::Domain>,\n    TemporaryAux<Tree, G>,\n);\n\n/// Tau for a single parition.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\npub struct Tau<D: Domain, E: Domain> {\n    
#[serde(bound = \"\")]\n    pub comm_d: E,\n    #[serde(bound = \"\")]\n    pub comm_r: D,\n}\n\n/// Stored along side the sector on disk.\n#[derive(Default, Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]\npub struct PersistentAux<D> {\n    pub comm_c: D,\n    pub comm_r_last: D,\n}\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct TemporaryAux<Tree: MerkleTreeTrait, G: Hasher> {\n    /// The encoded nodes for 1..layers.\n    #[serde(bound(\n        serialize = \"StoreConfig: Serialize\",\n        deserialize = \"StoreConfig: Deserialize<'de>\"\n    ))]\n    pub labels: Labels<Tree>,\n    pub tree_d_config: StoreConfig,\n    pub tree_r_last_config: StoreConfig,\n    pub tree_c_config: StoreConfig,\n    pub _g: PhantomData<G>,\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> Clone for TemporaryAux<Tree, G> {\n    fn clone(&self) -> Self {\n        Self {\n            labels: self.labels.clone(),\n            tree_d_config: self.tree_d_config.clone(),\n            tree_r_last_config: self.tree_r_last_config.clone(),\n            tree_c_config: self.tree_c_config.clone(),\n            _g: Default::default(),\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> TemporaryAux<Tree, G> {\n    pub fn set_cache_path<P: AsRef<Path>>(&mut self, cache_path: P) {\n        let cp = cache_path.as_ref().to_path_buf();\n        for label in self.labels.labels.iter_mut() {\n            label.path = cp.clone();\n        }\n        self.tree_d_config.path = cp.clone();\n        self.tree_r_last_config.path = cp.clone();\n        self.tree_c_config.path = cp;\n    }\n\n    pub fn labels_for_layer(\n        &self,\n        layer: usize,\n    ) -> Result<DiskStore<<Tree::Hasher as Hasher>::Domain>> {\n        self.labels.labels_for_layer(layer)\n    }\n\n    pub fn domain_node_at_layer(\n        &self,\n        layer: usize,\n        node_index: u32,\n    ) -> Result<<Tree::Hasher as Hasher>::Domain> {\n        self.labels_for_layer(layer)?.read_at(node_index as usize)\n    }\n\n    pub fn column(&self, column_index: u32) -> Result<Column<Tree::Hasher>> {\n        self.labels.column(column_index)\n    }\n\n    // 'clear_temp' will discard all persisted merkle and layer data\n    // that is no longer required.\n    pub fn clear_temp(t_aux: TemporaryAux<Tree, G>) -> Result<()> {\n        let cached = |config: &StoreConfig| {\n            Path::new(&StoreConfig::data_path(&config.path, &config.id)).exists()\n        };\n\n        let delete_tree_c_store = |config: &StoreConfig, tree_c_size: usize| -> Result<()> {\n            let tree_c_store = DiskStore::<<Tree::Hasher as Hasher>::Domain>::new_from_disk(\n                tree_c_size,\n                Tree::Arity::to_usize(),\n                &config,\n            )\n            .context(\"tree_c\")?;\n            // Note: from_data_store requires the base tree leaf count\n            let tree_c = DiskTree::<\n                Tree::Hasher,\n                Tree::Arity,\n                Tree::SubTreeArity,\n                Tree::TopTreeArity,\n            >::from_data_store(\n                tree_c_store,\n                get_merkle_tree_leafs(tree_c_size, Tree::Arity::to_usize())?,\n            )\n            .context(\"tree_c\")?;\n            tree_c.delete(config.clone()).context(\"tree_c\")?;\n\n            Ok(())\n        };\n\n        if cached(&t_aux.tree_d_config) {\n            let tree_d_size = t_aux\n                .tree_d_config\n                .size\n                .context(\"tree_d config has no size\")?;\n            let 
tree_d_store: DiskStore<G::Domain> =\n                DiskStore::new_from_disk(tree_d_size, BINARY_ARITY, &t_aux.tree_d_config)\n                    .context(\"tree_d\")?;\n            // Note: from_data_store requires the base tree leaf count\n            let tree_d = BinaryMerkleTree::<G>::from_data_store(\n                tree_d_store,\n                get_merkle_tree_leafs(tree_d_size, BINARY_ARITY)?,\n            )\n            .context(\"tree_d\")?;\n\n            tree_d.delete(t_aux.tree_d_config).context(\"tree_d\")?;\n            trace!(\"tree d deleted\");\n        }\n\n        let tree_count = get_base_tree_count::<Tree>();\n        let tree_c_size = t_aux\n            .tree_c_config\n            .size\n            .context(\"tree_c config has no size\")?;\n        let configs = split_config(t_aux.tree_c_config.clone(), tree_count)?;\n\n        if cached(&t_aux.tree_c_config) {\n            delete_tree_c_store(&t_aux.tree_c_config, tree_c_size)?;\n        } else if cached(&configs[0]) {\n            for config in &configs {\n                // Trees with sub-trees cannot be instantiated and deleted via the existing tree interface since\n                // knowledge of how the base trees are split exists outside of merkle light.  For now, we manually\n                // remove each on disk tree file since we know where they are here.\n                let tree_c_path = StoreConfig::data_path(&config.path, &config.id);\n                remove_file(&tree_c_path)\n                    .with_context(|| format!(\"Failed to delete {:?}\", &tree_c_path))?\n            }\n        }\n        trace!(\"tree c deleted\");\n\n        for i in 0..t_aux.labels.labels.len() {\n            let cur_config = t_aux.labels.labels[i].clone();\n            if cached(&cur_config) {\n                DiskStore::<<Tree::Hasher as Hasher>::Domain>::delete(cur_config)\n                    .with_context(|| format!(\"labels {}\", i))?;\n                trace!(\"layer {} deleted\", i);\n            }\n        }\n\n        Ok(())\n    }\n}\n\n#[derive(Debug)]\npub struct TemporaryAuxCache<Tree: MerkleTreeTrait, G: Hasher> {\n    /// The encoded nodes for 1..layers.\n    pub labels: LabelsCache<Tree>,\n    pub tree_d: BinaryMerkleTree<G>,\n\n    // Notably this is a LevelCacheTree instead of a full merkle.\n    pub tree_r_last: LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n\n    // Store the 'rows_to_discard' value from the tree_r_last\n    // StoreConfig for later use (i.e. 
proof generation).\n    pub tree_r_last_config_rows_to_discard: usize,\n\n    pub tree_c: DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n    pub t_aux: TemporaryAux<Tree, G>,\n    pub replica_path: PathBuf,\n}\n\nimpl<Tree: MerkleTreeTrait, G: Hasher> TemporaryAuxCache<Tree, G> {\n    pub fn new(t_aux: &TemporaryAux<Tree, G>, replica_path: PathBuf) -> Result<Self> {\n        // tree_d_size stored in the config is the base tree size\n        let tree_d_size = t_aux.tree_d_config.size.expect(\"config size failure\");\n        let tree_d_leafs = get_merkle_tree_leafs(tree_d_size, BINARY_ARITY)?;\n        trace!(\n            \"Instantiating tree d with size {} and leafs {}\",\n            tree_d_size,\n            tree_d_leafs,\n        );\n        let tree_d_store: DiskStore<G::Domain> =\n            DiskStore::new_from_disk(tree_d_size, BINARY_ARITY, &t_aux.tree_d_config)\n                .context(\"tree_d_store\")?;\n        let tree_d =\n            BinaryMerkleTree::<G>::from_data_store(tree_d_store, tree_d_leafs).context(\"tree_d\")?;\n\n        let tree_count = get_base_tree_count::<Tree>();\n        let configs = split_config(t_aux.tree_c_config.clone(), tree_count)?;\n\n        // tree_c_size stored in the config is the base tree size\n        let tree_c_size = t_aux.tree_c_config.size.expect(\"config size failure\");\n        trace!(\n            \"Instantiating tree c [count {}] with size {} and arity {}\",\n            tree_count,\n            tree_c_size,\n            Tree::Arity::to_usize(),\n        );\n        let tree_c = create_disk_tree::<\n            DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n        >(tree_c_size, &configs)?;\n\n        // tree_r_last_size stored in the config is the base tree size\n        let tree_r_last_size = t_aux.tree_r_last_config.size.expect(\"config size failure\");\n        let tree_r_last_config_rows_to_discard = t_aux.tree_r_last_config.rows_to_discard;\n        let (configs, replica_config) = split_config_and_replica(\n            t_aux.tree_r_last_config.clone(),\n            replica_path.clone(),\n            get_merkle_tree_leafs(tree_r_last_size, Tree::Arity::to_usize())?,\n            tree_count,\n        )?;\n\n        trace!(\n            \"Instantiating tree r last [count {}] with size {} and arity {}, {}, {}\",\n            tree_count,\n            tree_r_last_size,\n            Tree::Arity::to_usize(),\n            Tree::SubTreeArity::to_usize(),\n            Tree::TopTreeArity::to_usize(),\n        );\n        let tree_r_last = create_lc_tree::<\n            LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n        >(tree_r_last_size, &configs, &replica_config)?;\n\n        Ok(TemporaryAuxCache {\n            labels: LabelsCache::new(&t_aux.labels).context(\"labels_cache\")?,\n            tree_d,\n            tree_r_last,\n            tree_r_last_config_rows_to_discard,\n            tree_c,\n            replica_path,\n            t_aux: t_aux.clone(),\n        })\n    }\n\n    pub fn labels_for_layer(&self, layer: usize) -> &DiskStore<<Tree::Hasher as Hasher>::Domain> {\n        self.labels.labels_for_layer(layer)\n    }\n\n    pub fn domain_node_at_layer(\n        &self,\n        layer: usize,\n        node_index: u32,\n    ) -> Result<<Tree::Hasher as Hasher>::Domain> {\n        self.labels_for_layer(layer).read_at(node_index as usize)\n    }\n\n    pub fn column(&self, column_index: u32) -> Result<Column<Tree::Hasher>> {\n        
self.labels.column(column_index)\n    }\n}\n\ntype VerifyCallback = fn(&StoreConfig, usize, usize) -> Result<()>;\n\n#[derive(Debug, Serialize, Deserialize)]\npub struct Labels<Tree: MerkleTreeTrait> {\n    #[serde(bound(\n        serialize = \"StoreConfig: Serialize\",\n        deserialize = \"StoreConfig: Deserialize<'de>\"\n    ))]\n    pub labels: Vec<StoreConfig>,\n    pub _h: PhantomData<Tree>,\n}\n\nimpl<Tree: MerkleTreeTrait> Clone for Labels<Tree> {\n    fn clone(&self) -> Self {\n        Self {\n            labels: self.labels.clone(),\n            _h: Default::default(),\n        }\n    }\n}\n\nimpl<Tree: MerkleTreeTrait> Labels<Tree> {\n    pub fn new(labels: Vec<StoreConfig>) -> Self {\n        Labels {\n            labels,\n            _h: PhantomData,\n        }\n    }\n\n    pub fn len(&self) -> usize {\n        self.labels.len()\n    }\n\n    pub fn is_empty(&self) -> bool {\n        self.labels.is_empty()\n    }\n\n    pub fn verify_stores(&self, callback: VerifyCallback, cache_dir: &Path) -> Result<()> {\n        let updated_path_labels = self.labels.clone();\n        let required_configs = get_base_tree_count::<Tree>();\n        for mut label in updated_path_labels {\n            label.path = cache_dir.to_path_buf();\n            callback(&label, BINARY_ARITY, required_configs)?;\n        }\n\n        Ok(())\n    }\n\n    pub fn labels_for_layer(\n        &self,\n        layer: usize,\n    ) -> Result<DiskStore<<Tree::Hasher as Hasher>::Domain>> {\n        assert!(layer != 0, \"Layer cannot be 0\");\n        assert!(\n            layer <= self.layers(),\n            \"Layer {} is not available (only {} layers available)\",\n            layer,\n            self.layers()\n        );\n\n        let row_index = layer - 1;\n        let config = self.labels[row_index].clone();\n        assert!(config.size.is_some());\n\n        DiskStore::new_from_disk(\n            config.size.expect(\"config size failure\"),\n            Tree::Arity::to_usize(),\n            &config,\n        )\n    }\n\n    /// Returns label for the last layer.\n    pub fn labels_for_last_layer(&self) -> Result<DiskStore<<Tree::Hasher as Hasher>::Domain>> {\n        self.labels_for_layer(self.labels.len() - 1)\n    }\n\n    /// How many layers are available.\n    fn layers(&self) -> usize {\n        self.labels.len()\n    }\n\n    /// Build the column for the given node.\n    pub fn column(&self, node: u32) -> Result<Column<Tree::Hasher>> {\n        let rows = self\n            .labels\n            .iter()\n            .map(|label| {\n                assert!(label.size.is_some());\n                let store = DiskStore::new_from_disk(\n                    label.size.expect(\"label size failure\"),\n                    Tree::Arity::to_usize(),\n                    &label,\n                )?;\n                store.read_at(node as usize)\n            })\n            .collect::<Result<_>>()?;\n\n        Column::new(node, rows)\n    }\n\n    /// Update all configs to the new passed in root cache path.\n    pub fn update_root<P: AsRef<Path>>(&mut self, root: P) {\n        for config in &mut self.labels {\n            config.path = root.as_ref().into();\n        }\n    }\n}\n\n#[derive(Debug)]\npub struct LabelsCache<Tree: MerkleTreeTrait> {\n    pub labels: Vec<DiskStore<<Tree::Hasher as Hasher>::Domain>>,\n}\n\nimpl<Tree: MerkleTreeTrait> LabelsCache<Tree> {\n    pub fn new(labels: &Labels<Tree>) -> Result<Self> {\n        let mut disk_store_labels: Vec<DiskStore<<Tree::Hasher as Hasher>::Domain>> =\n         
   Vec::with_capacity(labels.len());\n        for i in 0..labels.len() {\n            disk_store_labels.push(labels.labels_for_layer(i + 1)?);\n        }\n\n        Ok(LabelsCache {\n            labels: disk_store_labels,\n        })\n    }\n\n    pub fn len(&self) -> usize {\n        self.labels.len()\n    }\n\n    pub fn is_empty(&self) -> bool {\n        self.labels.is_empty()\n    }\n\n    pub fn labels_for_layer(&self, layer: usize) -> &DiskStore<<Tree::Hasher as Hasher>::Domain> {\n        assert!(layer != 0, \"Layer cannot be 0\");\n        assert!(\n            layer <= self.layers(),\n            \"Layer {} is not available (only {} layers available)\",\n            layer,\n            self.layers()\n        );\n\n        let row_index = layer - 1;\n        &self.labels[row_index]\n    }\n\n    /// Returns the labels on the last layer.\n    pub fn labels_for_last_layer(&self) -> Result<&DiskStore<<Tree::Hasher as Hasher>::Domain>> {\n        Ok(&self.labels[self.labels.len() - 1])\n    }\n\n    /// How many layers are available.\n    fn layers(&self) -> usize {\n        self.labels.len()\n    }\n\n    /// Build the column for the given node.\n    pub fn column(&self, node: u32) -> Result<Column<Tree::Hasher>> {\n        let rows = self\n            .labels\n            .iter()\n            .map(|labels| labels.read_at(node as usize))\n            .collect::<Result<_>>()?;\n\n        Column::new(node, rows)\n    }\n}\n\npub fn get_node<H: Hasher>(data: &[u8], index: usize) -> Result<H::Domain> {\n    H::Domain::try_from_bytes(data_at_node(data, index).expect(\"invalid node math\"))\n}\n\n/// Generate the replica id as expected for Stacked DRG.\npub fn generate_replica_id<H: Hasher, T: AsRef<[u8]>>(\n    prover_id: &[u8; 32],\n    sector_id: u64,\n    ticket: &[u8; 32],\n    comm_d: T,\n    porep_seed: &[u8; 32],\n) -> H::Domain {\n    let hash = Sha256::new()\n        .chain(prover_id)\n        .chain(&sector_id.to_be_bytes()[..])\n        .chain(ticket)\n        .chain(AsRef::<[u8]>::as_ref(&comm_d))\n        .chain(porep_seed)\n        .finalize();\n\n    bytes_into_fr_repr_safe(hash.as_ref()).into()\n}\n"
  },
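The replica-id derivation at the bottom of `params.rs` is a straight SHA-256 over five concatenated fields. Here is a dependency-light sketch (only `sha2` assumed; `replica_id_digest` is an illustrative name) that stops at the raw 32-byte digest, before the `bytes_into_fr_repr_safe` mapping into Fr:

```rust
use sha2::{Digest, Sha256};

/// Sketch of the replica-id preimage used by `generate_replica_id` above:
/// prover_id | sector_id (BE u64) | ticket | comm_d | porep_seed.
fn replica_id_digest(
    prover_id: &[u8; 32],
    sector_id: u64,
    ticket: &[u8; 32],
    comm_d: &[u8],
    porep_seed: &[u8; 32],
) -> [u8; 32] {
    Sha256::new()
        .chain(prover_id)
        .chain(&sector_id.to_be_bytes()[..])
        .chain(ticket)
        .chain(comm_d)
        .chain(porep_seed)
        // The real code then clears the top bits via `bytes_into_fr_repr_safe`
        // so the result fits in the field.
        .finalize()
        .into()
}

fn main() {
    let digest = replica_id_digest(&[1u8; 32], 7, &[2u8; 32], &[3u8; 32], &[4u8; 32]);
    println!("{:x?}", digest);
}
```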
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/porep.rs",
    "content": "use std::path::PathBuf;\n\nuse filecoin_hashers::Hasher;\nuse merkletree::store::StoreConfig;\nuse storage_proofs_core::{\n    error::Result,\n    merkle::{BinaryMerkleTree, MerkleTreeTrait},\n    Data,\n};\n\nuse crate::{\n    stacked::vanilla::{\n        params::{PersistentAux, PublicParams, Tau, TemporaryAux},\n        proof::StackedDrg,\n    },\n    PoRep,\n};\n\nimpl<'a, 'c, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> PoRep<'a, Tree::Hasher, G>\n    for StackedDrg<'a, Tree, G>\n{\n    type Tau = Tau<<Tree::Hasher as Hasher>::Domain, <G as Hasher>::Domain>;\n    type ProverAux = (\n        PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n        TemporaryAux<Tree, G>,\n    );\n\n    fn replicate(\n        pp: &'a PublicParams<Tree>,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        data: Data<'a>,\n        data_tree: Option<BinaryMerkleTree<G>>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<(Self::Tau, Self::ProverAux)> {\n        let (tau, p_aux, t_aux) = Self::transform_and_replicate_layers(\n            &pp.graph,\n            &pp.layer_challenges,\n            replica_id,\n            data,\n            data_tree,\n            config,\n            replica_path,\n        )?;\n\n        Ok((tau, (p_aux, t_aux)))\n    }\n\n    fn extract_all<'b>(\n        pp: &'b PublicParams<Tree>,\n        replica_id: &'b <Tree::Hasher as Hasher>::Domain,\n        data: &'b mut [u8],\n        config: Option<StoreConfig>,\n    ) -> Result<()> {\n        Self::extract_and_invert_transform_layers(\n            &pp.graph,\n            &pp.layer_challenges,\n            replica_id,\n            data,\n            config.expect(\"Missing store config\"),\n        )?;\n\n        Ok(())\n    }\n\n    fn extract(\n        _pp: &PublicParams<Tree>,\n        _replica_id: &<Tree::Hasher as Hasher>::Domain,\n        _data: &mut [u8],\n        _node: usize,\n        _config: Option<StoreConfig>,\n    ) -> Result<()> {\n        unimplemented!();\n    }\n}\n"
  },
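`replicate` and `extract_all` are inverses because the underlying encoding step is exactly invertible: `crate::encode` (imported by `proof.rs` below, not shown in this file) encodes as addition in the field, so regenerating the same label (key) lets extraction recover the data by subtraction. A toy sketch of that round trip, using modular `u64` arithmetic in place of Fr (`TOY_MODULUS` is an arbitrary stand-in prime, and treating encode as field addition is an assumption about `crate::encode`):

```rust
// Stand-in prime modulus; the real code works in the BLS12-381 scalar field.
const TOY_MODULUS: u64 = 0xFFFF_FFFF_0000_0001;

/// replica = key + data (mod p): the "sealing" direction used by `replicate`.
fn encode(key: u64, data: u64) -> u64 {
    ((key as u128 + data as u128) % TOY_MODULUS as u128) as u64
}

/// data = replica - key (mod p): the direction used by `extract_all`.
fn decode(key: u64, replica: u64) -> u64 {
    ((replica as u128 + TOY_MODULUS as u128 - key as u128) % TOY_MODULUS as u128) as u64
}

fn main() {
    let (key, data) = (0xDEAD_BEEF, 42);
    let replica = encode(key, data);
    assert_eq!(decode(key, replica), data); // exact round trip
}
```

This is why `extract_and_invert_transform_layers` only needs the store config and replica id: the labels are deterministic given the graph and replica id, so the keys can always be regenerated.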
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/proof.rs",
    "content": "use std::fs;\nuse std::marker::PhantomData;\nuse std::panic::panic_any;\nuse std::path::{Path, PathBuf};\nuse std::sync::Mutex;\n\nuse anyhow::Context;\nuse bincode::deserialize;\nuse fdlimit::raise_fd_limit;\nuse filecoin_hashers::{Domain, HashFunction, Hasher, PoseidonArity};\nuse generic_array::typenum::{Unsigned, U0, U11, U2, U8};\nuse lazy_static::lazy_static;\nuse log::{error, info, trace};\nuse merkletree::{\n    merkle::{get_merkle_tree_len, is_merkle_tree_size_valid},\n    store::{Store, StoreConfig},\n};\nuse rayon::prelude::{\n    IndexedParallelIterator, IntoParallelIterator, ParallelIterator, ParallelSliceMut,\n};\nuse storage_proofs_core::{\n    cache_key::CacheKey,\n    data::Data,\n    drgraph::Graph,\n    error::Result,\n    measurements::{measure_op, Operation},\n    merkle::{\n        create_disk_tree, create_lc_tree, get_base_tree_count, split_config,\n        split_config_and_replica, BinaryMerkleTree, DiskTree, LCTree, MerkleProofTrait, MerkleTree,\n        MerkleTreeTrait,\n    },\n    settings::SETTINGS,\n    util::{default_rows_to_discard, NODE_SIZE},\n};\n\nuse crate::{\n    encode::{decode, encode},\n    stacked::vanilla::{\n        challenges::LayerChallenges,\n        column::Column,\n        create_label,\n        graph::StackedBucketGraph,\n        hash::hash_single_column,\n        params::{\n            get_node, Labels, LabelsCache, PersistentAux, Proof, PublicInputs, PublicParams,\n            ReplicaColumnProof, Tau, TemporaryAux, TemporaryAuxCache, TransformedLayers,\n            BINARY_ARITY,\n        },\n        EncodingProof, LabelingProof,\n    },\n    PoRep,\n};\n\npub const TOTAL_PARENTS: usize = 37;\n\nlazy_static! {\n    /// Ensure that only one `TreeBuilder` or `ColumnTreeBuilder` uses the GPU at a time.\n    /// Curently, this is accomplished by only instantiating at most one at a time.\n    /// It might be possible to relax this constraint, but in that case, only one builder\n    /// should actually be active at any given time, so the mutex should still be used.\n    static ref GPU_LOCK: Mutex<()> = Mutex::new(());\n}\n\n#[derive(Debug)]\npub struct StackedDrg<'a, Tree: MerkleTreeTrait, G: Hasher> {\n    _a: PhantomData<&'a Tree>,\n    _b: PhantomData<&'a G>,\n}\n\n#[derive(Debug)]\npub struct LayerState {\n    pub config: StoreConfig,\n    pub generated: bool,\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> StackedDrg<'a, Tree, G> {\n    #[allow(clippy::too_many_arguments)]\n    pub(crate) fn prove_layers(\n        graph: &StackedBucketGraph<Tree::Hasher>,\n        pub_inputs: &PublicInputs<<Tree::Hasher as Hasher>::Domain, <G as Hasher>::Domain>,\n        p_aux: &PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n        t_aux: &TemporaryAuxCache<Tree, G>,\n        layer_challenges: &LayerChallenges,\n        layers: usize,\n        partition_count: usize,\n    ) -> Result<Vec<Vec<Proof<Tree, G>>>> {\n        assert!(layers > 0);\n        assert_eq!(t_aux.labels.len(), layers);\n\n        let graph_size = graph.size();\n\n        // Sanity checks on restored trees.\n        assert!(pub_inputs.tau.is_some());\n        assert_eq!(\n            pub_inputs.tau.as_ref().expect(\"as_ref failure\").comm_d,\n            t_aux.tree_d.root()\n        );\n\n        let get_drg_parents_columns = |x: usize| -> Result<Vec<Column<Tree::Hasher>>> {\n            let base_degree = graph.base_graph().degree();\n\n            let mut columns = Vec::with_capacity(base_degree);\n\n            let mut parents = vec![0; 
base_degree];\n            graph.base_parents(x, &mut parents)?;\n\n            columns.extend(\n                parents\n                    .into_par_iter()\n                    .map(|parent| t_aux.column(parent))\n                    .collect::<Result<Vec<Column<Tree::Hasher>>>>()?,\n            );\n\n            debug_assert!(columns.len() == base_degree);\n\n            Ok(columns)\n        };\n\n        let get_exp_parents_columns = |x: usize| -> Result<Vec<Column<Tree::Hasher>>> {\n            let mut parents = vec![0; graph.expansion_degree()];\n            graph.expanded_parents(x, &mut parents)?;\n\n            parents\n                .into_par_iter()\n                .map(|parent| t_aux.column(parent))\n                .collect()\n        };\n\n        (0..partition_count)\n            .map(|k| {\n                trace!(\"proving partition {}/{}\", k + 1, partition_count);\n\n                // Derive the set of challenges we are proving over.\n                let challenges = pub_inputs.challenges(layer_challenges, graph_size, Some(k));\n\n                // Stacked commitment specifics\n                challenges\n                    .into_par_iter()\n                    .enumerate()\n                    .map(|(challenge_index, challenge)| {\n                        trace!(\" challenge {} ({})\", challenge, challenge_index);\n                        assert!(challenge < graph.size(), \"Invalid challenge\");\n                        assert!(challenge > 0, \"Invalid challenge\");\n\n                        // Initial data layer openings (c_X in Comm_D)\n                        let comm_d_proof = t_aux.tree_d.gen_proof(challenge)?;\n                        assert!(comm_d_proof.validate(challenge));\n\n                        // Stacked replica column openings\n                        let rcp = {\n                            let (c_x, drg_parents, exp_parents) = {\n                                assert_eq!(p_aux.comm_c, t_aux.tree_c.root());\n                                let tree_c = &t_aux.tree_c;\n\n                                // All labels in C_X\n                                trace!(\"  c_x\");\n                                let c_x = t_aux.column(challenge as u32)?.into_proof(tree_c)?;\n\n                                // All labels in the DRG parents.\n                                trace!(\"  drg_parents\");\n                                let drg_parents = get_drg_parents_columns(challenge)?\n                                    .into_iter()\n                                    .map(|column| column.into_proof(tree_c))\n                                    .collect::<Result<_>>()?;\n\n                                // Labels for the expander parents\n                                trace!(\"  exp_parents\");\n                                let exp_parents = get_exp_parents_columns(challenge)?\n                                    .into_iter()\n                                    .map(|column| column.into_proof(tree_c))\n                                    .collect::<Result<_>>()?;\n\n                                (c_x, drg_parents, exp_parents)\n                            };\n\n                            ReplicaColumnProof {\n                                c_x,\n                                drg_parents,\n                                exp_parents,\n                            }\n                        };\n\n                        // Final replica layer openings\n                        trace!(\"final replica layer openings\");\n                      
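  // NOTE: tree_r_last is a level-cache tree whose lower rows were discarded when\n                        // it was built, so the cached proof must be generated with the same\n                        // rows_to_discard value that was used at build time.\n                      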
  let comm_r_last_proof = t_aux.tree_r_last.gen_cached_proof(\n                            challenge,\n                            Some(t_aux.tree_r_last_config_rows_to_discard),\n                        )?;\n\n                        debug_assert!(comm_r_last_proof.validate(challenge));\n\n                        // Labeling Proofs Layer 1..l\n                        let mut labeling_proofs = Vec::with_capacity(layers);\n                        let mut encoding_proof = None;\n\n                        for layer in 1..=layers {\n                            trace!(\"  encoding proof layer {}\", layer,);\n                            let parents_data: Vec<<Tree::Hasher as Hasher>::Domain> = if layer == 1\n                            {\n                                let mut parents = vec![0; graph.base_graph().degree()];\n                                graph.base_parents(challenge, &mut parents)?;\n\n                                parents\n                                    .into_par_iter()\n                                    .map(|parent| t_aux.domain_node_at_layer(layer, parent))\n                                    .collect::<Result<_>>()?\n                            } else {\n                                let mut parents = vec![0; graph.degree()];\n                                graph.parents(challenge, &mut parents)?;\n                                let base_parents_count = graph.base_graph().degree();\n\n                                parents\n                                    .into_par_iter()\n                                    .enumerate()\n                                    .map(|(i, parent)| {\n                                        if i < base_parents_count {\n                                            // parents data for base parents is from the current layer\n                                            t_aux.domain_node_at_layer(layer, parent)\n                                        } else {\n                                            // parents data for exp parents is from the previous layer\n                                            t_aux.domain_node_at_layer(layer - 1, parent)\n                                        }\n                                    })\n                                    .collect::<Result<_>>()?\n                            };\n\n                            // repeat parents\n                            let mut parents_data_full = vec![Default::default(); TOTAL_PARENTS];\n                            for chunk in parents_data_full.chunks_mut(parents_data.len()) {\n                                chunk.copy_from_slice(&parents_data[..chunk.len()]);\n                            }\n\n                            let proof = LabelingProof::<Tree::Hasher>::new(\n                                layer as u32,\n                                challenge as u64,\n                                parents_data_full.clone(),\n                            );\n\n                            {\n                                let labeled_node = rcp.c_x.get_node_at_layer(layer)?;\n                                assert!(\n                                    proof.verify(&pub_inputs.replica_id, &labeled_node),\n                                    \"Invalid encoding proof generated at layer {}\",\n                                    layer,\n                                );\n                                trace!(\"Valid encoding proof generated at layer {}\", layer);\n                            }\n\n                            
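// One labeling proof is kept per layer; only the last layer, whose key is the\n                            // one that actually encodes the data, also yields an encoding proof below.\n                            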
labeling_proofs.push(proof);\n\n                            if layer == layers {\n                                encoding_proof = Some(EncodingProof::new(\n                                    layer as u32,\n                                    challenge as u64,\n                                    parents_data_full,\n                                ));\n                            }\n                        }\n\n                        Ok(Proof {\n                            comm_d_proofs: comm_d_proof,\n                            replica_column_proofs: rcp,\n                            comm_r_last_proof,\n                            labeling_proofs,\n                            encoding_proof: encoding_proof.expect(\"invalid tapering\"),\n                        })\n                    })\n                    .collect()\n            })\n            .collect()\n    }\n\n    pub(crate) fn extract_and_invert_transform_layers(\n        graph: &StackedBucketGraph<Tree::Hasher>,\n        layer_challenges: &LayerChallenges,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        data: &mut [u8],\n        config: StoreConfig,\n    ) -> Result<()> {\n        trace!(\"extract_and_invert_transform_layers\");\n\n        let layers = layer_challenges.layers();\n        assert!(layers > 0);\n\n        let labels =\n            Self::generate_labels_for_decoding(graph, layer_challenges, replica_id, config)?;\n\n        let last_layer_labels = labels.labels_for_last_layer()?;\n        let size = Store::len(last_layer_labels);\n\n        for (key, encoded_node_bytes) in last_layer_labels\n            .read_range(0..size)?\n            .into_iter()\n            .zip(data.chunks_mut(NODE_SIZE))\n        {\n            let encoded_node =\n                <Tree::Hasher as Hasher>::Domain::try_from_bytes(encoded_node_bytes)?;\n            let data_node = decode::<<Tree::Hasher as Hasher>::Domain>(key, encoded_node);\n\n            // store result in the data\n            encoded_node_bytes.copy_from_slice(AsRef::<[u8]>::as_ref(&data_node));\n        }\n\n        Ok(())\n    }\n\n    /// Generates the layers as needed for encoding.\n    pub fn generate_labels_for_encoding(\n        graph: &StackedBucketGraph<Tree::Hasher>,\n        layer_challenges: &LayerChallenges,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        config: StoreConfig,\n    ) -> Result<(Labels<Tree>, Vec<LayerState>)> {\n        let mut parent_cache = graph.parent_cache()?;\n\n        if SETTINGS.use_multicore_sdr {\n            info!(\"multi core replication\");\n            create_label::multi::create_labels_for_encoding(\n                graph,\n                &parent_cache,\n                layer_challenges.layers(),\n                replica_id,\n                config,\n            )\n        } else {\n            info!(\"single core replication\");\n            create_label::single::create_labels_for_encoding(\n                graph,\n                &mut parent_cache,\n                layer_challenges.layers(),\n                replica_id,\n                config,\n            )\n        }\n    }\n\n    /// Generates the layers, as needed for decoding.\n    pub fn generate_labels_for_decoding(\n        graph: &StackedBucketGraph<Tree::Hasher>,\n        layer_challenges: &LayerChallenges,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        config: StoreConfig,\n    ) -> Result<LabelsCache<Tree>> {\n        let mut parent_cache = graph.parent_cache()?;\n\n        if SETTINGS.use_multicore_sdr {\n     
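       // Same dispatch as in generate_labels_for_encoding above; the decoding\n            // variant only returns the LabelsCache and tracks no per-layer state.\n     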
       info!(\"multi core replication\");\n            create_label::multi::create_labels_for_decoding(\n                graph,\n                &parent_cache,\n                layer_challenges.layers(),\n                replica_id,\n                config,\n            )\n        } else {\n            info!(\"single core replication\");\n            create_label::single::create_labels_for_decoding(\n                graph,\n                &mut parent_cache,\n                layer_challenges.layers(),\n                replica_id,\n                config,\n            )\n        }\n    }\n\n    fn build_binary_tree<K: Hasher>(\n        tree_data: &[u8],\n        config: StoreConfig,\n    ) -> Result<BinaryMerkleTree<K>> {\n        trace!(\"building tree (size: {})\", tree_data.len());\n\n        let leafs = tree_data.len() / NODE_SIZE;\n        assert_eq!(tree_data.len() % NODE_SIZE, 0);\n\n        let tree = MerkleTree::from_par_iter_with_config(\n            (0..leafs)\n                .into_par_iter()\n                // TODO: proper error handling instead of `unwrap()`\n                .map(|i| get_node::<K>(tree_data, i).expect(\"get_node failure\")),\n            config,\n        )?;\n        Ok(tree)\n    }\n\n    #[cfg(any(feature = \"gpu\"))]\n    fn generate_tree_c<ColumnArity, TreeArity>(\n        layers: usize,\n        nodes_count: usize,\n        tree_count: usize,\n        configs: Vec<StoreConfig>,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        ColumnArity: 'static + PoseidonArity,\n        TreeArity: PoseidonArity,\n    {\n        if SETTINGS.use_gpu_column_builder {\n            Self::generate_tree_c_gpu::<ColumnArity, TreeArity>(\n                layers,\n                nodes_count,\n                tree_count,\n                configs,\n                labels,\n            )\n        } else {\n            Self::generate_tree_c_cpu::<ColumnArity, TreeArity>(\n                layers,\n                nodes_count,\n                tree_count,\n                configs,\n                labels,\n            )\n        }\n    }\n\n    #[cfg(not(any(feature = \"gpu\")))]\n    fn generate_tree_c<ColumnArity, TreeArity>(\n        layers: usize,\n        nodes_count: usize,\n        tree_count: usize,\n        configs: Vec<StoreConfig>,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        ColumnArity: 'static + PoseidonArity,\n        TreeArity: PoseidonArity,\n    {\n        Self::generate_tree_c_cpu::<ColumnArity, TreeArity>(\n            layers,\n            nodes_count,\n            tree_count,\n            configs,\n            labels,\n        )\n    }\n\n    #[allow(clippy::needless_range_loop)]\n    #[cfg(any(feature = \"gpu\"))]\n    fn generate_tree_c_gpu<ColumnArity, TreeArity>(\n        layers: usize,\n        nodes_count: usize,\n        tree_count: usize,\n        configs: Vec<StoreConfig>,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        ColumnArity: 'static + PoseidonArity,\n        TreeArity: PoseidonArity,\n    {\n        use std::cmp::min;\n        use std::sync::{mpsc::sync_channel, Arc, RwLock};\n\n        use bellperson::bls::Fr;\n        use fr32::fr_into_bytes;\n        use generic_array::GenericArray;\n        use merkletree::store::DiskStore;\n        use 
neptune::{\n            batch_hasher::BatcherType,\n            column_tree_builder::{ColumnTreeBuilder, ColumnTreeBuilderTrait},\n        };\n\n        info!(\"generating tree c using the GPU\");\n        // Build the tree for CommC\n        measure_op(Operation::GenerateTreeC, || {\n            info!(\"Building column hashes\");\n\n            // NOTE: The max number of columns we recommend sending to the GPU at once is\n            // 400000 for columns and 700000 for trees (conservative soft-limits discussed).\n            //\n            // 'column_write_batch_size' is how many nodes to chunk the base layer of data\n            // into when persisting to disk.\n            //\n            // Override these values with care using environment variables:\n            // FIL_PROOFS_MAX_GPU_COLUMN_BATCH_SIZE, FIL_PROOFS_MAX_GPU_TREE_BATCH_SIZE, and\n            // FIL_PROOFS_COLUMN_WRITE_BATCH_SIZE respectively.\n            let max_gpu_column_batch_size = SETTINGS.max_gpu_column_batch_size as usize;\n            let max_gpu_tree_batch_size = SETTINGS.max_gpu_tree_batch_size as usize;\n            let column_write_batch_size = SETTINGS.column_write_batch_size as usize;\n\n            // This channel will receive batches of columns and add them to the ColumnTreeBuilder.\n            let (builder_tx, builder_rx) = sync_channel(0);\n\n            let config_count = configs.len(); // Don't move config into closure below.\n            rayon::scope(|s| {\n                // This channel will receive the finished tree data to be written to disk.\n                let (writer_tx, writer_rx) = sync_channel::<(Vec<Fr>, Vec<Fr>)>(0);\n\n                s.spawn(move |_| {\n                    for i in 0..config_count {\n                        let mut node_index = 0;\n                        let builder_tx = builder_tx.clone();\n                        while node_index != nodes_count {\n                            let chunked_nodes_count =\n                                min(nodes_count - node_index, max_gpu_column_batch_size);\n                            trace!(\n                                \"processing config {}/{} with column nodes {}\",\n                                i + 1,\n                                tree_count,\n                                chunked_nodes_count,\n                            );\n\n                            let columns: Vec<GenericArray<Fr, ColumnArity>> = {\n                                use fr32::bytes_into_fr;\n\n                                // Allocate layer data array and insert a placeholder for each layer.\n                                let mut layer_data: Vec<Vec<u8>> =\n                                    vec![\n                                        vec![0u8; chunked_nodes_count * std::mem::size_of::<Fr>()];\n                                        layers\n                                    ];\n\n                                // gather all layer data.\n                                for (layer_index, mut layer_bytes) in\n                                    layer_data.iter_mut().enumerate()\n                                {\n                                    let store = labels.labels_for_layer(layer_index + 1);\n                                    let start = (i * nodes_count) + node_index;\n                                    let end = start + chunked_nodes_count;\n\n                                    store\n                                        .read_range_into(start, end, &mut layer_bytes)\n                                        
.expect(\"failed to read store range\");\n                                }\n\n                                (0..chunked_nodes_count)\n                                    .into_par_iter()\n                                    .map(|index| {\n                                        (0..layers)\n                                            .map(|layer_index| {\n                                                bytes_into_fr(\n                                                &layer_data[layer_index][std::mem::size_of::<Fr>()\n                                                    * index\n                                                    ..std::mem::size_of::<Fr>() * (index + 1)],\n                                            )\n                                            .expect(\"Could not create Fr from bytes.\")\n                                            })\n                                            .collect::<GenericArray<Fr, ColumnArity>>()\n                                    })\n                                    .collect()\n                            };\n\n                            node_index += chunked_nodes_count;\n                            trace!(\n                                \"node index {}/{}/{}\",\n                                node_index,\n                                chunked_nodes_count,\n                                nodes_count,\n                            );\n\n                            let is_final = node_index == nodes_count;\n                            builder_tx\n                                .send((columns, is_final))\n                                .expect(\"failed to send columns\");\n                        }\n                    }\n                });\n                s.spawn(move |_| {\n                    let _gpu_lock = GPU_LOCK.lock().expect(\"failed to get gpu lock\");\n                    let mut column_tree_builder = ColumnTreeBuilder::<ColumnArity, TreeArity>::new(\n                        Some(BatcherType::OpenCL),\n                        nodes_count,\n                        max_gpu_column_batch_size,\n                        max_gpu_tree_batch_size,\n                    )\n                    .expect(\"failed to create ColumnTreeBuilder\");\n\n                    // Loop until all trees for all configs have been built.\n                    for i in 0..config_count {\n                        loop {\n                            let (columns, is_final): (Vec<GenericArray<Fr, ColumnArity>>, bool) =\n                                builder_rx.recv().expect(\"failed to recv columns\");\n\n                            // Just add non-final column batches.\n                            if !is_final {\n                                column_tree_builder\n                                    .add_columns(&columns)\n                                    .expect(\"failed to add columns\");\n                                continue;\n                            };\n\n                            // If we get here, this is a final column: build a sub-tree.\n                            let (base_data, tree_data) = column_tree_builder\n                                .add_final_columns(&columns)\n                                .expect(\"failed to add final columns\");\n                            trace!(\n                                \"base data len {}, tree data len {}\",\n                                base_data.len(),\n                                tree_data.len()\n                            );\n\n                            let tree_len = 
base_data.len() + tree_data.len();\n                            info!(\n                                \"persisting base tree_c {}/{} of length {}\",\n                                i + 1,\n                                tree_count,\n                                tree_len,\n                            );\n\n                            writer_tx\n                                .send((base_data, tree_data))\n                                .expect(\"failed to send base_data, tree_data\");\n                            break;\n                        }\n                    }\n                });\n\n                for config in &configs {\n                    let (base_data, tree_data) = writer_rx\n                        .recv()\n                        .expect(\"failed to receive base_data, tree_data for tree_c\");\n                    let tree_len = base_data.len() + tree_data.len();\n\n                    assert_eq!(base_data.len(), nodes_count);\n                    assert_eq!(tree_len, config.size.expect(\"config size failure\"));\n\n                    // Persist the base and tree data to disk using the current store config.\n                    let tree_c_store_path = StoreConfig::data_path(&config.path, &config.id);\n                    let tree_c_store_exists = Path::new(&tree_c_store_path).exists();\n                    trace!(\n                        \"tree_c store path {:?} -- exists? {}\",\n                        tree_c_store_path,\n                        tree_c_store_exists\n                    );\n                    if tree_c_store_exists {\n                        std::fs::remove_file(&tree_c_store_path)\n                            .expect(\"failed to remove tree_c_store_path\");\n                    }\n\n                    let tree_c_store =\n                        DiskStore::<<Tree::Hasher as Hasher>::Domain>::new_with_config(\n                            tree_len,\n                            Tree::Arity::to_usize(),\n                            config.clone(),\n                        )\n                        .expect(\"failed to create DiskStore for base tree data\");\n\n                    let store = Arc::new(RwLock::new(tree_c_store));\n                    let batch_size = min(base_data.len(), column_write_batch_size);\n                    let flatten_and_write_store = |data: &Vec<Fr>, offset| {\n                        data.into_par_iter()\n                            .chunks(batch_size)\n                            .enumerate()\n                            .try_for_each(|(index, fr_elements)| {\n                                let mut buf = Vec::with_capacity(batch_size * NODE_SIZE);\n\n                                for fr in fr_elements {\n                                    buf.extend(fr_into_bytes(&fr));\n                                }\n                                store\n                                    .write()\n                                    .expect(\"failed to access store for write\")\n                                    .copy_from_slice(&buf[..], offset + (batch_size * index))\n                            })\n                    };\n\n                    trace!(\n                        \"flattening tree_c base data of {} nodes using batch size {}\",\n                        base_data.len(),\n                        batch_size\n                    );\n                    flatten_and_write_store(&base_data, 0)\n                        .expect(\"failed to flatten and write store\");\n                    trace!(\"done 
flattening tree_c base data\");\n\n                    let base_offset = base_data.len();\n                    trace!(\"flattening tree_c tree data of {} nodes using batch size {} and base offset {}\", tree_data.len(), batch_size, base_offset);\n                    flatten_and_write_store(&tree_data, base_offset)\n                        .expect(\"failed to flatten and write store\");\n                    trace!(\"done flattening tree_c tree data\");\n\n                    trace!(\"writing tree_c store data\");\n                    store\n                        .write()\n                        .expect(\"failed to access store for sync\")\n                        .sync()\n                        .expect(\"store sync failure\");\n                    trace!(\"done writing tree_c store data\");\n                }\n            });\n\n            create_disk_tree::<\n                DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n            >(configs[0].size.expect(\"config size failure\"), &configs)\n        })\n    }\n\n    fn generate_tree_c_cpu<ColumnArity, TreeArity>(\n        layers: usize,\n        nodes_count: usize,\n        tree_count: usize,\n        configs: Vec<StoreConfig>,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        ColumnArity: PoseidonArity,\n        TreeArity: PoseidonArity,\n    {\n        info!(\"generating tree c using the CPU\");\n        measure_op(Operation::GenerateTreeC, || {\n            info!(\"Building column hashes\");\n\n            let mut trees = Vec::with_capacity(tree_count);\n            for (i, config) in configs.iter().enumerate() {\n                let mut hashes: Vec<<Tree::Hasher as Hasher>::Domain> =\n                    vec![<Tree::Hasher as Hasher>::Domain::default(); nodes_count];\n\n                rayon::scope(|s| {\n                    let n = num_cpus::get();\n\n                    // only split if we have at least two elements per thread\n                    let num_chunks = if n > nodes_count * 2 { 1 } else { n };\n\n                    // chunk into n chunks\n                    let chunk_size = (nodes_count as f64 / num_chunks as f64).ceil() as usize;\n\n                    // calculate all n chunks in parallel\n                    for (chunk, hashes_chunk) in hashes.chunks_mut(chunk_size).enumerate() {\n                        let labels = &labels;\n\n                        s.spawn(move |_| {\n                            for (j, hash) in hashes_chunk.iter_mut().enumerate() {\n                                let data: Vec<_> = (1..=layers)\n                                    .map(|layer| {\n                                        let store = labels.labels_for_layer(layer);\n                                        let el: <Tree::Hasher as Hasher>::Domain = store\n                                            .read_at((i * nodes_count) + j + chunk * chunk_size)\n                                            .expect(\"store read_at failure\");\n                                        el.into()\n                                    })\n                                    .collect();\n\n                                *hash = hash_single_column(&data).into();\n                            }\n                        });\n                    }\n                });\n\n                info!(\"building base tree_c {}/{}\", i + 1, tree_count);\n                trees.push(\n                    DiskTree::<Tree::Hasher, 
Tree::Arity, U0, U0>::from_par_iter_with_config(\n                        hashes.into_par_iter(),\n                        config.clone(),\n                    ),\n                );\n            }\n\n            assert_eq!(tree_count, trees.len());\n\n            create_disk_tree::<\n                DiskTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>,\n            >(configs[0].size.expect(\"config size failure\"), &configs)\n        })\n    }\n\n    #[cfg(any(feature = \"gpu\"))]\n    fn generate_tree_r_last<TreeArity>(\n        data: &mut Data<'_>,\n        nodes_count: usize,\n        tree_count: usize,\n        tree_r_last_config: StoreConfig,\n        replica_path: PathBuf,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        TreeArity: PoseidonArity,\n    {\n        if SETTINGS.use_gpu_tree_builder {\n            Self::generate_tree_r_last_gpu::<TreeArity>(\n                data,\n                nodes_count,\n                tree_count,\n                tree_r_last_config,\n                replica_path,\n                labels,\n            )\n        } else {\n            Self::generate_tree_r_last_cpu::<TreeArity>(\n                data,\n                nodes_count,\n                tree_count,\n                tree_r_last_config,\n                replica_path,\n                labels,\n            )\n        }\n    }\n\n    #[cfg(not(any(feature = \"gpu\")))]\n    fn generate_tree_r_last<TreeArity>(\n        data: &mut Data<'_>,\n        nodes_count: usize,\n        tree_count: usize,\n        tree_r_last_config: StoreConfig,\n        replica_path: PathBuf,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        TreeArity: PoseidonArity,\n    {\n        Self::generate_tree_r_last_cpu::<TreeArity>(\n            data,\n            nodes_count,\n            tree_count,\n            tree_r_last_config,\n            replica_path,\n            labels,\n        )\n    }\n\n    #[cfg(any(feature = \"gpu\"))]\n    fn generate_tree_r_last_gpu<TreeArity>(\n        data: &mut Data<'_>,\n        nodes_count: usize,\n        tree_count: usize,\n        tree_r_last_config: StoreConfig,\n        replica_path: PathBuf,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        TreeArity: PoseidonArity,\n    {\n        use std::cmp::min;\n        use std::fs::OpenOptions;\n        use std::io::Write;\n        use std::sync::mpsc::sync_channel;\n\n        use bellperson::bls::Fr;\n        use fr32::fr_into_bytes;\n        use merkletree::merkle::{get_merkle_tree_cache_size, get_merkle_tree_leafs};\n        use neptune::{\n            batch_hasher::BatcherType,\n            tree_builder::{TreeBuilder, TreeBuilderTrait},\n        };\n\n        let (configs, replica_config) = split_config_and_replica(\n            tree_r_last_config.clone(),\n            replica_path,\n            nodes_count,\n            tree_count,\n        )?;\n\n        data.ensure_data()?;\n        let last_layer_labels = labels.labels_for_last_layer()?;\n\n        info!(\"generating tree r last using the GPU\");\n        let max_gpu_tree_batch_size = SETTINGS.max_gpu_tree_batch_size as usize;\n\n        // This channel will receive batches of leaf nodes and add them to the TreeBuilder.\n        let (builder_tx, builder_rx) = 
sync_channel::<(Vec<Fr>, bool)>(0);\n        let config_count = configs.len(); // Don't move config into closure below.\n        let configs = &configs;\n        let tree_r_last_config = &tree_r_last_config;\n        rayon::scope(|s| {\n            // This channel will receive the finished tree data to be written to disk.\n            let (writer_tx, writer_rx) = sync_channel::<Vec<Fr>>(0);\n\n            s.spawn(move |_| {\n                for i in 0..config_count {\n                    let mut node_index = 0;\n                    while node_index != nodes_count {\n                        let chunked_nodes_count =\n                            min(nodes_count - node_index, max_gpu_tree_batch_size);\n                        let start = (i * nodes_count) + node_index;\n                        let end = start + chunked_nodes_count;\n                        trace!(\n                            \"processing config {}/{} with leaf nodes {} [{}, {}, {}-{}]\",\n                            i + 1,\n                            tree_count,\n                            chunked_nodes_count,\n                            node_index,\n                            nodes_count,\n                            start,\n                            end,\n                        );\n\n                        let encoded_data = {\n                            use fr32::bytes_into_fr;\n\n                            let mut layer_bytes =\n                                vec![0u8; (end - start) * std::mem::size_of::<Fr>()];\n                            last_layer_labels\n                                .read_range_into(start, end, &mut layer_bytes)\n                                .expect(\"failed to read layer bytes\");\n\n                            layer_bytes\n                                .into_par_iter()\n                                .chunks(std::mem::size_of::<Fr>())\n                                .map(|chunk| {\n                                    bytes_into_fr(&chunk).expect(\"Could not create Fr from bytes.\")\n                                })\n                                .zip(\n                                    data.as_mut()[(start * NODE_SIZE)..(end * NODE_SIZE)]\n                                        .par_chunks_mut(NODE_SIZE),\n                                )\n                                .map(|(key, data_node_bytes)| {\n                                    let data_node =\n                                        <Tree::Hasher as Hasher>::Domain::try_from_bytes(\n                                            data_node_bytes,\n                                        )\n                                        .expect(\"try_from_bytes failed\");\n\n                                    let encoded_node = encode::<<Tree::Hasher as Hasher>::Domain>(\n                                        key.into(),\n                                        data_node,\n                                    );\n                                    data_node_bytes\n                                        .copy_from_slice(AsRef::<[u8]>::as_ref(&encoded_node));\n\n                                    encoded_node\n                                })\n                        };\n\n                        node_index += chunked_nodes_count;\n                        trace!(\n                            \"node index {}/{}/{}\",\n                            node_index,\n                            chunked_nodes_count,\n                            nodes_count,\n                        );\n\n                        let encoded: 
Vec<_> =\n                            encoded_data.into_par_iter().map(|x| x.into()).collect();\n\n                        let is_final = node_index == nodes_count;\n                        builder_tx\n                            .send((encoded, is_final))\n                            .expect(\"failed to send encoded\");\n                    }\n                }\n            });\n            s.spawn(move |_| {\n                let _gpu_lock = GPU_LOCK.lock().expect(\"failed to get gpu lock\");\n                let mut tree_builder = TreeBuilder::<Tree::Arity>::new(\n                    Some(BatcherType::OpenCL),\n                    nodes_count,\n                    max_gpu_tree_batch_size,\n                    tree_r_last_config.rows_to_discard,\n                )\n                .expect(\"failed to create TreeBuilder\");\n\n                // Loop until all trees for all configs have been built.\n                for i in 0..config_count {\n                    loop {\n                        let (encoded, is_final) =\n                            builder_rx.recv().expect(\"failed to recv encoded data\");\n\n                        // Just add non-final leaf batches.\n                        if !is_final {\n                            tree_builder\n                                .add_leaves(&encoded)\n                                .expect(\"failed to add leaves\");\n                            continue;\n                        };\n\n                        // If we get here, this is a final leaf batch: build a sub-tree.\n                        info!(\n                            \"building base tree_r_last with GPU {}/{}\",\n                            i + 1,\n                            tree_count\n                        );\n                        let (_, tree_data) = tree_builder\n                            .add_final_leaves(&encoded)\n                            .expect(\"failed to add final leaves\");\n\n                        writer_tx.send(tree_data).expect(\"failed to send tree_data\");\n                        break;\n                    }\n                }\n            });\n\n            for config in configs.iter() {\n                let tree_data = writer_rx\n                    .recv()\n                    .expect(\"failed to receive tree_data for tree_r_last\");\n\n                let tree_data_len = tree_data.len();\n                let cache_size = get_merkle_tree_cache_size(\n                    get_merkle_tree_leafs(\n                        config.size.expect(\"config size failure\"),\n                        Tree::Arity::to_usize(),\n                    )\n                    .expect(\"failed to get merkle tree leaves\"),\n                    Tree::Arity::to_usize(),\n                    config.rows_to_discard,\n                )\n                .expect(\"failed to get merkle tree cache size\");\n                assert_eq!(tree_data_len, cache_size);\n\n                let flat_tree_data: Vec<_> = tree_data\n                    .into_par_iter()\n                    .flat_map(|el| fr_into_bytes(&el))\n                    .collect();\n\n                // Persist the data to the store based on the current config.\n                let tree_r_last_path = StoreConfig::data_path(&config.path, &config.id);\n                trace!(\n                    \"persisting tree r of len {} with {} rows to discard at path {:?}\",\n                    tree_data_len,\n                    config.rows_to_discard,\n                    tree_r_last_path\n                );\n          
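      // NOTE: only the cached portion of the tree (the rows above\n                // 'rows_to_discard') is persisted here; the base layer is expected to\n                // live in the replica file itself.\n          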
      let mut f = OpenOptions::new()\n                    .create(true)\n                    .write(true)\n                    .open(&tree_r_last_path)\n                    .expect(\"failed to open file for tree_r_last\");\n                f.write_all(&flat_tree_data)\n                    .expect(\"failed to write tree_r_last data\");\n            }\n        });\n\n        create_lc_tree::<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>(\n            tree_r_last_config.size.expect(\"config size failure\"),\n            &configs,\n            &replica_config,\n        )\n    }\n\n    fn generate_tree_r_last_cpu<TreeArity>(\n        data: &mut Data<'_>,\n        nodes_count: usize,\n        tree_count: usize,\n        tree_r_last_config: StoreConfig,\n        replica_path: PathBuf,\n        labels: &LabelsCache<Tree>,\n    ) -> Result<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        TreeArity: PoseidonArity,\n    {\n        let (configs, replica_config) = split_config_and_replica(\n            tree_r_last_config.clone(),\n            replica_path,\n            nodes_count,\n            tree_count,\n        )?;\n\n        data.ensure_data()?;\n        let last_layer_labels = labels.labels_for_last_layer()?;\n\n        info!(\"generating tree r last using the CPU\");\n        let size = Store::len(last_layer_labels);\n\n        let mut start = 0;\n        let mut end = size / tree_count;\n\n        for (i, config) in configs.iter().enumerate() {\n            let encoded_data = last_layer_labels\n                .read_range(start..end)?\n                .into_par_iter()\n                .zip(\n                    data.as_mut()[(start * NODE_SIZE)..(end * NODE_SIZE)].par_chunks_mut(NODE_SIZE),\n                )\n                .map(|(key, data_node_bytes)| {\n                    let data_node =\n                        <Tree::Hasher as Hasher>::Domain::try_from_bytes(data_node_bytes)\n                            .expect(\"try_from_bytes failed\");\n                    let encoded_node = encode::<<Tree::Hasher as Hasher>::Domain>(key, data_node);\n                    data_node_bytes.copy_from_slice(AsRef::<[u8]>::as_ref(&encoded_node));\n\n                    encoded_node\n                });\n\n            info!(\n                \"building base tree_r_last with CPU {}/{}\",\n                i + 1,\n                tree_count\n            );\n\n            // Remove the tree_r_last store if it exists already\n            let tree_r_last_store_path = StoreConfig::data_path(&config.path, &config.id);\n            let tree_r_last_store_exists = Path::new(&tree_r_last_store_path).exists();\n            trace!(\n                \"tree_r_last store path {:?} -- exists? 
{}\",\n                tree_r_last_store_path,\n                tree_r_last_store_exists\n            );\n            if tree_r_last_store_exists {\n                std::fs::remove_file(&tree_r_last_store_path)\n                    .expect(\"failed to remove tree_r_last_store_path\");\n            }\n\n            LCTree::<Tree::Hasher, Tree::Arity, U0, U0>::from_par_iter_with_config(\n                encoded_data,\n                config.clone(),\n            )\n            .with_context(|| format!(\"failed tree_r_last CPU {}/{}\", i + 1, tree_count))?;\n\n            start = end;\n            end += size / tree_count;\n        }\n\n        create_lc_tree::<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>(\n            tree_r_last_config.size.expect(\"config size failure\"),\n            &configs,\n            &replica_config,\n        )\n    }\n\n    pub(crate) fn transform_and_replicate_layers(\n        graph: &StackedBucketGraph<Tree::Hasher>,\n        layer_challenges: &LayerChallenges,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        data: Data<'_>,\n        data_tree: Option<BinaryMerkleTree<G>>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<TransformedLayers<Tree, G>> {\n        // Generate key layers.\n        let labels = measure_op(Operation::EncodeWindowTimeAll, || {\n            Self::generate_labels_for_encoding(graph, layer_challenges, replica_id, config.clone())\n                .context(\"failed to generate labels\")\n        })?\n        .0;\n\n        Self::transform_and_replicate_layers_inner(\n            graph,\n            layer_challenges,\n            data,\n            data_tree,\n            config,\n            replica_path,\n            labels,\n        )\n        .context(\"failed to transform\")\n    }\n\n    pub(crate) fn transform_and_replicate_layers_inner(\n        graph: &StackedBucketGraph<Tree::Hasher>,\n        layer_challenges: &LayerChallenges,\n        mut data: Data<'_>,\n        data_tree: Option<BinaryMerkleTree<G>>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n        label_configs: Labels<Tree>,\n    ) -> Result<TransformedLayers<Tree, G>> {\n        trace!(\"transform_and_replicate_layers\");\n        let nodes_count = graph.size();\n\n        assert_eq!(data.len(), nodes_count * NODE_SIZE);\n        trace!(\"nodes count {}, data len {}\", nodes_count, data.len());\n\n        let tree_count = get_base_tree_count::<Tree>();\n        let nodes_count = graph.size() / tree_count;\n\n        // Ensure that the node count will work for binary and oct arities.\n        let binary_arity_valid = is_merkle_tree_size_valid(nodes_count, BINARY_ARITY);\n        let other_arity_valid = is_merkle_tree_size_valid(nodes_count, Tree::Arity::to_usize());\n        trace!(\n            \"is_merkle_tree_size_valid({}, BINARY_ARITY) = {}\",\n            nodes_count,\n            binary_arity_valid\n        );\n        trace!(\n            \"is_merkle_tree_size_valid({}, {}) = {}\",\n            nodes_count,\n            Tree::Arity::to_usize(),\n            other_arity_valid\n        );\n        assert!(binary_arity_valid);\n        assert!(other_arity_valid);\n\n        let layers = layer_challenges.layers();\n        assert!(layers > 0);\n\n        // Generate all store configs that we need based on the\n        // cache_path in the specified config.\n        let mut tree_d_config = StoreConfig::from_config(\n            &config,\n            
CacheKey::CommDTree.to_string(),\n            Some(get_merkle_tree_len(nodes_count, BINARY_ARITY)?),\n        );\n        tree_d_config.rows_to_discard = default_rows_to_discard(nodes_count, BINARY_ARITY);\n\n        let mut tree_r_last_config = StoreConfig::from_config(\n            &config,\n            CacheKey::CommRLastTree.to_string(),\n            Some(get_merkle_tree_len(nodes_count, Tree::Arity::to_usize())?),\n        );\n\n        // A default 'rows_to_discard' value is chosen for tree_r_last unless the user overrides it via the\n        // environment setting (FIL_PROOFS_ROWS_TO_DISCARD).  If an override is specified, no validation is\n        // done on it here, so it may result in a broken configuration.  Use with caution.  Note, however,\n        // that when this unchecked value is later passed through to merkle_light, merkle_light itself\n        // rejects any attempt to discard more rows than possible.\n        tree_r_last_config.rows_to_discard =\n            default_rows_to_discard(nodes_count, Tree::Arity::to_usize());\n        trace!(\n            \"tree_r_last using rows_to_discard={}\",\n            tree_r_last_config.rows_to_discard\n        );\n\n        let mut tree_c_config = StoreConfig::from_config(\n            &config,\n            CacheKey::CommCTree.to_string(),\n            Some(get_merkle_tree_len(nodes_count, Tree::Arity::to_usize())?),\n        );\n        tree_c_config.rows_to_discard =\n            default_rows_to_discard(nodes_count, Tree::Arity::to_usize());\n\n        let labels =\n            LabelsCache::<Tree>::new(&label_configs).context(\"failed to create labels cache\")?;\n        let configs = split_config(tree_c_config.clone(), tree_count)?;\n\n        match raise_fd_limit() {\n            Some(res) => {\n                info!(\"Building trees [{} descriptors max available]\", res);\n            }\n            None => error!(\"Failed to raise the fd limit\"),\n        };\n\n        let tree_c_root = match layers {\n            2 => {\n                let tree_c = Self::generate_tree_c::<U2, Tree::Arity>(\n                    layers,\n                    nodes_count,\n                    tree_count,\n                    configs,\n                    &labels,\n                )?;\n                tree_c.root()\n            }\n            8 => {\n                let tree_c = Self::generate_tree_c::<U8, Tree::Arity>(\n                    layers,\n                    nodes_count,\n                    tree_count,\n                    configs,\n                    &labels,\n                )?;\n                tree_c.root()\n            }\n            11 => {\n                let tree_c = Self::generate_tree_c::<U11, Tree::Arity>(\n                    layers,\n                    nodes_count,\n                    tree_count,\n                    configs,\n                    &labels,\n                )?;\n                tree_c.root()\n            }\n            _ => panic_any(\"Unsupported column arity\"),\n        };\n        info!(\"tree_c done\");\n\n        // Build the MerkleTree over the original data (if needed).\n        let tree_d = match data_tree {\n            Some(t) => {\n                trace!(\"using existing original data merkle tree\");\n                assert_eq!(t.len(), 2 * (data.len() / NODE_SIZE) - 1);\n\n                t\n            }\n            None => {\n                trace!(\"building merkle tree for the original data\");\n                data.ensure_data()?;\n                
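// No data tree was supplied by the caller, so build the binary tree (comm_d)\n                // over the original data here.\n                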
measure_op(Operation::CommD, || {\n                    Self::build_binary_tree::<G>(data.as_ref(), tree_d_config.clone())\n                })?\n            }\n        };\n        tree_d_config.size = Some(tree_d.len());\n        assert_eq!(\n            tree_d_config.size.expect(\"config size failure\"),\n            tree_d.len()\n        );\n        let tree_d_root = tree_d.root();\n        drop(tree_d);\n\n        // Encode original data into the last layer.\n        info!(\"building tree_r_last\");\n        let tree_r_last = measure_op(Operation::GenerateTreeRLast, || {\n            Self::generate_tree_r_last::<Tree::Arity>(\n                &mut data,\n                nodes_count,\n                tree_count,\n                tree_r_last_config.clone(),\n                replica_path.clone(),\n                &labels,\n            )\n            .context(\"failed to generate tree_r_last\")\n        })?;\n        info!(\"tree_r_last done\");\n\n        let tree_r_last_root = tree_r_last.root();\n        drop(tree_r_last);\n\n        data.drop_data();\n\n        // comm_r = H(comm_c || comm_r_last)\n        let comm_r: <Tree::Hasher as Hasher>::Domain =\n            <Tree::Hasher as Hasher>::Function::hash2(&tree_c_root, &tree_r_last_root);\n\n        Ok((\n            Tau {\n                comm_d: tree_d_root,\n                comm_r,\n            },\n            PersistentAux {\n                comm_c: tree_c_root,\n                comm_r_last: tree_r_last_root,\n            },\n            TemporaryAux {\n                labels: label_configs,\n                tree_d_config,\n                tree_r_last_config,\n                tree_c_config,\n                _g: PhantomData,\n            },\n        ))\n    }\n\n    /// Phase1 of replication.\n    pub fn replicate_phase1(\n        pp: &'a PublicParams<Tree>,\n        replica_id: &<Tree::Hasher as Hasher>::Domain,\n        config: StoreConfig,\n    ) -> Result<Labels<Tree>> {\n        info!(\"replicate_phase1\");\n\n        let labels = measure_op(Operation::EncodeWindowTimeAll, || {\n            Self::generate_labels_for_encoding(&pp.graph, &pp.layer_challenges, replica_id, config)\n        })?\n        .0;\n\n        Ok(labels)\n    }\n\n    /// Phase2 of replication.\n    #[allow(clippy::type_complexity)]\n    pub fn replicate_phase2(\n        pp: &'a PublicParams<Tree>,\n        labels: Labels<Tree>,\n        data: Data<'a>,\n        data_tree: BinaryMerkleTree<G>,\n        config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<(\n        <Self as PoRep<'a, Tree::Hasher, G>>::Tau,\n        <Self as PoRep<'a, Tree::Hasher, G>>::ProverAux,\n    )> {\n        info!(\"replicate_phase2\");\n\n        let (tau, paux, taux) = Self::transform_and_replicate_layers_inner(\n            &pp.graph,\n            &pp.layer_challenges,\n            data,\n            Some(data_tree),\n            config,\n            replica_path,\n            labels,\n        )?;\n\n        Ok((tau, (paux, taux)))\n    }\n\n    // Assumes data is all zeros.\n    // Replica path is used to create configs, but is not read.\n    // Instead new zeros are provided (hence the need for replica to be all zeros).\n    #[cfg(any(feature = \"gpu\"))]\n    fn generate_fake_tree_r_last<TreeArity>(\n        nodes_count: usize,\n        tree_count: usize,\n        tree_r_last_config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        TreeArity: 
PoseidonArity,\n    {\n        use std::fs::OpenOptions;\n        use std::io::Write;\n\n        use bellperson::bls::Fr;\n        use ff::Field;\n        use fr32::fr_into_bytes;\n        use merkletree::merkle::{get_merkle_tree_cache_size, get_merkle_tree_leafs};\n        use neptune::{\n            batch_hasher::BatcherType,\n            tree_builder::{TreeBuilder, TreeBuilderTrait},\n        };\n\n        let (configs, replica_config) = split_config_and_replica(\n            tree_r_last_config.clone(),\n            replica_path,\n            nodes_count,\n            tree_count,\n        )?;\n\n        if SETTINGS.use_gpu_tree_builder {\n            info!(\"generating tree r last using the GPU\");\n            let max_gpu_tree_batch_size = SETTINGS.max_gpu_tree_batch_size as usize;\n\n            let _gpu_lock = GPU_LOCK.lock().expect(\"failed to get gpu lock\");\n            let mut tree_builder = TreeBuilder::<Tree::Arity>::new(\n                #[cfg(feature = \"gpu\")]\n                Some(BatcherType::OpenCL),\n                nodes_count,\n                max_gpu_tree_batch_size,\n                tree_r_last_config.rows_to_discard,\n            )\n            .expect(\"failed to create TreeBuilder\");\n\n            // Allocate zeros once and reuse.\n            let zero_leaves: Vec<Fr> = vec![Fr::zero(); max_gpu_tree_batch_size];\n            for (i, config) in configs.iter().enumerate() {\n                let mut consumed = 0;\n                while consumed < nodes_count {\n                    let batch_size = usize::min(max_gpu_tree_batch_size, nodes_count - consumed);\n\n                    consumed += batch_size;\n\n                    if consumed != nodes_count {\n                        tree_builder\n                            .add_leaves(&zero_leaves[0..batch_size])\n                            .expect(\"failed to add leaves\");\n                        continue;\n                    };\n\n                    // If we get here, this is a final leaf batch: build a sub-tree.\n                    info!(\n                        \"building base tree_r_last with GPU {}/{}\",\n                        i + 1,\n                        tree_count\n                    );\n\n                    let (_, tree_data) = tree_builder\n                        .add_final_leaves(&zero_leaves[0..batch_size])\n                        .expect(\"failed to add final leaves\");\n                    let tree_data_len = tree_data.len();\n                    let cache_size = get_merkle_tree_cache_size(\n                        get_merkle_tree_leafs(\n                            config.size.expect(\"config size failure\"),\n                            Tree::Arity::to_usize(),\n                        )\n                        .expect(\"failed to get merkle tree leaves\"),\n                        Tree::Arity::to_usize(),\n                        config.rows_to_discard,\n                    )\n                    .expect(\"failed to get merkle tree cache size\");\n                    assert_eq!(tree_data_len, cache_size);\n\n                    let flat_tree_data: Vec<_> = tree_data\n                        .into_par_iter()\n                        .flat_map(|el| fr_into_bytes(&el))\n                        .collect();\n\n                    // Persist the data to the store based on the current config.\n                    let tree_r_last_path = StoreConfig::data_path(&config.path, &config.id);\n                    trace!(\n                        \"persisting tree r of len {} with {} rows to 
discard at path {:?}\",\n                        tree_data_len,\n                        config.rows_to_discard,\n                        tree_r_last_path\n                    );\n                    let mut f = OpenOptions::new()\n                        .create(true)\n                        .write(true)\n                        .open(&tree_r_last_path)\n                        .expect(\"failed to open file for tree_r_last\");\n                    f.write_all(&flat_tree_data)\n                        .expect(\"failed to write tree_r_last data\");\n                }\n            }\n        } else {\n            info!(\"generating tree r last using the CPU\");\n            for (i, config) in configs.iter().enumerate() {\n                let encoded_data = vec![<Tree::Hasher as Hasher>::Domain::default(); nodes_count];\n\n                info!(\n                    \"building base tree_r_last with CPU {}/{}\",\n                    i + 1,\n                    tree_count\n                );\n                LCTree::<Tree::Hasher, Tree::Arity, U0, U0>::from_par_iter_with_config(\n                    encoded_data,\n                    config.clone(),\n                )?;\n            }\n        };\n\n        create_lc_tree::<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>(\n            tree_r_last_config.size.expect(\"config size failure\"),\n            &configs,\n            &replica_config,\n        )\n    }\n\n    // Assumes data is all zeros.\n    // Replica path is used to create configs, but is not read.\n    // Instead new zeros are provided (hence the need for replica to be all zeros).\n    #[cfg(not(any(feature = \"gpu\")))]\n    fn generate_fake_tree_r_last<TreeArity>(\n        nodes_count: usize,\n        tree_count: usize,\n        tree_r_last_config: StoreConfig,\n        replica_path: PathBuf,\n    ) -> Result<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>\n    where\n        TreeArity: PoseidonArity,\n    {\n        let (configs, replica_config) = split_config_and_replica(\n            tree_r_last_config.clone(),\n            replica_path,\n            nodes_count,\n            tree_count,\n        )?;\n\n        info!(\"generating tree r last using the CPU\");\n        for (i, config) in configs.iter().enumerate() {\n            let encoded_data = vec![<Tree::Hasher as Hasher>::Domain::default(); nodes_count];\n\n            info!(\n                \"building base tree_r_last with CPU {}/{}\",\n                i + 1,\n                tree_count\n            );\n            LCTree::<Tree::Hasher, Tree::Arity, U0, U0>::from_par_iter_with_config(\n                encoded_data,\n                config.clone(),\n            )?;\n        }\n\n        create_lc_tree::<LCTree<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>(\n            tree_r_last_config.size.expect(\"config size failure\"),\n            &configs,\n            &replica_config,\n        )\n    }\n\n    pub fn fake_replicate_phase2<R: AsRef<Path>, S: AsRef<Path>>(\n        tree_c_root: <Tree::Hasher as Hasher>::Domain,\n        replica_path: R,\n        cache_path: S,\n        sector_size: usize,\n    ) -> Result<(\n        <Tree::Hasher as Hasher>::Domain,\n        PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n    )> {\n        let leaf_count = sector_size / NODE_SIZE;\n        let replica_pathbuf = PathBuf::from(replica_path.as_ref());\n        assert_eq!(0, sector_size % NODE_SIZE);\n        let tree_count = 
get_base_tree_count::<Tree>();\n        let nodes_count = leaf_count / tree_count;\n\n        let config = StoreConfig::new(\n            cache_path.as_ref(),\n            CacheKey::CommRLastTree.to_string(),\n            default_rows_to_discard(nodes_count, Tree::Arity::to_usize()),\n        );\n        let tree_r_last_config = StoreConfig::from_config(\n            &config,\n            CacheKey::CommRLastTree.to_string(),\n            Some(get_merkle_tree_len(nodes_count, Tree::Arity::to_usize())?),\n        );\n\n        // Encode original data into the last layer.\n        info!(\"building tree_r_last\");\n        let tree_r_last = Self::generate_fake_tree_r_last::<Tree::Arity>(\n            nodes_count,\n            tree_count,\n            tree_r_last_config,\n            replica_pathbuf,\n        )?;\n        info!(\"tree_r_last done\");\n\n        let tree_r_last_root = tree_r_last.root();\n        drop(tree_r_last);\n\n        // comm_r = H(comm_c || comm_r_last)\n        let comm_r: <Tree::Hasher as Hasher>::Domain =\n            <Tree::Hasher as Hasher>::Function::hash2(&tree_c_root, &tree_r_last_root);\n\n        let p_aux = PersistentAux {\n            comm_c: tree_c_root,\n            comm_r_last: tree_r_last_root,\n        };\n\n        Ok((comm_r, p_aux))\n    }\n\n    pub fn fake_comm_r<R: AsRef<Path>>(\n        tree_c_root: <Tree::Hasher as Hasher>::Domain,\n        existing_p_aux_path: R,\n    ) -> Result<(\n        <Tree::Hasher as Hasher>::Domain,\n        PersistentAux<<Tree::Hasher as Hasher>::Domain>,\n    )> {\n        let existing_p_aux: PersistentAux<<Tree::Hasher as Hasher>::Domain> = {\n            let p_aux_bytes = fs::read(&existing_p_aux_path)?;\n\n            deserialize(&p_aux_bytes)\n        }?;\n\n        let existing_comm_r_last = existing_p_aux.comm_r_last;\n\n        // comm_r = H(comm_c || comm_r_last)\n        let comm_r: <Tree::Hasher as Hasher>::Domain =\n            <Tree::Hasher as Hasher>::Function::hash2(&tree_c_root, &existing_comm_r_last);\n\n        let p_aux = PersistentAux {\n            comm_c: tree_c_root,\n            comm_r_last: existing_comm_r_last,\n        };\n\n        Ok((comm_r, p_aux))\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/proof_scheme.rs",
    "content": "use anyhow::ensure;\nuse filecoin_hashers::{HashFunction, Hasher};\nuse log::trace;\nuse rayon::prelude::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};\nuse storage_proofs_core::{\n    drgraph::Graph, error::Result, merkle::MerkleTreeTrait, proof::ProofScheme,\n};\n\nuse crate::stacked::vanilla::{\n    challenges::ChallengeRequirements,\n    graph::StackedBucketGraph,\n    params::{PrivateInputs, Proof, PublicInputs, PublicParams, SetupParams},\n    proof::StackedDrg,\n};\n\nimpl<'a, 'c, Tree: 'static + MerkleTreeTrait, G: 'static + Hasher> ProofScheme<'a>\n    for StackedDrg<'c, Tree, G>\n{\n    type PublicParams = PublicParams<Tree>;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<Tree::Hasher as Hasher>::Domain, <G as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<Tree, G>;\n    type Proof = Vec<Proof<Tree, G>>;\n    type Requirements = ChallengeRequirements;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        let graph = StackedBucketGraph::<Tree::Hasher>::new_stacked(\n            sp.nodes,\n            sp.degree,\n            sp.expansion_degree,\n            sp.porep_id,\n            sp.api_version,\n        )?;\n\n        Ok(PublicParams::new(graph, sp.layer_challenges.clone()))\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        let proofs = Self::prove_all_partitions(pub_params, pub_inputs, priv_inputs, 1)?;\n        let k = pub_inputs.k.unwrap_or(0);\n        // Because partition proofs require a common setup, the general ProofScheme implementation,\n        // which makes use of `ProofScheme::prove` cannot be used here. 
Instead, we need to prove all\n        // partitions in one pass, as implemented by `prove_all_partitions` below.\n        assert!(\n            k < 1,\n            \"It is a programmer error to call StackedDrg::prove with more than one partition.\"\n        );\n\n        Ok(proofs[k].to_owned())\n    }\n\n    fn prove_all_partitions<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n        partition_count: usize,\n    ) -> Result<Vec<Self::Proof>> {\n        trace!(\"prove_all_partitions\");\n        ensure!(partition_count > 0, \"partitions must not be 0\");\n\n        Self::prove_layers(\n            &pub_params.graph,\n            pub_inputs,\n            &priv_inputs.p_aux,\n            &priv_inputs.t_aux,\n            &pub_params.layer_challenges,\n            pub_params.layer_challenges.layers(),\n            partition_count,\n        )\n    }\n\n    fn verify_all_partitions(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        partition_proofs: &[Self::Proof],\n    ) -> Result<bool> {\n        trace!(\"verify_all_partitions\");\n\n        // generate graphs\n        let graph = &pub_params.graph;\n\n        let expected_comm_r = if let Some(ref tau) = pub_inputs.tau {\n            &tau.comm_r\n        } else {\n            return Ok(false);\n        };\n\n        let res = partition_proofs.par_iter().enumerate().all(|(k, proofs)| {\n            trace!(\n                \"verifying partition proof {}/{}\",\n                k + 1,\n                partition_proofs.len()\n            );\n\n            trace!(\"verify comm_r\");\n            let actual_comm_r: <Tree::Hasher as Hasher>::Domain = {\n                let comm_c = proofs[0].comm_c();\n                let comm_r_last = proofs[0].comm_r_last();\n                <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last)\n            };\n\n            if expected_comm_r != &actual_comm_r {\n                return false;\n            }\n\n            let challenges =\n                pub_inputs.challenges(&pub_params.layer_challenges, graph.size(), Some(k));\n\n            proofs.par_iter().enumerate().all(|(i, proof)| {\n                trace!(\"verify challenge {}/{}\", i + 1, challenges.len());\n\n                // Validate for this challenge\n                let challenge = challenges[i];\n\n                // make sure all proofs have the same comm_c\n                if proof.comm_c() != proofs[0].comm_c() {\n                    return false;\n                }\n                // make sure all proofs have the same comm_r_last\n                if proof.comm_r_last() != proofs[0].comm_r_last() {\n                    return false;\n                }\n\n                proof.verify(pub_params, pub_inputs, challenge, graph)\n            })\n        });\n\n        Ok(res)\n    }\n\n    fn with_partition(pub_in: Self::PublicInputs, k: Option<usize>) -> Self::PublicInputs {\n        PublicInputs {\n            replica_id: pub_in.replica_id,\n            seed: pub_in.seed,\n            tau: pub_in.tau,\n            k,\n        }\n    }\n\n    fn satisfies_requirements(\n        public_params: &PublicParams<Tree>,\n        requirements: &ChallengeRequirements,\n        partitions: usize,\n    ) -> bool {\n        let partition_challenges = public_params.layer_challenges.challenges_count_all();\n\n        assert_eq!(\n            partition_challenges.checked_mul(partitions),\n            
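// Overflow guard: if the total challenge count overflows, `checked_mul` yields\n            // `None` and the assertion fails.\n            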
Some(partition_challenges * partitions)\n        );\n        partition_challenges * partitions >= requirements.minimum_challenges\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/src/stacked/vanilla/utils.rs",
    "content": "use std::cell::UnsafeCell;\nuse std::slice::{self, ChunksExactMut};\n\n/// A slice type which can be shared between threads, but must be fully managed by the caller.\n/// Any synchronization must be ensured by the caller, which is why all access is `unsafe`.\n#[derive(Debug)]\npub struct UnsafeSlice<'a, T> {\n    // holds the data to ensure lifetime correctness\n    data: UnsafeCell<&'a mut [T]>,\n    /// pointer to the data\n    ptr: *mut T,\n    /// Number of elements, not bytes.\n    len: usize,\n}\n\nunsafe impl<'a, T> Sync for UnsafeSlice<'a, T> {}\n\nimpl<'a, T> UnsafeSlice<'a, T> {\n    /// Takes mutable slice, to ensure that `UnsafeSlice` is the only user of this memory, until it gets dropped.\n    pub fn from_slice(source: &'a mut [T]) -> Self {\n        let len = source.len();\n        let ptr = source.as_mut_ptr();\n        let data = UnsafeCell::new(source);\n        Self { data, ptr, len }\n    }\n\n    /// Safety: The caller must ensure that there are no unsynchronized parallel access to the same regions.\n    #[inline]\n    pub unsafe fn as_mut_slice(&self) -> &'a mut [T] {\n        slice::from_raw_parts_mut(self.ptr, self.len)\n    }\n    /// Safety: The caller must ensure that there are no unsynchronized parallel access to the same regions.\n    #[inline]\n    pub unsafe fn as_slice(&self) -> &'a [T] {\n        slice::from_raw_parts(self.ptr, self.len)\n    }\n\n    #[inline]\n    pub unsafe fn get(&self, index: usize) -> &'a T {\n        &*self.ptr.add(index)\n    }\n\n    #[inline]\n    pub unsafe fn get_mut(&self, index: usize) -> &'a mut T {\n        &mut *self.ptr.add(index)\n    }\n}\n\n/// Set all values in the given slice to the provided value.\n#[inline]\npub fn memset(slice: &mut [u8], value: u8) {\n    for v in slice.iter_mut() {\n        *v = value;\n    }\n}\n\n#[inline]\npub fn prepare_block(replica_id: &[u8], layer: u32, buf: &mut [u8]) {\n    buf[..32].copy_from_slice(replica_id);\n    buf[35] = (layer & 0xFF) as u8;\n    buf[64] = 0x80; // Padding\n    buf[126] = 0x02 // Length (512 bits = 64B)\n}\n\n#[derive(Default, Debug, Clone, Copy, PartialEq, Eq)]\npub struct BitMask(u32);\n\nimpl BitMask {\n    /// Sets the full mask for the first `n` bits.\n    #[inline]\n    pub fn set_upto(&mut self, n: u8) {\n        assert!(n <= 32);\n        self.0 |= (1 << n) - 1\n    }\n\n    /// Sets the ith bit.\n    #[inline]\n    pub fn set(&mut self, i: usize) {\n        self.0 |= 1 << i\n    }\n\n    /// Returns true if the ith bit is set, false otherwise.\n    #[inline]\n    pub fn get(self, i: usize) -> bool {\n        self.0 & (1 << i) != 0\n    }\n}\n\n#[derive(Debug)]\npub struct RingBuf {\n    data: UnsafeCell<Box<[u8]>>,\n    slot_size: usize,\n    num_slots: usize,\n}\n\nunsafe impl Sync for RingBuf {}\n\nimpl RingBuf {\n    /// Creates a new\n    pub fn new(slot_size: usize, num_slots: usize) -> Self {\n        let data = vec![0u8; slot_size * num_slots].into_boxed_slice();\n\n        RingBuf {\n            data: UnsafeCell::from(data),\n            slot_size,\n            num_slots,\n        }\n    }\n\n    #[allow(clippy::mut_from_ref)]\n    unsafe fn slice_mut(&self) -> &mut [u8] {\n        slice::from_raw_parts_mut((*self.data.get()).as_mut_ptr(), self.len())\n    }\n\n    fn len(&self) -> usize {\n        self.slot_size * self.num_slots\n    }\n\n    #[allow(clippy::mut_from_ref)]\n    pub unsafe fn slot_mut(&self, slot: usize) -> &mut [u8] {\n        let start = self.slot_size * slot;\n        let end = start + self.slot_size;\n\n        
&mut self.slice_mut()[start..end]\n    }\n\n    pub fn iter_slot_mut(&mut self) -> ChunksExactMut<'_, u8> {\n        // Safety: safe because we are holding &mut self\n        unsafe { self.slice_mut().chunks_exact_mut(self.slot_size) }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-porep/tests/drg_circuit.rs",
    "content": "use bellperson::{\n    bls::{Bls12, Fr},\n    util_cs::test_cs::TestConstraintSystem,\n    ConstraintSystem,\n};\nuse ff::Field;\nuse filecoin_hashers::poseidon::PoseidonHasher;\nuse fr32::{bytes_into_fr, fr_into_bytes};\nuse generic_array::typenum::U2;\nuse merkletree::store::StoreConfig;\nuse pretty_assertions::assert_eq;\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    cache_key::CacheKey,\n    compound_proof,\n    drgraph::{graph_height, BucketGraph, BASE_DEGREE},\n    gadgets::variables::Root,\n    merkle::MerkleProofTrait,\n    proof::ProofScheme,\n    test_helper::setup_replica,\n    util::{data_at_node, default_rows_to_discard},\n    TEST_SEED,\n};\nuse storage_proofs_porep::{\n    drg::{self, DrgPoRep, DrgPoRepCircuit, DrgPoRepCompound},\n    stacked::BINARY_ARITY,\n    PoRep,\n};\nuse tempfile::tempdir;\n\n#[test]\nfn test_drg_porep_circuit() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let nodes = 16;\n    let degree = BASE_DEGREE;\n    let challenge = 2;\n\n    let replica_id: Fr = Fr::random(rng);\n\n    let data: Vec<u8> = (0..nodes)\n        .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n        .collect();\n\n    // MT for original data is always named tree-d, and it will be\n    // referenced later in the process as such.\n    let cache_dir = tempdir().unwrap();\n    let config = StoreConfig::new(\n        cache_dir.path(),\n        CacheKey::CommDTree.to_string(),\n        default_rows_to_discard(nodes, BINARY_ARITY),\n    );\n\n    // Generate a replica path.\n    let replica_path = cache_dir.path().join(\"replica-path\");\n    let mut mmapped_data = setup_replica(&data, &replica_path);\n\n    let data_node: Option<Fr> = Some(\n        bytes_into_fr(\n            data_at_node(&mmapped_data, challenge).expect(\"failed to read original data\"),\n        )\n        .unwrap(),\n    );\n\n    let sp = drg::SetupParams {\n        drg: drg::DrgParams {\n            nodes,\n            degree,\n            expansion_degree: 0,\n            porep_id: [32; 32],\n        },\n        private: false,\n        challenges_count: 1,\n        api_version: ApiVersion::V1_1_0,\n    };\n\n    let pp = DrgPoRep::<PoseidonHasher, BucketGraph<_>>::setup(&sp)\n        .expect(\"failed to create drgporep setup\");\n    let (tau, aux) = DrgPoRep::<PoseidonHasher, _>::replicate(\n        &pp,\n        &replica_id.into(),\n        (mmapped_data.as_mut()).into(),\n        None,\n        config,\n        replica_path,\n    )\n    .expect(\"failed to replicate\");\n\n    let pub_inputs = drg::PublicInputs {\n        replica_id: Some(replica_id.into()),\n        challenges: vec![challenge],\n        tau: Some(tau),\n    };\n\n    let priv_inputs = drg::PrivateInputs::<PoseidonHasher> {\n        tree_d: &aux.tree_d,\n        tree_r: &aux.tree_r,\n        tree_r_config_rows_to_discard: default_rows_to_discard(nodes, BINARY_ARITY),\n    };\n\n    let proof_nc = DrgPoRep::<PoseidonHasher, _>::prove(&pp, &pub_inputs, &priv_inputs)\n        .expect(\"failed to prove\");\n\n    assert!(\n        DrgPoRep::<PoseidonHasher, _>::verify(&pp, &pub_inputs, &proof_nc)\n            .expect(\"failed to verify\"),\n        \"failed to verify (non circuit)\"\n    );\n\n    let replica_node: Option<Fr> = Some(proof_nc.replica_nodes[0].data.into());\n\n    let replica_node_path = proof_nc.replica_nodes[0].proof.as_options();\n    let replica_root = Root::Val(Some(proof_nc.replica_root.into()));\n    let 
replica_parents = proof_nc\n        .replica_parents\n        .iter()\n        .map(|v| {\n            v.iter()\n                .map(|(_, parent)| Some(parent.data.into()))\n                .collect()\n        })\n        .collect();\n    let replica_parents_paths: Vec<_> = proof_nc\n        .replica_parents\n        .iter()\n        .map(|v| {\n            v.iter()\n                .map(|(_, parent)| parent.proof.as_options())\n                .collect()\n        })\n        .collect();\n\n    let data_node_path = proof_nc.nodes[0].proof.as_options();\n    let data_root = Root::Val(Some(proof_nc.data_root.into()));\n    let replica_id = Some(replica_id);\n\n    assert!(\n        proof_nc.nodes[0].proof.validate(challenge),\n        \"failed to verify data commitment\"\n    );\n    assert!(\n        proof_nc.nodes[0]\n            .proof\n            .validate_data(data_node.unwrap().into()),\n        \"failed to verify data commitment with data\"\n    );\n\n    let mut cs = TestConstraintSystem::<Bls12>::new();\n    DrgPoRepCircuit::<PoseidonHasher>::synthesize(\n        cs.namespace(|| \"drgporep\"),\n        vec![replica_node],\n        vec![replica_node_path],\n        replica_root,\n        replica_parents,\n        replica_parents_paths,\n        vec![data_node],\n        vec![data_node_path],\n        data_root,\n        replica_id,\n        false,\n    )\n    .expect(\"failed to synthesize circuit\");\n\n    if !cs.is_satisfied() {\n        println!(\n            \"failed to satisfy: {:?}\",\n            cs.which_is_unsatisfied().unwrap()\n        );\n    }\n\n    assert!(cs.is_satisfied(), \"constraints not satisfied\");\n    assert_eq!(cs.num_inputs(), 18, \"wrong number of inputs\");\n    assert_eq!(cs.num_constraints(), 115_660, \"wrong number of constraints\");\n\n    assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n    assert_eq!(\n        cs.get_input(1, \"drgporep/replica_id/input variable\"),\n        replica_id.unwrap()\n    );\n\n    let generated_inputs =\n        <DrgPoRepCompound<_, _> as compound_proof::CompoundProof<_, _>>::generate_public_inputs(\n            &pub_inputs,\n            &pp,\n            None,\n        )\n        .unwrap();\n    let expected_inputs = cs.get_inputs();\n\n    for ((input, label), generated_input) in\n        expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n    {\n        assert_eq!(input, generated_input, \"{}\", label);\n    }\n\n    assert_eq!(\n        generated_inputs.len(),\n        expected_inputs.len() - 1,\n        \"inputs are not the same length\"\n    );\n\n    cache_dir.close().expect(\"Failed to remove cache dir\");\n}\n\n#[test]\nfn test_drg_porep_circuit_inputs_and_constraints() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    // 1 GB\n    let n = (1 << 30) / 32;\n    let m = BASE_DEGREE;\n    let tree_depth = graph_height::<U2>(n);\n\n    let mut cs = TestConstraintSystem::<Bls12>::new();\n    DrgPoRepCircuit::<PoseidonHasher>::synthesize(\n        cs.namespace(|| \"drgporep\"),\n        vec![Some(Fr::random(rng)); 1],\n        vec![vec![(vec![Some(Fr::random(rng))], Some(0)); tree_depth]; 1],\n        Root::Val(Some(Fr::random(rng))),\n        vec![vec![Some(Fr::random(rng)); m]; 1],\n        vec![vec![vec![(vec![Some(Fr::random(rng))], Some(0)); tree_depth]; m]; 1],\n        vec![Some(Fr::random(rng)); 1],\n        vec![vec![(vec![Some(Fr::random(rng))], Some(0)); tree_depth]; 1],\n        Root::Val(Some(Fr::random(rng))),\n        Some(Fr::random(rng)),\n        false,\n    )\n    
.expect(\"failed to synthesize circuit\");\n\n    assert_eq!(cs.num_inputs(), 18, \"wrong number of inputs\");\n    assert_eq!(cs.num_constraints(), 170_924, \"wrong number of constraints\");\n}\n"
  },
  {
    "path": "storage-proofs-porep/tests/drg_compound.rs",
    "content": "use bellperson::{\n    bls::Fr,\n    util_cs::{metric_cs::MetricCS, test_cs::TestConstraintSystem},\n    Circuit,\n};\nuse ff::Field;\nuse filecoin_hashers::{poseidon::PoseidonHasher, Hasher};\nuse fr32::fr_into_bytes;\nuse merkletree::store::StoreConfig;\nuse pretty_assertions::assert_eq;\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    cache_key::CacheKey,\n    compound_proof::{self, CompoundProof},\n    drgraph::{BucketGraph, BASE_DEGREE},\n    merkle::{BinaryMerkleTree, MerkleTreeTrait},\n    proof::NoRequirements,\n    test_helper::setup_replica,\n    util::default_rows_to_discard,\n    TEST_SEED,\n};\nuse storage_proofs_porep::{\n    drg::{DrgParams, DrgPoRep, DrgPoRepCompound, PrivateInputs, PublicInputs, SetupParams},\n    stacked::BINARY_ARITY,\n    PoRep,\n};\nuse tempfile::tempdir;\n\n#[test]\n#[ignore]\nfn test_drg_porep_compound_poseidon() {\n    drg_porep_compound::<BinaryMerkleTree<PoseidonHasher>>();\n}\n\nfn drg_porep_compound<Tree: 'static + MerkleTreeTrait>() {\n    // femme::pretty::Logger::new()\n    //     .start(log::LevelFilter::Trace)\n    //     .ok();\n\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let nodes = 8;\n    let degree = BASE_DEGREE;\n    let challenges = vec![1, 3];\n\n    let replica_id: Fr = Fr::random(rng);\n    let data: Vec<u8> = (0..nodes)\n        .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n        .collect();\n\n    // MT for original data is always named tree-d, and it will be\n    // referenced later in the process as such.\n    let cache_dir = tempdir().unwrap();\n    let config = StoreConfig::new(\n        cache_dir.path(),\n        CacheKey::CommDTree.to_string(),\n        default_rows_to_discard(nodes, BINARY_ARITY),\n    );\n\n    // Generate a replica path.\n    let replica_path = cache_dir.path().join(\"replica-path\");\n    let mut mmapped_data = setup_replica(&data, &replica_path);\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params: SetupParams {\n            drg: DrgParams {\n                nodes,\n                degree,\n                expansion_degree: 0,\n                porep_id: [32; 32],\n            },\n            private: false,\n            challenges_count: 2,\n            api_version: ApiVersion::V1_1_0,\n        },\n        partitions: None,\n        priority: false,\n    };\n\n    let public_params =\n        DrgPoRepCompound::<Tree::Hasher, BucketGraph<Tree::Hasher>>::setup(&setup_params)\n            .expect(\"setup failed\");\n\n    let data_tree: Option<BinaryMerkleTree<Tree::Hasher>> = None;\n    let (tau, aux) = DrgPoRep::<Tree::Hasher, BucketGraph<_>>::replicate(\n        &public_params.vanilla_params,\n        &replica_id.into(),\n        (mmapped_data.as_mut()).into(),\n        data_tree,\n        config,\n        replica_path,\n    )\n    .expect(\"failed to replicate\");\n\n    let public_inputs = PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n        replica_id: Some(replica_id.into()),\n        challenges,\n        tau: Some(tau),\n    };\n    let private_inputs = PrivateInputs {\n        tree_d: &aux.tree_d,\n        tree_r: &aux.tree_r,\n        tree_r_config_rows_to_discard: default_rows_to_discard(nodes, BINARY_ARITY),\n    };\n\n    // This duplication is necessary so public_params don't outlive public_inputs and private_inputs.\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params: SetupParams {\n            drg: DrgParams {\n          
      nodes,\n                degree,\n                expansion_degree: 0,\n                porep_id: [32; 32],\n            },\n            private: false,\n            challenges_count: 2,\n            api_version: ApiVersion::V1_1_0,\n        },\n        partitions: None,\n        priority: false,\n    };\n\n    let public_params =\n        DrgPoRepCompound::<Tree::Hasher, BucketGraph<Tree::Hasher>>::setup(&setup_params)\n            .expect(\"setup failed\");\n\n    {\n        let (circuit, inputs) = DrgPoRepCompound::<Tree::Hasher, _>::circuit_for_test(\n            &public_params,\n            &public_inputs,\n            &private_inputs,\n        )\n        .unwrap();\n\n        let mut cs = TestConstraintSystem::new();\n\n        circuit\n            .synthesize(&mut cs)\n            .expect(\"failed to synthesize test circuit\");\n        assert!(cs.is_satisfied());\n        assert!(cs.verify(&inputs));\n\n        let blank_circuit = <DrgPoRepCompound<_, _> as CompoundProof<_, _>>::blank_circuit(\n            &public_params.vanilla_params,\n        );\n\n        let mut cs_blank = MetricCS::new();\n        blank_circuit\n            .synthesize(&mut cs_blank)\n            .expect(\"failed to synthesize blank circuit\");\n\n        let a = cs_blank.pretty_print_list();\n        let b = cs.pretty_print_list();\n\n        for (i, (a, b)) in a.chunks(100).zip(b.chunks(100)).enumerate() {\n            assert_eq!(a, b, \"failed at chunk {}\", i);\n        }\n    }\n\n    {\n        let gparams = DrgPoRepCompound::<Tree::Hasher, _>::groth_params(\n            Some(rng),\n            &public_params.vanilla_params,\n        )\n        .expect(\"failed to get groth params\");\n\n        let proof = DrgPoRepCompound::<Tree::Hasher, _>::prove(\n            &public_params,\n            &public_inputs,\n            &private_inputs,\n            &gparams,\n        )\n        .expect(\"failed while proving\");\n\n        let verified = DrgPoRepCompound::<Tree::Hasher, _>::verify(\n            &public_params,\n            &public_inputs,\n            &proof,\n            &NoRequirements,\n        )\n        .expect(\"failed while verifying\");\n\n        assert!(verified);\n    }\n\n    cache_dir.close().expect(\"Failed to remove cache dir\");\n}\n"
  },
  {
    "path": "storage-proofs-porep/tests/drg_vanilla.rs",
    "content": "use bellperson::bls::Fr;\nuse ff::Field;\nuse filecoin_hashers::{blake2s::Blake2sHasher, sha256::Sha256Hasher, Domain, Hasher};\nuse fr32::fr_into_bytes;\nuse merkletree::store::StoreConfig;\nuse pretty_assertions::assert_eq;\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    cache_key::CacheKey,\n    drgraph::{BucketGraph, BASE_DEGREE},\n    merkle::{BinaryMerkleTree, MerkleTreeTrait},\n    proof::ProofScheme,\n    table_tests,\n    test_helper::setup_replica,\n    util::default_rows_to_discard,\n    TEST_SEED,\n};\nuse storage_proofs_porep::{\n    drg::{self, DrgPoRep},\n    stacked::BINARY_ARITY,\n    PoRep,\n};\nuse tempfile::tempdir;\n\n#[test]\nfn text_drg_porep_extract_all_sha256() {\n    test_extract_all::<BinaryMerkleTree<Sha256Hasher>>();\n}\n\n#[test]\nfn text_drg_porep_extract_all_blake2s() {\n    test_extract_all::<BinaryMerkleTree<Blake2sHasher>>();\n}\n\nfn test_extract_all<Tree: MerkleTreeTrait>() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let replica_id: <Tree::Hasher as Hasher>::Domain =\n        <Tree::Hasher as Hasher>::Domain::random(rng);\n    let nodes = 4;\n    let data = vec![2u8; 32 * nodes];\n\n    // MT for original data is always named tree-d, and it will be\n    // referenced later in the process as such.\n    let cache_dir = tempdir().expect(\"tempdir failure\");\n    let config = StoreConfig::new(\n        cache_dir.path(),\n        CacheKey::CommDTree.to_string(),\n        default_rows_to_discard(nodes, BINARY_ARITY),\n    );\n\n    // Generate a replica path.\n    let replica_path = cache_dir.path().join(\"replica-path\");\n    let mut mmapped_data = setup_replica(&data, &replica_path);\n\n    let sp = drg::SetupParams {\n        drg: drg::DrgParams {\n            nodes,\n            degree: BASE_DEGREE,\n            expansion_degree: 0,\n            porep_id: [32; 32],\n        },\n        private: false,\n        challenges_count: 1,\n        api_version: ApiVersion::V1_1_0,\n    };\n\n    let pp: drg::PublicParams<Tree::Hasher, BucketGraph<Tree::Hasher>> =\n        DrgPoRep::setup(&sp).expect(\"setup failed\");\n\n    DrgPoRep::replicate(\n        &pp,\n        &replica_id,\n        (mmapped_data.as_mut()).into(),\n        None,\n        config.clone(),\n        replica_path,\n    )\n    .expect(\"replication failed\");\n\n    let mut copied = vec![0; data.len()];\n    copied.copy_from_slice(&mmapped_data);\n    assert_ne!(data, copied, \"replication did not change data\");\n\n    DrgPoRep::<Tree::Hasher, _>::extract_all(&pp, &replica_id, mmapped_data.as_mut(), Some(config))\n        .unwrap_or_else(|e| {\n            panic!(\"Failed to extract data from `DrgPoRep`: {}\", e);\n        });\n\n    assert_eq!(data, mmapped_data.as_ref(), \"failed to extract data\");\n\n    cache_dir.close().expect(\"Failed to remove cache dir\");\n}\n\n#[test]\nfn test_drg_porep_extract_sha256() {\n    test_extract::<BinaryMerkleTree<Sha256Hasher>>();\n}\n\n#[test]\nfn test_drg_porep_extract_blake2s() {\n    test_extract::<BinaryMerkleTree<Blake2sHasher>>();\n}\n\nfn test_extract<Tree: MerkleTreeTrait>() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let replica_id: <Tree::Hasher as Hasher>::Domain =\n        <Tree::Hasher as Hasher>::Domain::random(rng);\n    let nodes = 4;\n    let node_size = 32;\n    let data = vec![2u8; node_size * nodes];\n\n    // MT for original data is always named tree-d, and it will be\n    // referenced later in the process 
as such.\n    let cache_dir = tempdir().expect(\"tempdir failure\");\n    let config = StoreConfig::new(\n        cache_dir.path(),\n        CacheKey::CommDTree.to_string(),\n        default_rows_to_discard(nodes, BINARY_ARITY),\n    );\n\n    // Generate a replica path.\n    let replica_path = cache_dir.path().join(\"replica-path\");\n    let mut mmapped_data = setup_replica(&data, &replica_path);\n\n    let sp = drg::SetupParams {\n        drg: drg::DrgParams {\n            nodes: data.len() / node_size,\n            degree: BASE_DEGREE,\n            expansion_degree: 0,\n            porep_id: [32; 32],\n        },\n        private: false,\n        challenges_count: 1,\n        api_version: ApiVersion::V1_1_0,\n    };\n\n    let pp = DrgPoRep::<Tree::Hasher, BucketGraph<Tree::Hasher>>::setup(&sp).expect(\"setup failed\");\n\n    DrgPoRep::replicate(\n        &pp,\n        &replica_id,\n        (mmapped_data.as_mut()).into(),\n        None,\n        config.clone(),\n        replica_path,\n    )\n    .expect(\"replication failed\");\n\n    let mut copied = vec![0; data.len()];\n    copied.copy_from_slice(&mmapped_data);\n    assert_ne!(data, copied, \"replication did not change data\");\n\n    for i in 0..nodes {\n        DrgPoRep::extract(\n            &pp,\n            &replica_id,\n            mmapped_data.as_mut(),\n            i,\n            Some(config.clone()),\n        )\n        .expect(\"failed to extract node data from PoRep\");\n\n        // This is no longer working, so the assertion is now incorrect.\n        //let original_data = data_at_node(&data, i).expect(\"data_at_node failure\");\n        //let extracted_data = &mmapped_data[i * node_size..(i * node_size) + node_size];\n        //assert_eq!(original_data, extracted_data, \"failed to extract data\");\n    }\n}\n\ntable_tests! 
{\n    test_prove_verify {\n        test_drg_porep_prove_verify_32_16_1(16, 1);\n        test_drg_porep_prove_verify_32_64_1(64, 1);\n        test_drg_porep_prove_verify_32_64_2(64, 2);\n        test_drg_porep_prove_verify_32_256_1(256, 1);\n        test_drg_porep_prove_verify_32_256_2(256, 2);\n        test_drg_porep_prove_verify_32_256_3(256, 3);\n        test_drg_porep_prove_verify_32_256_4(256, 4);\n        test_drg_porep_prove_verify_32_256_5(256, 5);\n    }\n}\n\nfn test_prove_verify(n: usize, i: usize) {\n    test_prove_verify_aux::<BinaryMerkleTree<Sha256Hasher>>(n, i, false, false);\n    test_prove_verify_aux::<BinaryMerkleTree<Blake2sHasher>>(n, i, false, false);\n}\n\nfn test_prove_verify_aux<Tree: MerkleTreeTrait>(\n    nodes: usize,\n    i: usize,\n    use_wrong_challenge: bool,\n    use_wrong_parents: bool,\n) {\n    assert!(i < nodes);\n\n    // The loop is here in case we need to retry because of an edge case in the test design.\n    loop {\n        let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n        let degree = BASE_DEGREE;\n        let expansion_degree = 0;\n\n        let replica_id: <Tree::Hasher as Hasher>::Domain =\n            <Tree::Hasher as Hasher>::Domain::random(rng);\n        let data: Vec<u8> = (0..nodes)\n            .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n            .collect();\n\n        // MT for original data is always named tree-d, and it will be\n        // referenced later in the process as such.\n        let cache_dir = tempdir().expect(\"tempdir failure\");\n        let config = StoreConfig::new(\n            cache_dir.path(),\n            CacheKey::CommDTree.to_string(),\n            default_rows_to_discard(nodes, BINARY_ARITY),\n        );\n\n        // Generate a replica path.\n        let replica_path = cache_dir.path().join(\"replica-path\");\n        let mut mmapped_data = setup_replica(&data, &replica_path);\n\n        let challenge = i;\n\n        let sp = drg::SetupParams {\n            drg: drg::DrgParams {\n                nodes,\n                degree,\n                expansion_degree,\n                porep_id: [32; 32],\n            },\n            private: false,\n            challenges_count: 2,\n            api_version: ApiVersion::V1_1_0,\n        };\n\n        let pp = DrgPoRep::<Tree::Hasher, BucketGraph<_>>::setup(&sp).expect(\"setup failed\");\n\n        let (tau, aux) = DrgPoRep::<Tree::Hasher, _>::replicate(\n            &pp,\n            &replica_id,\n            (mmapped_data.as_mut()).into(),\n            None,\n            config,\n            replica_path.clone(),\n        )\n        .expect(\"replication failed\");\n\n        let mut copied = vec![0; data.len()];\n        copied.copy_from_slice(&mmapped_data);\n        assert_ne!(data, copied, \"replication did not change data\");\n\n        let pub_inputs = drg::PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n            replica_id: Some(replica_id),\n            challenges: vec![challenge, challenge],\n            tau: Some(tau),\n        };\n\n        let priv_inputs = drg::PrivateInputs::<Tree::Hasher> {\n            tree_d: &aux.tree_d,\n            tree_r: &aux.tree_r,\n            tree_r_config_rows_to_discard: default_rows_to_discard(nodes, BINARY_ARITY),\n        };\n\n        let real_proof = DrgPoRep::<Tree::Hasher, _>::prove(&pp, &pub_inputs, &priv_inputs)\n            .expect(\"proving failed\");\n\n        if use_wrong_parents {\n            // Only one 'wrong' option will be tested at a time.\n            
assert!(!use_wrong_challenge);\n            let real_parents = real_proof.replica_parents;\n\n            // Parent vector claiming the wrong parents.\n            let fake_parents = vec![real_parents[0]\n                .iter()\n                // Incrementing each parent node will give us a different parent set.\n                // It's fine to be out of range, since this only needs to fail.\n                .map(|(i, data_proof)| (i + 1, data_proof.clone()))\n                .collect::<Vec<_>>()];\n\n            let proof = drg::Proof::new(\n                real_proof.replica_nodes.clone(),\n                fake_parents,\n                real_proof.nodes.clone(),\n            );\n\n            let is_valid = DrgPoRep::verify(&pp, &pub_inputs, &proof).expect(\"verification failed\");\n\n            assert!(!is_valid, \"verified in error -- with wrong parents\");\n\n            let mut all_same = true;\n            for (p, _) in &real_parents[0] {\n                if *p != real_parents[0][0].0 {\n                    all_same = false;\n                }\n            }\n\n            if all_same {\n                println!(\"invalid test data can't scramble proofs with all same parents.\");\n\n                // If for some reason, we hit this condition because of the data passed in,\n                // try again.\n                continue;\n            }\n\n            // Parent vector claiming the right parents but providing valid proofs for different\n            // parents.\n            let fake_proof_parents = vec![real_parents[0]\n                .iter()\n                .enumerate()\n                .map(|(i, (p, _))| {\n                    // Rotate the real parent proofs.\n                    let x = (i + 1) % real_parents[0].len();\n                    let j = real_parents[0][x].0;\n                    (*p, real_parents[0][j as usize].1.clone())\n                })\n                .collect::<Vec<_>>()];\n\n            let proof2 = drg::Proof::new(\n                real_proof.replica_nodes,\n                fake_proof_parents,\n                real_proof.nodes,\n            );\n\n            assert!(\n                !DrgPoRep::<Tree::Hasher, _>::verify(&pp, &pub_inputs, &proof2).unwrap_or_else(\n                    |e| {\n                        panic!(\"Verification failed: {}\", e);\n                    }\n                ),\n                \"verified in error -- with wrong parent proofs\"\n            );\n\n            return;\n        }\n\n        let proof = real_proof;\n\n        if use_wrong_challenge {\n            let pub_inputs_with_wrong_challenge_for_proof =\n                drg::PublicInputs::<<Tree::Hasher as Hasher>::Domain> {\n                    replica_id: Some(replica_id),\n                    challenges: vec![if challenge == 1 { 2 } else { 1 }],\n                    tau: Some(tau),\n                };\n            let verified = DrgPoRep::<Tree::Hasher, _>::verify(\n                &pp,\n                &pub_inputs_with_wrong_challenge_for_proof,\n                &proof,\n            )\n            .expect(\"Verification failed\");\n            assert!(\n                !verified,\n                \"wrongly verified proof which does not match challenge in public input\"\n            );\n        } else {\n            assert!(\n                DrgPoRep::<Tree::Hasher, _>::verify(&pp, &pub_inputs, &proof)\n                    .expect(\"verification failed\"),\n                \"failed to verify\"\n            );\n        }\n\n        
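// Close the temp dir explicitly so that removal errors fail the test; a\n        // plain drop would silently ignore them.\n        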
cache_dir.close().expect(\"Failed to remove cache dir\");\n\n        // Normally, just run once.\n        break;\n    }\n}\n\n#[test]\nfn test_drg_porep_verify_fails_on_wrong_challenge() {\n    test_prove_verify_aux::<BinaryMerkleTree<Sha256Hasher>>(8, 1, true, false);\n    test_prove_verify_aux::<BinaryMerkleTree<Blake2sHasher>>(8, 1, true, false);\n}\n\n#[test]\nfn test_drg_porep_verify_fails_on_wrong_parents() {\n    test_prove_verify_aux::<BinaryMerkleTree<Sha256Hasher>>(8, 5, false, true);\n    test_prove_verify_aux::<BinaryMerkleTree<Blake2sHasher>>(8, 5, false, true);\n}\n"
  },
  {
    "path": "storage-proofs-porep/tests/stacked_circuit.rs",
    "content": "use bellperson::{\n    bls::{Bls12, Fr},\n    util_cs::{metric_cs::MetricCS, test_cs::TestConstraintSystem},\n    Circuit, ConstraintSystem,\n};\nuse ff::Field;\nuse filecoin_hashers::{poseidon::PoseidonHasher, sha256::Sha256Hasher, Hasher};\nuse fr32::fr_into_bytes;\nuse generic_array::typenum::{U0, U2, U4, U8};\nuse merkletree::store::StoreConfig;\nuse rand::{Rng, SeedableRng};\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    cache_key::CacheKey,\n    compound_proof::CompoundProof,\n    drgraph::BASE_DEGREE,\n    merkle::{get_base_tree_count, DiskTree, MerkleTreeTrait},\n    proof::ProofScheme,\n    test_helper::setup_replica,\n    util::default_rows_to_discard,\n    TEST_SEED,\n};\nuse storage_proofs_porep::{\n    stacked::{\n        LayerChallenges, PrivateInputs, PublicInputs, SetupParams, StackedCompound, StackedDrg,\n        TemporaryAux, TemporaryAuxCache, BINARY_ARITY, EXP_DEGREE,\n    },\n    PoRep,\n};\nuse tempfile::tempdir;\n\n#[test]\nfn test_stacked_porep_circuit_poseidon_base_2() {\n    test_stacked_porep_circuit::<DiskTree<PoseidonHasher, U2, U0, U0>>(22, 1_206_212);\n}\n\n#[test]\nfn test_stacked_input_circuit_poseidon_base_8() {\n    test_stacked_porep_circuit::<DiskTree<PoseidonHasher, U8, U0, U0>>(22, 1_199_620);\n}\n\n#[test]\nfn test_stacked_input_circuit_poseidon_sub_8_4() {\n    test_stacked_porep_circuit::<DiskTree<PoseidonHasher, U8, U4, U0>>(22, 1_296_576);\n}\n\n#[test]\nfn test_stacked_input_circuit_poseidon_top_8_4_2() {\n    test_stacked_porep_circuit::<DiskTree<PoseidonHasher, U8, U4, U2>>(22, 1_346_982);\n}\n\nfn test_stacked_porep_circuit<Tree: MerkleTreeTrait + 'static>(\n    expected_inputs: usize,\n    expected_constraints: usize,\n) {\n    let nodes = 8 * get_base_tree_count::<Tree>();\n    let degree = BASE_DEGREE;\n    let expansion_degree = EXP_DEGREE;\n    let num_layers = 2;\n    let layer_challenges = LayerChallenges::new(num_layers, 1);\n\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let replica_id: Fr = Fr::random(rng);\n    let data: Vec<u8> = (0..nodes)\n        .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n        .collect();\n\n    // MT for original data is always named tree-d, and it will be\n    // referenced later in the process as such.\n    let cache_dir = tempdir().unwrap();\n    let config = StoreConfig::new(\n        cache_dir.path(),\n        CacheKey::CommDTree.to_string(),\n        default_rows_to_discard(nodes, BINARY_ARITY),\n    );\n\n    // Generate a replica path.\n    let replica_path = cache_dir.path().join(\"replica-path\");\n    let mut mmapped_data = setup_replica(&data, &replica_path);\n\n    let arbitrary_porep_id = [44; 32];\n    let sp = SetupParams {\n        nodes,\n        degree,\n        expansion_degree,\n        porep_id: arbitrary_porep_id,\n        layer_challenges,\n        api_version: ApiVersion::V1_1_0,\n    };\n\n    let pp = StackedDrg::<Tree, Sha256Hasher>::setup(&sp).expect(\"setup failed\");\n    let (tau, (p_aux, t_aux)) = StackedDrg::<Tree, Sha256Hasher>::replicate(\n        &pp,\n        &replica_id.into(),\n        (mmapped_data.as_mut()).into(),\n        None,\n        config,\n        replica_path.clone(),\n    )\n    .expect(\"replication failed\");\n\n    let mut copied = vec![0; data.len()];\n    copied.copy_from_slice(&mmapped_data);\n    assert_ne!(data, copied, \"replication did not change data\");\n\n    let seed = rng.gen();\n    let pub_inputs =\n        PublicInputs::<<Tree::Hasher as Hasher>::Domain, 
<Sha256Hasher as Hasher>::Domain> {\n            replica_id: replica_id.into(),\n            seed,\n            tau: Some(tau),\n            k: None,\n        };\n\n    // Store copy of original t_aux for later resource deletion.\n    let t_aux_orig = t_aux.clone();\n\n    // Convert TemporaryAux to TemporaryAuxCache, which instantiates all\n    // elements based on the configs stored in TemporaryAux.\n    let t_aux = TemporaryAuxCache::<Tree, Sha256Hasher>::new(&t_aux, replica_path)\n        .expect(\"failed to restore contents of t_aux\");\n\n    let priv_inputs = PrivateInputs::<Tree, Sha256Hasher> { p_aux, t_aux };\n\n    let proofs =\n        StackedDrg::<Tree, Sha256Hasher>::prove_all_partitions(&pp, &pub_inputs, &priv_inputs, 1)\n            .expect(\"failed to generate partition proofs\");\n\n    let proofs_are_valid =\n        StackedDrg::<Tree, Sha256Hasher>::verify_all_partitions(&pp, &pub_inputs, &proofs)\n            .expect(\"failed while trying to verify partition proofs\");\n\n    assert!(proofs_are_valid);\n\n    // Discard cached MTs that are no longer needed.\n    TemporaryAux::<Tree, Sha256Hasher>::clear_temp(t_aux_orig).expect(\"t_aux delete failed\");\n\n    {\n        // Verify that MetricCS returns the same metrics as TestConstraintSystem.\n        let mut cs = MetricCS::<Bls12>::new();\n\n        StackedCompound::<Tree, Sha256Hasher>::circuit(&pub_inputs, (), &proofs[0], &pp, None)\n            .expect(\"circuit failed\")\n            .synthesize(&mut cs.namespace(|| \"stacked drgporep\"))\n            .expect(\"failed to synthesize circuit\");\n\n        assert_eq!(cs.num_inputs(), expected_inputs, \"wrong number of inputs\");\n        assert_eq!(\n            cs.num_constraints(),\n            expected_constraints,\n            \"wrong number of constraints\"\n        );\n    }\n    let mut cs = TestConstraintSystem::<Bls12>::new();\n\n    StackedCompound::<Tree, Sha256Hasher>::circuit(&pub_inputs, (), &proofs[0], &pp, None)\n        .expect(\"circuit failed\")\n        .synthesize(&mut cs.namespace(|| \"stacked drgporep\"))\n        .expect(\"failed to synthesize circuit\");\n\n    assert!(cs.is_satisfied(), \"constraints not satisfied\");\n    assert_eq!(cs.num_inputs(), expected_inputs, \"wrong number of inputs\");\n    assert_eq!(\n        cs.num_constraints(),\n        expected_constraints,\n        \"wrong number of constraints\"\n    );\n\n    assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n    let generated_inputs = <StackedCompound<Tree, Sha256Hasher> as CompoundProof<\n        StackedDrg<'_, Tree, Sha256Hasher>,\n        _,\n    >>::generate_public_inputs(&pub_inputs, &pp, None)\n    .expect(\"failed to generate public inputs\");\n    let expected_inputs = cs.get_inputs();\n\n    for ((input, label), generated_input) in\n        expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n    {\n        assert_eq!(input, generated_input, \"{}\", label);\n    }\n\n    assert_eq!(\n        generated_inputs.len(),\n        expected_inputs.len() - 1,\n        \"inputs are not the same length\"\n    );\n\n    cache_dir.close().expect(\"Failed to remove cache dir\");\n}\n"
  },
  {
    "path": "storage-proofs-porep/tests/stacked_compound.rs",
    "content": "use bellperson::{\n    bls::Fr,\n    util_cs::{metric_cs::MetricCS, test_cs::TestConstraintSystem},\n    Circuit,\n};\nuse ff::Field;\nuse filecoin_hashers::{poseidon::PoseidonHasher, sha256::Sha256Hasher, Hasher};\nuse fr32::fr_into_bytes;\nuse generic_array::typenum::{U0, U2, U4, U8};\nuse merkletree::store::StoreConfig;\nuse rand::{Rng, SeedableRng};\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    cache_key::CacheKey,\n    compound_proof::{self, CompoundProof},\n    drgraph::BASE_DEGREE,\n    merkle::{get_base_tree_count, DiskTree, MerkleTreeTrait},\n    test_helper::setup_replica,\n    util::default_rows_to_discard,\n    TEST_SEED,\n};\nuse storage_proofs_porep::{\n    stacked::{\n        ChallengeRequirements, LayerChallenges, PrivateInputs, PublicInputs, SetupParams,\n        StackedCompound, StackedDrg, TemporaryAux, TemporaryAuxCache, BINARY_ARITY, EXP_DEGREE,\n    },\n    PoRep,\n};\nuse tempfile::tempdir;\n\n#[test]\n#[ignore]\nfn test_stacked_compound_poseidon_base_8() {\n    test_stacked_compound::<DiskTree<PoseidonHasher, U8, U0, U0>>();\n}\n\n#[test]\n#[ignore]\nfn test_stacked_compound_poseidon_sub_8_4() {\n    test_stacked_compound::<DiskTree<PoseidonHasher, U8, U4, U0>>();\n}\n\n#[test]\n#[ignore]\nfn test_stacked_compound_poseidon_top_8_4_2() {\n    test_stacked_compound::<DiskTree<PoseidonHasher, U8, U4, U2>>();\n}\n\nfn test_stacked_compound<Tree: 'static + MerkleTreeTrait>() {\n    let nodes = 8 * get_base_tree_count::<Tree>();\n\n    let degree = BASE_DEGREE;\n    let expansion_degree = EXP_DEGREE;\n    let num_layers = 2;\n    let layer_challenges = LayerChallenges::new(num_layers, 1);\n    let partition_count = 1;\n\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let replica_id: Fr = Fr::random(rng);\n    let data: Vec<u8> = (0..nodes)\n        .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n        .collect();\n\n    let arbitrary_porep_id = [55; 32];\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params: SetupParams {\n            nodes,\n            degree,\n            expansion_degree,\n            porep_id: arbitrary_porep_id,\n            layer_challenges,\n            api_version: ApiVersion::V1_1_0,\n        },\n        partitions: Some(partition_count),\n        priority: false,\n    };\n\n    // MT for original data is always named tree-d, and it will be\n    // referenced later in the process as such.\n    let cache_dir = tempdir().unwrap();\n    let config = StoreConfig::new(\n        cache_dir.path(),\n        CacheKey::CommDTree.to_string(),\n        default_rows_to_discard(nodes, BINARY_ARITY),\n    );\n\n    // Generate a replica path.\n    let replica_path = cache_dir.path().join(\"replica-path\");\n    let mut mmapped_data = setup_replica(&data, &replica_path);\n\n    let public_params = StackedCompound::setup(&setup_params).expect(\"setup failed\");\n    let (tau, (p_aux, t_aux)) = StackedDrg::<Tree, _>::replicate(\n        &public_params.vanilla_params,\n        &replica_id.into(),\n        (mmapped_data.as_mut()).into(),\n        None,\n        config,\n        replica_path.clone(),\n    )\n    .expect(\"replication failed\");\n\n    let mut copied = vec![0; data.len()];\n    copied.copy_from_slice(&mmapped_data);\n    assert_ne!(data, copied, \"replication did not change data\");\n\n    let seed = rng.gen();\n    let public_inputs =\n        PublicInputs::<<Tree::Hasher as Hasher>::Domain, <Sha256Hasher as Hasher>::Domain> {\n            
replica_id: replica_id.into(),\n            seed,\n            tau: Some(tau),\n            k: None,\n        };\n\n    // Store a copy of the t_aux for later resource deletion.\n    let t_aux_orig = t_aux.clone();\n\n    // Convert TemporaryAux to TemporaryAuxCache, which instantiates all\n    // elements based on the configs stored in TemporaryAux.\n    let t_aux = TemporaryAuxCache::<Tree, _>::new(&t_aux, replica_path)\n        .expect(\"failed to restore contents of t_aux\");\n\n    let private_inputs = PrivateInputs::<Tree, Sha256Hasher> { p_aux, t_aux };\n\n    {\n        let (circuit, inputs) =\n            StackedCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                .unwrap();\n\n        let mut cs = TestConstraintSystem::new();\n\n        circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n\n        if !cs.is_satisfied() {\n            panic!(\n                \"failed to satisfy: {:?}\",\n                cs.which_is_unsatisfied().unwrap()\n            );\n        }\n        assert!(\n            cs.verify(&inputs),\n            \"verification failed with TestConstraintSystem and generated inputs\"\n        );\n    }\n\n    // Use this to debug differences between blank and regular circuit generation.\n    {\n        let (circuit1, _inputs) =\n            StackedCompound::circuit_for_test(&public_params, &public_inputs, &private_inputs)\n                .unwrap();\n        let blank_circuit = <StackedCompound<Tree, Sha256Hasher> as CompoundProof<\n            StackedDrg<'_, Tree, Sha256Hasher>,\n            _,\n        >>::blank_circuit(&public_params.vanilla_params);\n\n        let mut cs_blank = MetricCS::new();\n        blank_circuit\n            .synthesize(&mut cs_blank)\n            .expect(\"failed to synthesize\");\n\n        let a = cs_blank.pretty_print_list();\n\n        let mut cs1 = TestConstraintSystem::new();\n        circuit1.synthesize(&mut cs1).expect(\"failed to synthesize\");\n        let b = cs1.pretty_print_list();\n\n        for (i, (a, b)) in a.chunks(100).zip(b.chunks(100)).enumerate() {\n            assert_eq!(a, b, \"failed at chunk {}\", i);\n        }\n    }\n\n    let blank_groth_params = <StackedCompound<Tree, Sha256Hasher> as CompoundProof<\n        StackedDrg<'_, Tree, Sha256Hasher>,\n        _,\n    >>::groth_params(Some(rng), &public_params.vanilla_params)\n    .expect(\"failed to generate groth params\");\n\n    // Discard cached MTs that are no longer needed.\n    TemporaryAux::<Tree, Sha256Hasher>::clear_temp(t_aux_orig).expect(\"t_aux delete failed\");\n\n    let proof = StackedCompound::prove(\n        &public_params,\n        &public_inputs,\n        &private_inputs,\n        &blank_groth_params,\n    )\n    .expect(\"failed while proving\");\n\n    let verified = StackedCompound::verify(\n        &public_params,\n        &public_inputs,\n        &proof,\n        &ChallengeRequirements {\n            minimum_challenges: 1,\n        },\n    )\n    .expect(\"failed while verifying\");\n\n    assert!(verified);\n\n    cache_dir.close().expect(\"Failed to remove cache dir\");\n}\n"
  },
  {
    "path": "storage-proofs-porep/tests/stacked_vanilla.rs",
    "content": "use std::fs::remove_file;\n\nuse bellperson::bls::{Fr, FrRepr};\nuse ff::{Field, PrimeField};\nuse filecoin_hashers::{\n    blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, Hasher,\n};\nuse fr32::fr_into_bytes;\nuse generic_array::typenum::{U0, U2, U4, U8};\nuse glob::glob;\nuse merkletree::store::{Store, StoreConfig};\nuse rand::{Rng, SeedableRng};\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    cache_key::CacheKey,\n    drgraph::BASE_DEGREE,\n    merkle::{get_base_tree_count, DiskTree, MerkleTreeTrait},\n    proof::ProofScheme,\n    table_tests,\n    test_helper::setup_replica,\n    util::{default_rows_to_discard, NODE_SIZE},\n    TEST_SEED,\n};\nuse storage_proofs_porep::{\n    stacked::{\n        LayerChallenges, PrivateInputs, PublicInputs, SetupParams, StackedBucketGraph, StackedDrg,\n        TemporaryAux, TemporaryAuxCache, BINARY_ARITY, EXP_DEGREE,\n    },\n    PoRep,\n};\nuse tempfile::tempdir;\n\nconst DEFAULT_STACKED_LAYERS: usize = 11;\n\n#[test]\nfn test_stacked_porep_extract_all_sha256_base_8() {\n    test_extract_all::<DiskTree<Sha256Hasher, U8, U0, U0>>();\n}\n\n#[test]\nfn test_stacked_porep_extract_all_sha256_sub_8_8() {\n    test_extract_all::<DiskTree<Sha256Hasher, U8, U8, U0>>();\n}\n\n#[test]\nfn test_stacked_porep_extract_all_sha256_top_8_8_2() {\n    test_extract_all::<DiskTree<Sha256Hasher, U8, U8, U2>>();\n}\n\n#[test]\nfn test_stacked_porep_extract_all_blake2s_base_8() {\n    test_extract_all::<DiskTree<Blake2sHasher, U8, U0, U0>>();\n}\n\n#[test]\nfn test_stacked_porep_extract_all_blake2s_sub_8_8() {\n    test_extract_all::<DiskTree<Blake2sHasher, U8, U8, U0>>();\n}\n\n#[test]\nfn test_stacked_porep_extract_all_blake2s_top_8_8_2() {\n    test_extract_all::<DiskTree<Blake2sHasher, U8, U8, U2>>();\n}\n\n#[test]\nfn test_stacked_porep_extract_all_poseidon_base_8() {\n    test_extract_all::<DiskTree<PoseidonHasher, U8, U0, U0>>();\n}\n\n#[test]\nfn test_stacked_porep_extract_all_poseidon_sub_8_2() {\n    test_extract_all::<DiskTree<PoseidonHasher, U8, U2, U0>>();\n}\n\n#[test]\nfn test_stacked_porep_extract_all_poseidon_top_8_8_2() {\n    test_extract_all::<DiskTree<PoseidonHasher, U8, U8, U2>>();\n}\n\nfn test_extract_all<Tree: 'static + MerkleTreeTrait>() {\n    // pretty_env_logger::try_init();\n\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n    let replica_id: <Tree::Hasher as Hasher>::Domain =\n        <Tree::Hasher as Hasher>::Domain::random(rng);\n    let nodes = 64 * get_base_tree_count::<Tree>();\n\n    let data: Vec<u8> = (0..nodes)\n        .flat_map(|_| {\n            let v = <Tree::Hasher as Hasher>::Domain::random(rng);\n            v.into_bytes()\n        })\n        .collect();\n\n    // MT for original data is always named tree-d, and it will be\n    // referenced later in the process as such.\n    let cache_dir = tempdir().expect(\"tempdir failure\");\n    let config = StoreConfig::new(\n        cache_dir.path(),\n        CacheKey::CommDTree.to_string(),\n        default_rows_to_discard(nodes, BINARY_ARITY),\n    );\n\n    // Generate a replica path.\n    let replica_path = cache_dir.path().join(\"replica-path\");\n    let mut mmapped_data = setup_replica(&data, &replica_path);\n\n    let layer_challenges = LayerChallenges::new(DEFAULT_STACKED_LAYERS, 5);\n\n    let sp = SetupParams {\n        nodes,\n        degree: BASE_DEGREE,\n        expansion_degree: EXP_DEGREE,\n        porep_id: [32; 32],\n        layer_challenges: 
layer_challenges.clone(),\n        api_version: ApiVersion::V1_1_0,\n    };\n\n    let pp = StackedDrg::<Tree, Blake2sHasher>::setup(&sp).expect(\"setup failed\");\n\n    StackedDrg::<Tree, Blake2sHasher>::replicate(\n        &pp,\n        &replica_id,\n        (mmapped_data.as_mut()).into(),\n        None,\n        config.clone(),\n        replica_path.clone(),\n    )\n    .expect(\"replication failed\");\n\n    // The layers are still in the cache dir, so rerunning the label generation should\n    // not do any work.\n\n    let (_, label_states) = StackedDrg::<Tree, Blake2sHasher>::generate_labels_for_encoding(\n        &pp.graph,\n        &layer_challenges,\n        &replica_id,\n        config.clone(),\n    )\n    .expect(\"label generation failed\");\n    for state in &label_states {\n        assert!(state.generated);\n    }\n    // delete the last 3 layers\n    let off = label_states.len() - 3;\n    for label_state in &label_states[off..] {\n        let config = &label_state.config;\n        let data_path = StoreConfig::data_path(&config.path, &config.id);\n        remove_file(data_path).expect(\"failed to delete layer cache\");\n    }\n\n    let (_, label_states) = StackedDrg::<Tree, Blake2sHasher>::generate_labels_for_encoding(\n        &pp.graph,\n        &layer_challenges,\n        &replica_id,\n        config.clone(),\n    )\n    .expect(\"label generation failed\");\n    for state in &label_states[..off] {\n        assert!(state.generated);\n    }\n    for state in &label_states[off..] {\n        assert!(!state.generated);\n    }\n\n    assert_ne!(data, &mmapped_data[..], \"replication did not change data\");\n\n    StackedDrg::<Tree, Blake2sHasher>::extract_all(\n        &pp,\n        &replica_id,\n        mmapped_data.as_mut(),\n        Some(config),\n    )\n    .expect(\"failed to extract data\");\n\n    assert_eq!(data, mmapped_data.as_ref());\n\n    cache_dir.close().expect(\"Failed to remove cache dir\");\n}\n\n#[test]\nfn test_stacked_porep_resume_seal() {\n    // pretty_env_logger::try_init().ok();\n\n    type Tree = DiskTree<PoseidonHasher, U8, U8, U2>;\n\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n    let replica_id = <PoseidonHasher as Hasher>::Domain::random(rng);\n    let nodes = 64 * get_base_tree_count::<Tree>();\n\n    let data: Vec<u8> = (0..nodes)\n        .flat_map(|_| {\n            let v = <PoseidonHasher as Hasher>::Domain::random(rng);\n            v.into_bytes()\n        })\n        .collect();\n\n    // MT for original data is always named tree-d, and it will be\n    // referenced later in the process as such.\n    let cache_dir = tempdir().expect(\"tempdir failure\");\n    let config = StoreConfig::new(\n        cache_dir.path(),\n        CacheKey::CommDTree.to_string(),\n        default_rows_to_discard(nodes, BINARY_ARITY),\n    );\n\n    // Generate replica paths.\n    let replica_path1 = cache_dir.path().join(\"replica-path-1\");\n    let replica_path2 = cache_dir.path().join(\"replica-path-2\");\n    let replica_path3 = cache_dir.path().join(\"replica-path-3\");\n    let mut mmapped_data1 = setup_replica(&data, &replica_path1);\n    let mut mmapped_data2 = setup_replica(&data, &replica_path2);\n    let mut mmapped_data3 = setup_replica(&data, &replica_path3);\n\n    let layer_challenges = LayerChallenges::new(DEFAULT_STACKED_LAYERS, 5);\n\n    let sp = SetupParams {\n        nodes,\n        degree: BASE_DEGREE,\n        expansion_degree: EXP_DEGREE,\n        porep_id: [32; 32],\n        layer_challenges: layer_challenges.clone(),\n        
api_version: ApiVersion::V1_1_0,\n    };\n\n    let pp = StackedDrg::<Tree, Blake2sHasher>::setup(&sp).expect(\"setup failed\");\n\n    let clear_temp = || {\n        for entry in glob(&(cache_dir.path().to_string_lossy() + \"/*.dat\")).unwrap() {\n            let entry = entry.unwrap();\n            if entry.is_file() {\n                // delete everything except the data-layers\n                if !entry.to_string_lossy().contains(\"data-layer\") {\n                    remove_file(entry).unwrap();\n                }\n            }\n        }\n    };\n\n    // first replication\n    StackedDrg::<Tree, Blake2sHasher>::replicate(\n        &pp,\n        &replica_id,\n        (mmapped_data1.as_mut()).into(),\n        None,\n        config.clone(),\n        replica_path1.clone(),\n    )\n    .expect(\"replication failed 1\");\n    clear_temp();\n\n    // replicate a second time\n    StackedDrg::<Tree, Blake2sHasher>::replicate(\n        &pp,\n        &replica_id,\n        (mmapped_data2.as_mut()).into(),\n        None,\n        config.clone(),\n        replica_path2.clone(),\n    )\n    .expect(\"replication failed 2\");\n    clear_temp();\n\n    // delete the last 3 layers\n    let (_, label_states) = StackedDrg::<Tree, Blake2sHasher>::generate_labels_for_encoding(\n        &pp.graph,\n        &layer_challenges,\n        &replica_id,\n        config.clone(),\n    )\n    .expect(\"label generation failed\");\n    let off = label_states.len() - 3;\n    for label_state in &label_states[off..] {\n        let config = &label_state.config;\n        let data_path = StoreConfig::data_path(&config.path, &config.id);\n        remove_file(data_path).expect(\"failed to delete layer cache\");\n    }\n\n    // replicate a third time\n    StackedDrg::<Tree, Blake2sHasher>::replicate(\n        &pp,\n        &replica_id,\n        (mmapped_data3.as_mut()).into(),\n        None,\n        config.clone(),\n        replica_path3.clone(),\n    )\n    .expect(\"replication failed 3\");\n    clear_temp();\n\n    assert_ne!(data, &mmapped_data1[..], \"replication did not change data\");\n\n    assert_eq!(&mmapped_data1[..], &mmapped_data2[..]);\n    assert_eq!(&mmapped_data2[..], &mmapped_data3[..]);\n\n    StackedDrg::<Tree, Blake2sHasher>::extract_all(\n        &pp,\n        &replica_id,\n        mmapped_data1.as_mut(),\n        Some(config),\n    )\n    .expect(\"failed to extract data\");\n\n    assert_eq!(data, mmapped_data1.as_ref());\n\n    cache_dir.close().expect(\"Failed to remove cache dir\");\n}\n\ntable_tests! 
{\n    test_prove_verify_fixed {\n       test_stacked_porep_prove_verify(64);\n    }\n}\n\nfn test_prove_verify_fixed(n: usize) {\n    let challenges = LayerChallenges::new(DEFAULT_STACKED_LAYERS, 5);\n\n    test_prove_verify::<DiskTree<Sha256Hasher, U8, U0, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<Sha256Hasher, U8, U2, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<Sha256Hasher, U8, U8, U2>>(n, challenges.clone());\n\n    test_prove_verify::<DiskTree<Sha256Hasher, U4, U0, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<Sha256Hasher, U4, U2, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<Sha256Hasher, U4, U8, U2>>(n, challenges.clone());\n\n    test_prove_verify::<DiskTree<Blake2sHasher, U4, U0, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<Blake2sHasher, U4, U2, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<Blake2sHasher, U4, U8, U2>>(n, challenges.clone());\n\n    test_prove_verify::<DiskTree<Blake2sHasher, U8, U0, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<Blake2sHasher, U8, U2, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<Blake2sHasher, U8, U8, U2>>(n, challenges.clone());\n\n    test_prove_verify::<DiskTree<PoseidonHasher, U4, U0, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<PoseidonHasher, U4, U2, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<PoseidonHasher, U4, U8, U2>>(n, challenges.clone());\n\n    test_prove_verify::<DiskTree<PoseidonHasher, U8, U0, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<PoseidonHasher, U8, U2, U0>>(n, challenges.clone());\n    test_prove_verify::<DiskTree<PoseidonHasher, U8, U8, U2>>(n, challenges);\n}\n\nfn test_prove_verify<Tree: 'static + MerkleTreeTrait>(n: usize, challenges: LayerChallenges) {\n    // This will be called multiple times; only the first call succeeds, and that is ok.\n    // femme::pretty::Logger::new()\n    //     .start(log::LevelFilter::Trace)\n    //     .ok();\n\n    let nodes = n * get_base_tree_count::<Tree>();\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let degree = BASE_DEGREE;\n    let expansion_degree = EXP_DEGREE;\n    let replica_id: <Tree::Hasher as Hasher>::Domain =\n        <Tree::Hasher as Hasher>::Domain::random(rng);\n    let data: Vec<u8> = (0..nodes)\n        .flat_map(|_| fr_into_bytes(&Fr::random(rng)))\n        .collect();\n\n    // MT for original data is always named tree-d, and it will be\n    // referenced later in the process as such.\n    let cache_dir = tempdir().expect(\"tempdir failure\");\n    let config = StoreConfig::new(\n        cache_dir.path(),\n        CacheKey::CommDTree.to_string(),\n        default_rows_to_discard(nodes, BINARY_ARITY),\n    );\n\n    // Generate a replica path.\n    let replica_path = cache_dir.path().join(\"replica-path\");\n    let mut mmapped_data = setup_replica(&data, &replica_path);\n\n    let partitions = 2;\n\n    let arbitrary_porep_id = [92; 32];\n    let sp = SetupParams {\n        nodes,\n        degree,\n        expansion_degree,\n        porep_id: arbitrary_porep_id,\n        layer_challenges: challenges,\n        api_version: ApiVersion::V1_1_0,\n    };\n\n    let pp = StackedDrg::<Tree, Blake2sHasher>::setup(&sp).expect(\"setup failed\");\n    let (tau, (p_aux, t_aux)) = StackedDrg::<Tree, Blake2sHasher>::replicate(\n        &pp,\n        &replica_id,\n        (mmapped_data.as_mut()).into(),\n        None,\n        config,\n        
replica_path.clone(),\n    )\n    .expect(\"replication failed\");\n\n    let mut copied = vec![0; data.len()];\n    copied.copy_from_slice(&mmapped_data);\n    assert_ne!(data, copied, \"replication did not change data\");\n\n    let seed = rng.gen();\n    let pub_inputs =\n        PublicInputs::<<Tree::Hasher as Hasher>::Domain, <Blake2sHasher as Hasher>::Domain> {\n            replica_id,\n            seed,\n            tau: Some(tau),\n            k: None,\n        };\n\n    // Store a copy of the t_aux for later resource deletion.\n    let t_aux_orig = t_aux.clone();\n\n    // Convert TemporaryAux to TemporaryAuxCache, which instantiates all\n    // elements based on the configs stored in TemporaryAux.\n    let t_aux = TemporaryAuxCache::<Tree, Blake2sHasher>::new(&t_aux, replica_path)\n        .expect(\"failed to restore contents of t_aux\");\n\n    let priv_inputs = PrivateInputs { p_aux, t_aux };\n\n    let all_partition_proofs = &StackedDrg::<Tree, Blake2sHasher>::prove_all_partitions(\n        &pp,\n        &pub_inputs,\n        &priv_inputs,\n        partitions,\n    )\n    .expect(\"failed to generate partition proofs\");\n\n    let proofs_are_valid = StackedDrg::<Tree, Blake2sHasher>::verify_all_partitions(\n        &pp,\n        &pub_inputs,\n        all_partition_proofs,\n    )\n    .expect(\"failed to verify partition proofs\");\n\n    // Discard cached MTs that are no longer needed.\n    TemporaryAux::<Tree, Blake2sHasher>::clear_temp(t_aux_orig).expect(\"t_aux delete failed\");\n\n    assert!(proofs_are_valid);\n\n    cache_dir.close().expect(\"Failed to remove cache dir\");\n}\n\n// We are seeing a bug, in which setup never terminates for some sector sizes. This test is to\n// debug that and should remain as a regression test.\n#[test]\nfn test_stacked_porep_setup_terminates() {\n    let degree = BASE_DEGREE;\n    let expansion_degree = EXP_DEGREE;\n    let nodes = 1024 * 1024 * 32 * 8; // This corresponds to 8GiB sectors (32-byte nodes)\n    let layer_challenges = LayerChallenges::new(10, 333);\n    let sp = SetupParams {\n        nodes,\n        degree,\n        expansion_degree,\n        porep_id: [32; 32],\n        layer_challenges,\n        api_version: ApiVersion::V1_1_0,\n    };\n\n    // When this fails, the call to setup should panic, but seems to actually hang (i.e. 
neither return nor panic) for some reason.\n    // When working as designed, the call to setup returns without error.\n    let _pp = StackedDrg::<DiskTree<Sha256Hasher, U8, U0, U0>, Blake2sHasher>::setup(&sp)\n        .expect(\"setup failed\");\n}\n\n#[test]\nfn test_stacked_porep_generate_labels() {\n    let layers = 11;\n    let nodes_2k = 1 << 11;\n    let nodes_4k = 1 << 12;\n    let replica_id = [9u8; 32];\n    let legacy_porep_id = [0; 32];\n    let porep_id = [123; 32];\n    test_generate_labels_aux(\n        nodes_2k,\n        layers,\n        replica_id,\n        legacy_porep_id,\n        ApiVersion::V1_0_0,\n        Fr::from_repr(FrRepr([\n            0xd3faa96b9a0fba04,\n            0xea81a283d106485e,\n            0xe3d51b9afa5ac2b3,\n            0x0462f4f4f1a68d37,\n        ]))\n        .unwrap(),\n    );\n\n    test_generate_labels_aux(\n        nodes_4k,\n        layers,\n        replica_id,\n        legacy_porep_id,\n        ApiVersion::V1_0_0,\n        Fr::from_repr(FrRepr([\n            0x7e191e52c4a8da86,\n            0x5ae8a1c9e6fac148,\n            0xce239f3b88a894b8,\n            0x234c00d1dc1d53be,\n        ]))\n        .unwrap(),\n    );\n\n    test_generate_labels_aux(\n        nodes_2k,\n        layers,\n        replica_id,\n        porep_id,\n        ApiVersion::V1_1_0,\n        Fr::from_repr(FrRepr([\n            0xabb3f38bb70defcf,\n            0x777a2e4d7769119f,\n            0x3448959d495490bc,\n            0x06021188c7a71cb5,\n        ]))\n        .unwrap(),\n    );\n\n    test_generate_labels_aux(\n        nodes_4k,\n        layers,\n        replica_id,\n        porep_id,\n        ApiVersion::V1_1_0,\n        Fr::from_repr(FrRepr([\n            0x22ab81cf68c4676d,\n            0x7a77a82fc7c9c189,\n            0xc6c03d32c1e42d23,\n            0x0f777c18cc2c55bd,\n        ]))\n        .unwrap(),\n    );\n}\n\nfn test_generate_labels_aux(\n    sector_size: usize,\n    layers: usize,\n    replica_id: [u8; 32],\n    porep_id: [u8; 32],\n    api_version: ApiVersion,\n    expected_last_label: Fr,\n) {\n    let nodes = sector_size / NODE_SIZE;\n\n    let cache_dir = tempdir().expect(\"tempdir failure\");\n    let config = StoreConfig::new(\n        cache_dir.path(),\n        CacheKey::CommDTree.to_string(),\n        nodes.trailing_zeros() as usize,\n    );\n\n    let graph = StackedBucketGraph::<PoseidonHasher>::new(\n        None,\n        nodes,\n        BASE_DEGREE,\n        EXP_DEGREE,\n        porep_id,\n        api_version,\n    )\n    .unwrap();\n\n    let unused_layer_challenges = LayerChallenges::new(layers, 0);\n\n    let labels = StackedDrg::<\n        // Although not generally correct for every size, the hasher shape is not used,\n        // so for purposes of testing label creation, it is safe to supply a dummy.\n        DiskTree<PoseidonHasher, U8, U8, U2>,\n        Sha256Hasher,\n    >::generate_labels_for_decoding(\n        &graph,\n        &unused_layer_challenges,\n        &<PoseidonHasher as Hasher>::Domain::try_from_bytes(&replica_id).unwrap(),\n        config,\n    )\n    .unwrap();\n\n    let final_labels = labels.labels_for_last_layer().unwrap();\n    let last_label = final_labels.read_at(nodes - 1).unwrap();\n\n    assert_eq!(expected_last_label.into_repr(), last_label.0);\n}\n"
  },
  {
    "path": "storage-proofs-post/Cargo.toml",
    "content": "[package]\nname = \"storage-proofs-post\"\nversion = \"7.0.1\"\nauthors = [\"dignifiedquire <me@dignifiedquire.com>\"]\nlicense = \"MIT OR Apache-2.0\"\ndescription = \"Proofs of Space Time\"\nedition = \"2018\"\nrepository = \"https://github.com/filecoin-project/rust-fil-proofs\"\nreadme = \"README.md\"\n\n[dependencies]\nstorage-proofs-core = { path = \"../storage-proofs-core\", version = \"^7.0.0\", default-features = false}\nfilecoin-hashers = { path = \"../filecoin-hashers\", version = \"^2.0.0\", default-features = false, features = [\"poseidon\", \"sha256\"]}\nrand = \"0.7\"\nmerkletree = \"0.21.0\"\nbyteorder = \"1\"\ncrossbeam = \"0.8\"\nsha2 = \"0.9.1\"\nrayon = \"1.0.0\"\nserde = { version = \"1.0\", features = [\"derive\"]}\nblake2b_simd = \"0.5\"\nblake2s_simd = \"0.5\"\nff = { version = \"0.2.3\", package = \"fff\" }\nbellperson = { version = \"0.13\", default-features = false }\nlog = \"0.4.7\"\nhex = \"0.4.0\"\ngeneric-array = \"0.14.4\"\nanyhow = \"1.0.23\"\nneptune = { version = \"2.2.0\", default-features = false }\nnum_cpus = \"1.10.1\"\nfr32 = { path = \"../fr32\", version = \"^0.2.0\", default-features = false }\n\n[dev-dependencies]\ntempfile = \"3\"\npretty_assertions = \"0.6.1\"\nrand_xorshift = \"0.2.0\"\nfilecoin-hashers = { path = \"../filecoin-hashers\", version = \"^2.0.0\", default-features = false, features = [\"poseidon\", \"sha256\", \"blake2s\"]}\n\n[features]\ndefault = [\"pairing\", \"gpu\"]\ngpu = [\"storage-proofs-core/gpu\", \"filecoin-hashers/gpu\", \"fr32/gpu\", \"neptune/opencl\"]\npairing = [\"storage-proofs-core/pairing\", \"bellperson/pairing\", \"neptune/pairing\", \"filecoin-hashers/pairing\", \"fr32/pairing\"]\nblst = [\"storage-proofs-core/blst\", \"bellperson/blst\", \"neptune/blst\", \"filecoin-hashers/blst\", \"fr32/blst\"]\n"
  },
  {
    "path": "storage-proofs-post/README.md",
    "content": "# Storage Proofs PoSt\n\n## License\n\nMIT or Apache 2.0\n"
  },
  {
    "path": "storage-proofs-post/src/election/circuit.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::{\n    bls::{Bls12, Fr},\n    gadgets::num::AllocatedNum,\n    Circuit, ConstraintSystem, SynthesisError,\n};\nuse ff::Field;\nuse filecoin_hashers::{poseidon::PoseidonFunction, HashFunction, Hasher, PoseidonMDArity};\nuse generic_array::typenum::Unsigned;\nuse storage_proofs_core::{\n    compound_proof::CircuitComponent,\n    gadgets::{constraint, por::PoRCircuit, variables::Root},\n    merkle::MerkleTreeTrait,\n};\n\n/// This is the `ElectionPoSt` circuit.\npub struct ElectionPoStCircuit<Tree: MerkleTreeTrait> {\n    pub comm_r: Option<Fr>,\n    pub comm_c: Option<Fr>,\n    pub comm_r_last: Option<Fr>,\n    pub leafs: Vec<Option<Fr>>,\n    #[allow(clippy::type_complexity)]\n    pub paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n    pub partial_ticket: Option<Fr>,\n    pub randomness: Option<Fr>,\n    pub prover_id: Option<Fr>,\n    pub sector_id: Option<Fr>,\n    pub _t: PhantomData<Tree>,\n}\n\n#[derive(Clone, Default)]\npub struct ComponentPrivateInputs {}\n\nimpl<'a, Tree: MerkleTreeTrait> CircuitComponent for ElectionPoStCircuit<Tree> {\n    type ComponentPrivateInputs = ComponentPrivateInputs;\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait> Circuit<Bls12> for ElectionPoStCircuit<Tree> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let comm_r = self.comm_r;\n        let comm_c = self.comm_c;\n        let comm_r_last = self.comm_r_last;\n        let leafs = self.leafs;\n        let paths = self.paths;\n        let partial_ticket = self.partial_ticket;\n        let randomness = self.randomness;\n        let prover_id = self.prover_id;\n        let sector_id = self.sector_id;\n\n        assert_eq!(paths.len(), leafs.len());\n\n        // 1. Verify comm_r\n\n        let comm_r_last_num = AllocatedNum::alloc(cs.namespace(|| \"comm_r_last\"), || {\n            comm_r_last\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        let comm_c_num = AllocatedNum::alloc(cs.namespace(|| \"comm_c\"), || {\n            comm_c\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        let comm_r_num = AllocatedNum::alloc(cs.namespace(|| \"comm_r\"), || {\n            comm_r\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        comm_r_num.inputize(cs.namespace(|| \"comm_r_input\"))?;\n\n        // Verify H(Comm_C || comm_r_last) == comm_r\n        {\n            let hash_num = <Tree::Hasher as Hasher>::Function::hash2_circuit(\n                cs.namespace(|| \"H_comm_c_comm_r_last\"),\n                &comm_c_num,\n                &comm_r_last_num,\n            )?;\n\n            // Check actual equality\n            constraint::equal(\n                cs,\n                || \"enforce_comm_c_comm_r_last_hash_comm_r\",\n                &comm_r_num,\n                &hash_num,\n            );\n        }\n\n        // 2. Verify Inclusion Paths\n        for (i, (leaf, path)) in leafs.iter().zip(paths.iter()).enumerate() {\n            PoRCircuit::<Tree>::synthesize(\n                cs.namespace(|| format!(\"challenge_inclusion{}\", i)),\n                Root::Val(*leaf),\n                path.clone().into(),\n                Root::from_allocated::<CS>(comm_r_last_num.clone()),\n                true,\n            )?;\n        }\n\n        // 3. 
Verify partial ticket\n\n        // randomness\n        let randomness_num = AllocatedNum::alloc(cs.namespace(|| \"randomness\"), || {\n            randomness\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        // prover_id\n        let prover_id_num = AllocatedNum::alloc(cs.namespace(|| \"prover_id\"), || {\n            prover_id\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        // sector_id\n        let sector_id_num = AllocatedNum::alloc(cs.namespace(|| \"sector_id\"), || {\n            sector_id\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        let mut partial_ticket_nums = vec![randomness_num, prover_id_num, sector_id_num];\n        for (i, leaf) in leafs.iter().enumerate() {\n            let leaf_num = AllocatedNum::alloc(cs.namespace(|| format!(\"leaf_{}\", i)), || {\n                leaf.map(Into::into)\n                    .ok_or(SynthesisError::AssignmentMissing)\n            })?;\n            partial_ticket_nums.push(leaf_num);\n        }\n\n        // pad to a multiple of md arity\n        let arity = PoseidonMDArity::to_usize();\n        while partial_ticket_nums.len() % arity != 0 {\n            partial_ticket_nums.push(AllocatedNum::alloc(\n                cs.namespace(|| format!(\"padding_{}\", partial_ticket_nums.len())),\n                || Ok(Fr::zero()),\n            )?);\n        }\n\n        // hash it\n        let partial_ticket_num = PoseidonFunction::hash_md_circuit::<_>(\n            &mut cs.namespace(|| \"partial_ticket_hash\"),\n            &partial_ticket_nums,\n        )?;\n\n        // allocate expected input\n        let expected_partial_ticket_num =\n            AllocatedNum::alloc(cs.namespace(|| \"partial_ticket\"), || {\n                partial_ticket\n                    .map(Into::into)\n                    .ok_or(SynthesisError::AssignmentMissing)\n            })?;\n\n        expected_partial_ticket_num.inputize(cs.namespace(|| \"partial_ticket_input\"))?;\n\n        // check equality\n        constraint::equal(\n            cs,\n            || \"enforce partial_ticket is correct\",\n            &partial_ticket_num,\n            &expected_partial_ticket_num,\n        );\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "storage-proofs-post/src/election/compound.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::{\n    bls::{Bls12, Fr},\n    Circuit,\n};\nuse generic_array::typenum::Unsigned;\nuse storage_proofs_core::{\n    compound_proof::{CircuitComponent, CompoundProof},\n    drgraph::graph_height,\n    error::Result,\n    gadgets::por::PoRCompound,\n    merkle::MerkleTreeTrait,\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    por,\n    proof::ProofScheme,\n    util::NODE_SIZE,\n};\n\nuse crate::election::{generate_leaf_challenge, ElectionPoSt, ElectionPoStCircuit};\n\npub struct ElectionPoStCompound<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    _t: PhantomData<Tree>,\n}\n\nimpl<C: Circuit<Bls12>, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters<C, P>\n    for ElectionPoStCompound<Tree>\n{\n    fn cache_prefix() -> String {\n        format!(\"proof-of-spacetime-election-{}\", Tree::display())\n    }\n}\n\nimpl<'a, Tree> CompoundProof<'a, ElectionPoSt<'a, Tree>, ElectionPoStCircuit<Tree>>\n    for ElectionPoStCompound<Tree>\nwhere\n    Tree: 'static + MerkleTreeTrait,\n{\n    fn generate_public_inputs(\n        pub_inputs: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        let mut inputs = Vec::new();\n\n        let por_pub_params = por::PublicParams {\n            leaves: (pub_params.sector_size as usize / NODE_SIZE),\n            private: true,\n        };\n\n        // 1. Inputs for verifying comm_r = H(comm_c || comm_r_last)\n\n        inputs.push(pub_inputs.comm_r.into());\n\n        // 2. Inputs for verifying inclusion paths\n\n        for n in 0..pub_params.challenge_count {\n            let challenged_leaf_start = generate_leaf_challenge(\n                &pub_params,\n                pub_inputs.randomness,\n                pub_inputs.sector_challenge_index,\n                n as u64,\n            )?;\n            for i in 0..pub_params.challenged_nodes {\n                let por_pub_inputs = por::PublicInputs {\n                    commitment: None,\n                    challenge: challenged_leaf_start as usize + i,\n                };\n                let por_inputs = PoRCompound::<Tree>::generate_public_inputs(\n                    &por_pub_inputs,\n                    &por_pub_params,\n                    None,\n                )?;\n\n                inputs.extend(por_inputs);\n            }\n        }\n\n        // 3. 
Inputs for verifying partial_ticket generation\n        inputs.push(pub_inputs.partial_ticket);\n\n        Ok(inputs)\n    }\n\n    fn circuit(\n        pub_in: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        _priv_in: <ElectionPoStCircuit<Tree> as CircuitComponent>::ComponentPrivateInputs,\n        vanilla_proof: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::Proof,\n        _pub_params: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<ElectionPoStCircuit<Tree>> {\n        let comm_r = pub_in.comm_r.into();\n        let comm_c = vanilla_proof.comm_c.into();\n        let comm_r_last = vanilla_proof.comm_r_last().into();\n\n        let leafs: Vec<_> = vanilla_proof\n            .leafs()\n            .iter()\n            .map(|c| Some((*c).into()))\n            .collect();\n\n        let paths: Vec<Vec<_>> = vanilla_proof\n            .paths()\n            .iter()\n            .map(|v| {\n                v.iter()\n                    .map(|p| {\n                        (\n                            (*p).0.iter().copied().map(Into::into).map(Some).collect(),\n                            Some(p.1),\n                        )\n                    })\n                    .collect()\n            })\n            .collect();\n\n        Ok(ElectionPoStCircuit {\n            leafs,\n            comm_r: Some(comm_r),\n            comm_c: Some(comm_c),\n            comm_r_last: Some(comm_r_last),\n            paths,\n            partial_ticket: Some(pub_in.partial_ticket),\n            randomness: Some(pub_in.randomness.into()),\n            prover_id: Some(pub_in.prover_id.into()),\n            sector_id: Some(pub_in.sector_id.into()),\n            _t: PhantomData,\n        })\n    }\n\n    fn blank_circuit(\n        pub_params: &<ElectionPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n    ) -> ElectionPoStCircuit<Tree> {\n        let challenges_count = pub_params.challenged_nodes * pub_params.challenge_count;\n        let height = graph_height::<Tree::Arity>(pub_params.sector_size as usize / NODE_SIZE);\n\n        let leafs = vec![None; challenges_count];\n        let paths = vec![\n            vec![(vec![None; Tree::Arity::to_usize() - 1], None); height - 1];\n            challenges_count\n        ];\n\n        ElectionPoStCircuit {\n            comm_r: None,\n            comm_c: None,\n            comm_r_last: None,\n            partial_ticket: None,\n            leafs,\n            paths,\n            randomness: None,\n            prover_id: None,\n            sector_id: None,\n            _t: PhantomData,\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-post/src/election/mod.rs",
    "content": "mod circuit;\nmod compound;\nmod vanilla;\n\npub use circuit::*;\npub use compound::*;\npub use vanilla::*;\n"
  },
  {
    "path": "storage-proofs-post/src/election/vanilla.rs",
    "content": "use std::collections::BTreeMap;\nuse std::fmt::{self, Debug, Formatter};\nuse std::marker::PhantomData;\n\nuse anyhow::{bail, ensure, Context};\nuse bellperson::bls::Fr;\nuse byteorder::{ByteOrder, LittleEndian};\nuse filecoin_hashers::{\n    poseidon::{PoseidonDomain, PoseidonFunction},\n    Domain, HashFunction, Hasher, PoseidonMDArity,\n};\nuse fr32::fr_into_bytes;\nuse generic_array::typenum::Unsigned;\nuse log::trace;\nuse rayon::prelude::{\n    IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator,\n};\nuse serde::{de::DeserializeOwned, Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\nuse storage_proofs_core::{\n    error::{Error, Result},\n    measurements::{measure_op, Operation},\n    merkle::{MerkleProof, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper},\n    parameter_cache::ParameterSetMetadata,\n    proof::{NoRequirements, ProofScheme},\n    sector::{OrderedSectorSet, SectorId},\n    util::NODE_SIZE,\n};\n\n#[derive(Debug, Clone)]\npub struct SetupParams {\n    /// Size of the sector in bytes.\n    pub sector_size: u64,\n    pub challenge_count: usize,\n    pub challenged_nodes: usize,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams {\n    /// Size of the sector in bytes.\n    pub sector_size: u64,\n    pub challenge_count: usize,\n    pub challenged_nodes: usize,\n}\n\nimpl ParameterSetMetadata for PublicParams {\n    fn identifier(&self) -> String {\n        format!(\n            \"ElectionPoSt::PublicParams{{sector_size: {}, count: {}, nodes: {}}}\",\n            self.sector_size(),\n            self.challenge_count,\n            self.challenged_nodes,\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.sector_size\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct PublicInputs<T: Domain> {\n    #[serde(bound = \"\")]\n    pub randomness: T,\n    pub sector_id: SectorId,\n    #[serde(bound = \"\")]\n    pub prover_id: T,\n    #[serde(bound = \"\")]\n    pub comm_r: T,\n    pub partial_ticket: Fr,\n    pub sector_challenge_index: u64,\n}\n\n#[derive(Debug)]\npub struct PrivateInputs<Tree: MerkleTreeTrait> {\n    pub tree: MerkleTreeWrapper<\n        Tree::Hasher,\n        Tree::Store,\n        Tree::Arity,\n        Tree::SubTreeArity,\n        Tree::TopTreeArity,\n    >,\n    pub comm_c: <Tree::Hasher as Hasher>::Domain,\n    pub comm_r_last: <Tree::Hasher as Hasher>::Domain,\n}\n\n/// The candidate data, that is needed for ticket generation.\n#[derive(Clone, Serialize, Deserialize)]\npub struct Candidate {\n    pub sector_id: SectorId,\n    pub partial_ticket: Fr,\n    pub ticket: [u8; 32],\n    pub sector_challenge_index: u64,\n}\n\nimpl Debug for Candidate {\n    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {\n        f.debug_struct(\"Candidate\")\n            .field(\"sector_id\", &self.sector_id)\n            .field(\"partial_ticket\", &self.partial_ticket)\n            .field(\"ticket\", &hex::encode(&self.ticket))\n            .field(\"sector_challenge_index\", &self.sector_challenge_index)\n            .finish()\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Proof<P: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>: Serialize\",\n        deserialize = \"MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>: DeserializeOwned\"\n    ))]\n    inclusion_proofs: Vec<MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>>,\n    
pub ticket: [u8; 32],\n    pub comm_c: <P::Hasher as Hasher>::Domain,\n}\n\nimpl<P: MerkleProofTrait> Proof<P> {\n    pub fn leafs(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProof::leaf)\n            .collect()\n    }\n\n    pub fn comm_r_last(&self) -> <P::Hasher as Hasher>::Domain {\n        self.inclusion_proofs[0].root()\n    }\n\n    pub fn commitments(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProof::root)\n            .collect()\n    }\n\n    #[allow(clippy::type_complexity)]\n    pub fn paths(&self) -> Vec<Vec<(Vec<<P::Hasher as Hasher>::Domain>, usize)>> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProof::path)\n            .collect()\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct ElectionPoSt<'a, Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    _t: PhantomData<&'a Tree>,\n}\n\n#[allow(clippy::type_complexity)]\npub fn generate_candidates<Tree: MerkleTreeTrait>(\n    pub_params: &PublicParams,\n    challenged_sectors: &[SectorId],\n    trees: &BTreeMap<\n        SectorId,\n        MerkleTreeWrapper<\n            Tree::Hasher,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >,\n    >,\n    prover_id: <Tree::Hasher as Hasher>::Domain,\n    randomness: <Tree::Hasher as Hasher>::Domain,\n) -> Result<Vec<Candidate>> {\n    challenged_sectors\n        .par_iter()\n        .enumerate()\n        .map(|(sector_challenge_index, sector_id)| {\n            let tree = match trees.get(sector_id) {\n                Some(tree) => tree,\n                None => bail!(Error::MissingPrivateInput(\"tree\", (*sector_id).into())),\n            };\n\n            generate_candidate::<Tree>(\n                pub_params,\n                tree,\n                prover_id,\n                *sector_id,\n                randomness,\n                sector_challenge_index as u64,\n            )\n        })\n        .collect()\n}\n\nfn generate_candidate<Tree: MerkleTreeTrait>(\n    pub_params: &PublicParams,\n    tree: &MerkleTreeWrapper<\n        Tree::Hasher,\n        Tree::Store,\n        Tree::Arity,\n        Tree::SubTreeArity,\n        Tree::TopTreeArity,\n    >,\n    prover_id: <Tree::Hasher as Hasher>::Domain,\n    sector_id: SectorId,\n    randomness: <Tree::Hasher as Hasher>::Domain,\n    sector_challenge_index: u64,\n) -> Result<Candidate> {\n    let randomness_fr: Fr = randomness.into();\n    let prover_id_fr: Fr = prover_id.into();\n    let mut data: Vec<PoseidonDomain> = vec![\n        randomness_fr.into(),\n        prover_id_fr.into(),\n        Fr::from(sector_id).into(),\n    ];\n\n    for n in 0..pub_params.challenge_count {\n        let challenge =\n            generate_leaf_challenge(pub_params, randomness, sector_challenge_index, n as u64)?;\n\n        let val: Fr = measure_op(Operation::PostReadChallengedRange, || {\n            tree.read_at(challenge as usize)\n        })?\n        .into();\n        data.push(val.into());\n    }\n\n    // pad for md\n    let arity = PoseidonMDArity::to_usize();\n    while data.len() % arity != 0 {\n        data.push(PoseidonDomain::default());\n    }\n\n    let partial_ticket: Fr = measure_op(Operation::PostPartialTicketHash, || {\n        PoseidonFunction::hash_md(&data)\n    })\n    .into();\n\n    // ticket = sha256(partial_ticket)\n    let ticket = 
finalize_ticket(&partial_ticket);\n\n    Ok(Candidate {\n        sector_challenge_index,\n        sector_id,\n        partial_ticket,\n        ticket,\n    })\n}\n\npub fn finalize_ticket(partial_ticket: &Fr) -> [u8; 32] {\n    let bytes = fr_into_bytes(partial_ticket);\n    let ticket_hash = Sha256::digest(&bytes);\n    let mut ticket = [0u8; 32];\n    ticket.copy_from_slice(&ticket_hash[..]);\n    ticket\n}\n\npub fn is_valid_sector_challenge_index(challenge_count: u64, index: u64) -> bool {\n    index < challenge_count\n}\n\npub fn generate_sector_challenges<T: Domain>(\n    randomness: T,\n    challenge_count: u64,\n    sectors: &OrderedSectorSet,\n) -> Result<Vec<SectorId>> {\n    (0..challenge_count)\n        .into_par_iter()\n        .map(|n| generate_sector_challenge(randomness, n as usize, sectors))\n        .collect()\n}\n\npub fn generate_sector_challenge<T: Domain>(\n    randomness: T,\n    n: usize,\n    sectors: &OrderedSectorSet,\n) -> Result<SectorId> {\n    let mut hasher = Sha256::new();\n    hasher.update(AsRef::<[u8]>::as_ref(&randomness));\n    hasher.update(&n.to_le_bytes()[..]);\n    let hash = hasher.finalize();\n\n    let sector_challenge = LittleEndian::read_u64(&hash[..8]);\n    let sector_index = (sector_challenge % sectors.len() as u64) as usize;\n    let sector = *sectors\n        .iter()\n        .nth(sector_index)\n        .context(\"invalid challenge generated\")?;\n\n    Ok(sector)\n}\n\n/// Generate all challenged leaf ranges for a single sector, such that the range fits into the sector.\npub fn generate_leaf_challenges<T: Domain>(\n    pub_params: &PublicParams,\n    randomness: T,\n    sector_challenge_index: u64,\n    challenge_count: usize,\n) -> Result<Vec<u64>> {\n    let mut challenges = Vec::with_capacity(challenge_count);\n\n    for leaf_challenge_index in 0..challenge_count {\n        let challenge = generate_leaf_challenge(\n            pub_params,\n            randomness,\n            sector_challenge_index,\n            leaf_challenge_index as u64,\n        )?;\n        challenges.push(challenge)\n    }\n\n    Ok(challenges)\n}\n\n/// Generates challenge, such that the range fits into the sector.\npub fn generate_leaf_challenge<T: Domain>(\n    pub_params: &PublicParams,\n    randomness: T,\n    sector_challenge_index: u64,\n    leaf_challenge_index: u64,\n) -> Result<u64> {\n    ensure!(\n        pub_params.sector_size > pub_params.challenged_nodes as u64 * NODE_SIZE as u64,\n        \"sector size {} is too small\",\n        pub_params.sector_size\n    );\n\n    let mut hasher = Sha256::new();\n    hasher.update(AsRef::<[u8]>::as_ref(&randomness));\n    hasher.update(&sector_challenge_index.to_le_bytes()[..]);\n    hasher.update(&leaf_challenge_index.to_le_bytes()[..]);\n    let hash = hasher.finalize();\n\n    let leaf_challenge = LittleEndian::read_u64(&hash[..8]);\n\n    let challenged_range_index = leaf_challenge\n        % (pub_params.sector_size / (pub_params.challenged_nodes * NODE_SIZE) as u64);\n\n    Ok(challenged_range_index * pub_params.challenged_nodes as u64)\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait> ProofScheme<'a> for ElectionPoSt<'a, Tree> {\n    type PublicParams = PublicParams;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<Tree::Hasher as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<Tree>;\n    type Proof = Proof<Tree::Proof>;\n    type Requirements = NoRequirements;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        Ok(PublicParams {\n          
  sector_size: sp.sector_size,\n            challenge_count: sp.challenge_count,\n            challenged_nodes: sp.challenged_nodes,\n        })\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        // 1. Inclusion proofs of all challenged leafs in all challenged ranges\n        let tree = &priv_inputs.tree;\n        let tree_leafs = tree.leafs();\n\n        trace!(\n            \"Generating proof for tree of len {} with leafs {}\",\n            tree.len(),\n            tree_leafs,\n        );\n\n        let inclusion_proofs = measure_op(Operation::PostInclusionProofs, || {\n            (0..pub_params.challenge_count)\n                .into_par_iter()\n                .flat_map(|n| {\n                    // TODO: replace expect with proper error handling\n                    let challenged_leaf_start = generate_leaf_challenge(\n                        pub_params,\n                        pub_inputs.randomness,\n                        pub_inputs.sector_challenge_index,\n                        n as u64,\n                    )\n                    .expect(\"generate leaf challenge failure\");\n                    (0..pub_params.challenged_nodes)\n                        .into_par_iter()\n                        .map(move |i| {\n                            tree.gen_cached_proof(challenged_leaf_start as usize + i, None)\n                        })\n                })\n                .collect::<Result<Vec<_>>>()\n        })?;\n\n        // 2. Correct generation of the ticket from the partial_ticket (add this to the candidate)\n        let ticket = measure_op(Operation::PostFinalizeTicket, || {\n            finalize_ticket(&pub_inputs.partial_ticket)\n        });\n\n        Ok(Proof {\n            inclusion_proofs,\n            ticket,\n            comm_c: priv_inputs.comm_c,\n        })\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        // verify that H(Comm_c || Comm_r_last) == Comm_R\n        // comm_r_last is the root of the proof\n        let comm_r_last = proof.inclusion_proofs[0].root();\n        let comm_c = proof.comm_c;\n        let comm_r = &pub_inputs.comm_r;\n\n        if AsRef::<[u8]>::as_ref(&<Tree::Hasher as Hasher>::Function::hash2(\n            &comm_c,\n            &comm_r_last,\n        )) != AsRef::<[u8]>::as_ref(comm_r)\n        {\n            return Ok(false);\n        }\n\n        for n in 0..pub_params.challenge_count {\n            let challenged_leaf_start = generate_leaf_challenge(\n                pub_params,\n                pub_inputs.randomness,\n                pub_inputs.sector_challenge_index,\n                n as u64,\n            )?;\n            for i in 0..pub_params.challenged_nodes {\n                let merkle_proof = &proof.inclusion_proofs[n * pub_params.challenged_nodes + i];\n\n                // validate all comm_r_lasts match\n                if merkle_proof.root() != comm_r_last {\n                    return Ok(false);\n                }\n\n                // validate the path length\n                let expected_path_length =\n                    merkle_proof.expected_len(pub_params.sector_size as usize / NODE_SIZE);\n\n                if expected_path_length != merkle_proof.path().len() {\n                    return Ok(false);\n                }\n\n                if 
!merkle_proof.validate(challenged_leaf_start as usize + i) {\n                    return Ok(false);\n                }\n            }\n        }\n\n        Ok(true)\n    }\n}\n"
  },
  {
    "path": "storage-proofs-post/src/fallback/circuit.rs",
    "content": "use bellperson::{\n    bls::{Bls12, Fr},\n    gadgets::num::AllocatedNum,\n    Circuit, ConstraintSystem, SynthesisError,\n};\nuse ff::Field;\nuse filecoin_hashers::{HashFunction, Hasher};\nuse rayon::prelude::{ParallelIterator, ParallelSlice};\nuse storage_proofs_core::{\n    compound_proof::CircuitComponent,\n    error::Result,\n    gadgets::{\n        constraint,\n        por::{AuthPath, PoRCircuit},\n        variables::Root,\n    },\n    merkle::MerkleTreeTrait,\n    por,\n    settings::SETTINGS,\n    util::NODE_SIZE,\n};\n\nuse crate::fallback::{PublicParams, PublicSector, SectorProof};\n\n/// This is the `FallbackPoSt` circuit.\npub struct FallbackPoStCircuit<Tree: MerkleTreeTrait> {\n    pub prover_id: Option<Fr>,\n    pub sectors: Vec<Sector<Tree>>,\n}\n\n// We must manually implement Clone for all types generic over MerkleTreeTrait (instead of using\n// #[derive(Clone)]) because derive(Clone) will only expand for MerkleTreeTrait types that also\n// implement Clone. Not every MerkleTreeTrait type is Clone-able because not all merkel Store's are\n// Clone-able, therefore deriving Clone would impl Clone for less than all possible Tree types.\nimpl<Tree: 'static + MerkleTreeTrait> Clone for FallbackPoStCircuit<Tree> {\n    fn clone(&self) -> Self {\n        FallbackPoStCircuit {\n            prover_id: self.prover_id,\n            sectors: self.sectors.clone(),\n        }\n    }\n}\n\npub struct Sector<Tree: MerkleTreeTrait> {\n    pub comm_r: Option<Fr>,\n    pub comm_c: Option<Fr>,\n    pub comm_r_last: Option<Fr>,\n    pub leafs: Vec<Option<Fr>>,\n    pub paths: Vec<AuthPath<Tree::Hasher, Tree::Arity, Tree::SubTreeArity, Tree::TopTreeArity>>,\n    pub id: Option<Fr>,\n}\n\n// We must manually implement Clone for all types generic over MerkleTreeTrait (instead of using\n// #derive(Clone)).\nimpl<Tree: MerkleTreeTrait> Clone for Sector<Tree> {\n    fn clone(&self) -> Self {\n        Sector {\n            comm_r: self.comm_r,\n            comm_c: self.comm_c,\n            comm_r_last: self.comm_r_last,\n            leafs: self.leafs.clone(),\n            paths: self.paths.clone(),\n            id: self.id,\n        }\n    }\n}\n\nimpl<Tree: 'static + MerkleTreeTrait> Sector<Tree> {\n    pub fn circuit(\n        sector: &PublicSector<<Tree::Hasher as Hasher>::Domain>,\n        vanilla_proof: &SectorProof<Tree::Proof>,\n    ) -> Result<Self> {\n        let leafs = vanilla_proof\n            .leafs()\n            .iter()\n            .map(|l| Some((*l).into()))\n            .collect();\n\n        let paths = vanilla_proof\n            .as_options()\n            .into_iter()\n            .map(Into::into)\n            .collect();\n\n        Ok(Sector {\n            leafs,\n            id: Some(sector.id.into()),\n            comm_r: Some(sector.comm_r.into()),\n            comm_c: Some(vanilla_proof.comm_c.into()),\n            comm_r_last: Some(vanilla_proof.comm_r_last.into()),\n            paths,\n        })\n    }\n\n    pub fn blank_circuit(pub_params: &PublicParams) -> Self {\n        let challenges_count = pub_params.challenge_count;\n        let leaves = pub_params.sector_size as usize / NODE_SIZE;\n\n        let por_params = por::PublicParams {\n            leaves,\n            private: true,\n        };\n        let leafs = vec![None; challenges_count];\n        let paths = vec![AuthPath::blank(por_params.leaves); challenges_count];\n\n        Sector {\n            id: None,\n            comm_r: None,\n            comm_c: None,\n            comm_r_last: None,\n    
        leafs,\n            paths,\n        }\n    }\n}\n\nimpl<Tree: 'static + MerkleTreeTrait> Circuit<Bls12> for &Sector<Tree> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let Sector {\n            comm_r,\n            comm_c,\n            comm_r_last,\n            leafs,\n            paths,\n            ..\n        } = self;\n\n        assert_eq!(paths.len(), leafs.len());\n\n        // 1. Verify comm_r\n        let comm_r_last_num = AllocatedNum::alloc(cs.namespace(|| \"comm_r_last\"), || {\n            comm_r_last\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        let comm_c_num = AllocatedNum::alloc(cs.namespace(|| \"comm_c\"), || {\n            comm_c\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        let comm_r_num = AllocatedNum::alloc(cs.namespace(|| \"comm_r\"), || {\n            comm_r\n                .map(Into::into)\n                .ok_or(SynthesisError::AssignmentMissing)\n        })?;\n\n        comm_r_num.inputize(cs.namespace(|| \"comm_r_input\"))?;\n\n        // 1. Verify H(Comm_C || comm_r_last) == comm_r\n        {\n            let hash_num = <Tree::Hasher as Hasher>::Function::hash2_circuit(\n                cs.namespace(|| \"H_comm_c_comm_r_last\"),\n                &comm_c_num,\n                &comm_r_last_num,\n            )?;\n\n            // Check actual equality\n            constraint::equal(\n                cs,\n                || \"enforce_comm_c_comm_r_last_hash_comm_r\",\n                &comm_r_num,\n                &hash_num,\n            );\n        }\n\n        // 2. Verify Inclusion Paths\n        for (i, (leaf, path)) in leafs.iter().zip(paths.iter()).enumerate() {\n            PoRCircuit::<Tree>::synthesize(\n                cs.namespace(|| format!(\"challenge_inclusion_{}\", i)),\n                Root::Val(*leaf),\n                path.clone(),\n                Root::from_allocated::<CS>(comm_r_last_num.clone()),\n                true,\n            )?;\n        }\n\n        Ok(())\n    }\n}\n\n#[derive(Clone, Default)]\npub struct ComponentPrivateInputs {}\n\nimpl<Tree: MerkleTreeTrait> CircuitComponent for FallbackPoStCircuit<Tree> {\n    type ComponentPrivateInputs = ComponentPrivateInputs;\n}\n\nimpl<Tree: 'static + MerkleTreeTrait> Circuit<Bls12> for FallbackPoStCircuit<Tree> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        if CS::is_extensible() {\n            return self.synthesize_extendable(cs);\n        }\n\n        self.synthesize_default(cs)\n    }\n}\n\nimpl<Tree: 'static + MerkleTreeTrait> FallbackPoStCircuit<Tree> {\n    fn synthesize_default<CS: ConstraintSystem<Bls12>>(\n        self,\n        cs: &mut CS,\n    ) -> Result<(), SynthesisError> {\n        let cs = &mut cs.namespace(|| \"outer namespace\".to_string());\n\n        for (i, sector) in self.sectors.iter().enumerate() {\n            let cs = &mut cs.namespace(|| format!(\"sector_{}\", i));\n            sector.synthesize(cs)?;\n        }\n        Ok(())\n    }\n\n    fn synthesize_extendable<CS: ConstraintSystem<Bls12>>(\n        self,\n        cs: &mut CS,\n    ) -> Result<(), SynthesisError> {\n        let FallbackPoStCircuit { sectors, .. 
} = self;\n\n        let num_chunks = SETTINGS.window_post_synthesis_num_cpus as usize;\n\n        let chunk_size = (sectors.len() / num_chunks).max(1);\n        let css = sectors\n            .par_chunks(chunk_size)\n            .map(|sector_group| {\n                let mut cs = CS::new();\n                cs.alloc_input(|| \"temp ONE\", || Ok(Fr::one()))?;\n\n                for (i, sector) in sector_group.iter().enumerate() {\n                    let mut cs = cs.namespace(|| format!(\"sector_{}\", i));\n\n                    sector.synthesize(&mut cs)?;\n                }\n                Ok(cs)\n            })\n            .collect::<Result<Vec<_>, SynthesisError>>()?;\n\n        for sector_cs in css.into_iter() {\n            cs.extend(sector_cs);\n        }\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "storage-proofs-post/src/fallback/compound.rs",
    "content": "use std::marker::PhantomData;\n\nuse anyhow::{anyhow, ensure};\nuse bellperson::{\n    bls::{Bls12, Fr},\n    Circuit,\n};\nuse filecoin_hashers::Hasher;\nuse sha2::{Digest, Sha256};\nuse storage_proofs_core::{\n    compound_proof::{CircuitComponent, CompoundProof},\n    error::Result,\n    gadgets::por::PoRCompound,\n    merkle::MerkleTreeTrait,\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    por,\n    proof::ProofScheme,\n    util::NODE_SIZE,\n};\n\nuse crate::fallback::{generate_leaf_challenge_inner, FallbackPoSt, FallbackPoStCircuit, Sector};\n\npub struct FallbackPoStCompound<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    _t: PhantomData<Tree>,\n}\n\nimpl<C: Circuit<Bls12>, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters<C, P>\n    for FallbackPoStCompound<Tree>\n{\n    fn cache_prefix() -> String {\n        format!(\"proof-of-spacetime-fallback-{}\", Tree::display())\n    }\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait>\n    CompoundProof<'a, FallbackPoSt<'a, Tree>, FallbackPoStCircuit<Tree>>\n    for FallbackPoStCompound<Tree>\n{\n    fn generate_public_inputs(\n        pub_inputs: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        partition_k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        let mut inputs = Vec::new();\n\n        let por_pub_params = por::PublicParams {\n            leaves: (pub_params.sector_size as usize / NODE_SIZE),\n            private: true,\n        };\n\n        let num_sectors_per_chunk = pub_params.sector_count;\n\n        let partition_index = partition_k.unwrap_or(0);\n\n        let sectors = pub_inputs\n            .sectors\n            .chunks(num_sectors_per_chunk)\n            .nth(partition_index)\n            .ok_or_else(|| anyhow!(\"invalid number of sectors/partition index\"))?;\n\n        for (i, sector) in sectors.iter().enumerate() {\n            // 1. Inputs for verifying comm_r = H(comm_c || comm_r_last)\n            inputs.push(sector.comm_r.into());\n\n            // avoid rehashing fixed inputs\n            let mut challenge_hasher = Sha256::new();\n            challenge_hasher.update(AsRef::<[u8]>::as_ref(&pub_inputs.randomness));\n            challenge_hasher.update(&u64::from(sector.id).to_le_bytes()[..]);\n\n            // 2. 
Inputs for verifying inclusion paths\n            for n in 0..pub_params.challenge_count {\n                let challenge_index = ((partition_index * pub_params.sector_count + i)\n                    * pub_params.challenge_count\n                    + n) as u64;\n                let challenged_leaf = generate_leaf_challenge_inner::<\n                    <Tree::Hasher as Hasher>::Domain,\n                >(\n                    challenge_hasher.clone(), &pub_params, challenge_index\n                );\n\n                let por_pub_inputs = por::PublicInputs {\n                    commitment: None,\n                    challenge: challenged_leaf as usize,\n                };\n                let por_inputs = PoRCompound::<Tree>::generate_public_inputs(\n                    &por_pub_inputs,\n                    &por_pub_params,\n                    partition_k,\n                )?;\n\n                inputs.extend(por_inputs);\n            }\n        }\n        let num_inputs_per_sector = inputs.len() / sectors.len();\n\n        // duplicate the last one if too few sectors are available\n        while inputs.len() / num_inputs_per_sector < num_sectors_per_chunk {\n            let s = inputs[inputs.len() - num_inputs_per_sector..].to_vec();\n            inputs.extend_from_slice(&s);\n        }\n        assert_eq!(inputs.len(), num_inputs_per_sector * num_sectors_per_chunk);\n\n        Ok(inputs)\n    }\n\n    fn circuit(\n        pub_in: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        _priv_in: <FallbackPoStCircuit<Tree> as CircuitComponent>::ComponentPrivateInputs,\n        vanilla_proof: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::Proof,\n        pub_params: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        partition_k: Option<usize>,\n    ) -> Result<FallbackPoStCircuit<Tree>> {\n        let num_sectors_per_chunk = pub_params.sector_count;\n        ensure!(\n            pub_params.sector_count == vanilla_proof.sectors.len(),\n            \"vanilla proofs must equal sector_count: {} != {}\",\n            num_sectors_per_chunk,\n            vanilla_proof.sectors.len(),\n        );\n\n        let partition_index = partition_k.unwrap_or(0);\n        let sectors = pub_in\n            .sectors\n            .chunks(num_sectors_per_chunk)\n            .nth(partition_index)\n            .ok_or_else(|| anyhow!(\"invalid number of sectors/partition index\"))?;\n\n        let mut res_sectors = Vec::with_capacity(vanilla_proof.sectors.len());\n\n        for (i, vanilla_proof) in vanilla_proof.sectors.iter().enumerate() {\n            let pub_sector = if i < sectors.len() {\n                &sectors[i]\n            } else {\n                // Repeat the last sector, iff there are too few inputs to fill the circuit.\n                &sectors[sectors.len() - 1]\n            };\n\n            res_sectors.push(Sector::circuit(pub_sector, vanilla_proof)?);\n        }\n\n        assert_eq!(res_sectors.len(), num_sectors_per_chunk);\n\n        Ok(FallbackPoStCircuit {\n            prover_id: Some(pub_in.prover_id.into()),\n            sectors: res_sectors,\n        })\n    }\n\n    fn blank_circuit(\n        pub_params: &<FallbackPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n    ) -> FallbackPoStCircuit<Tree> {\n        let sectors = (0..pub_params.sector_count)\n            .map(|_| Sector::blank_circuit(pub_params))\n            .collect();\n\n        FallbackPoStCircuit {\n            prover_id: None,\n            sectors,\n        }\n    }\n}\n"
  },
  {
    "path": "storage-proofs-post/src/fallback/mod.rs",
    "content": "mod circuit;\nmod compound;\nmod vanilla;\n\npub use circuit::*;\npub use compound::*;\npub use vanilla::*;\n"
  },
  {
    "path": "storage-proofs-post/src/fallback/vanilla.rs",
    "content": "use std::collections::BTreeSet;\nuse std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse bellperson::bls::Fr;\nuse byteorder::{ByteOrder, LittleEndian};\nuse filecoin_hashers::{Domain, HashFunction, Hasher};\nuse generic_array::typenum::Unsigned;\nuse log::{error, trace};\nuse rayon::prelude::{\n    IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator,\n};\nuse serde::{de::DeserializeOwned, Deserialize, Serialize};\nuse sha2::{Digest, Sha256};\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    error::{Error, Result},\n    merkle::{MerkleProof, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper},\n    parameter_cache::ParameterSetMetadata,\n    proof::ProofScheme,\n    sector::SectorId,\n    util::{default_rows_to_discard, NODE_SIZE},\n};\n\n#[derive(Debug, Clone)]\npub struct SetupParams {\n    /// Size of the sector in bytes.\n    pub sector_size: u64,\n    /// Number of challenges per sector.\n    pub challenge_count: usize,\n    /// Number of challenged sectors.\n    pub sector_count: usize,\n    pub api_version: ApiVersion,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams {\n    /// Size of the sector in bytes.\n    pub sector_size: u64,\n    /// Number of challenges per sector.\n    pub challenge_count: usize,\n    /// Number of challenged sectors.\n    pub sector_count: usize,\n    pub api_version: ApiVersion,\n}\n\n#[derive(Debug, Default)]\npub struct ChallengeRequirements {\n    /// The sum of challenges across all challenged sectors. (even across partitions)\n    pub minimum_challenge_count: usize,\n}\n\nimpl ParameterSetMetadata for PublicParams {\n    fn identifier(&self) -> String {\n        format!(\n            \"FallbackPoSt::PublicParams{{sector_size: {}, challenge_count: {}, sector_count: {}}}\",\n            self.sector_size(),\n            self.challenge_count,\n            self.sector_count,\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.sector_size\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct PublicInputs<T: Domain> {\n    #[serde(bound = \"\")]\n    pub randomness: T,\n    #[serde(bound = \"\")]\n    pub prover_id: T,\n    #[serde(bound = \"\")]\n    pub sectors: Vec<PublicSector<T>>,\n    /// Partition index\n    pub k: Option<usize>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct PublicSector<T: Domain> {\n    pub id: SectorId,\n    #[serde(bound = \"\")]\n    pub comm_r: T,\n}\n\n#[derive(Debug)]\npub struct PrivateSector<'a, Tree: MerkleTreeTrait> {\n    pub tree: &'a MerkleTreeWrapper<\n        Tree::Hasher,\n        Tree::Store,\n        Tree::Arity,\n        Tree::SubTreeArity,\n        Tree::TopTreeArity,\n    >,\n    pub comm_c: <Tree::Hasher as Hasher>::Domain,\n    pub comm_r_last: <Tree::Hasher as Hasher>::Domain,\n}\n\n#[derive(Debug)]\npub struct PrivateInputs<'a, Tree: MerkleTreeTrait> {\n    pub sectors: &'a [PrivateSector<'a, Tree>],\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Proof<P: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"SectorProof<P>: Serialize\",\n        deserialize = \"SectorProof<P>: Deserialize<'de>\"\n    ))]\n    pub sectors: Vec<SectorProof<P>>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct SectorProof<Proof: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"MerkleProof<Proof::Hasher, Proof::Arity, Proof::SubTreeArity, Proof::TopTreeArity>: Serialize\",\n        deserialize = \"MerkleProof<Proof::Hasher, Proof::Arity, 
Proof::SubTreeArity, Proof::TopTreeArity>: DeserializeOwned\"\n    ))]\n    pub inclusion_proofs:\n        Vec<MerkleProof<Proof::Hasher, Proof::Arity, Proof::SubTreeArity, Proof::TopTreeArity>>,\n    pub comm_c: <Proof::Hasher as Hasher>::Domain,\n    pub comm_r_last: <Proof::Hasher as Hasher>::Domain,\n}\n\nimpl<P: MerkleProofTrait> SectorProof<P> {\n    pub fn leafs(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProofTrait::leaf)\n            .collect()\n    }\n\n    pub fn comm_r_last(&self) -> <P::Hasher as Hasher>::Domain {\n        self.inclusion_proofs[0].root()\n    }\n\n    pub fn commitments(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProofTrait::root)\n            .collect()\n    }\n\n    #[allow(clippy::type_complexity)]\n    pub fn paths(&self) -> Vec<Vec<(Vec<<P::Hasher as Hasher>::Domain>, usize)>> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProofTrait::path)\n            .collect()\n    }\n\n    pub fn as_options(&self) -> Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProofTrait::as_options)\n            .collect()\n    }\n\n    // Returns a read-only reference.\n    pub fn inclusion_proofs(\n        &self,\n    ) -> &Vec<MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>> {\n        &self.inclusion_proofs\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct FallbackPoSt<'a, Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    _t: PhantomData<&'a Tree>,\n}\n\npub fn generate_sector_challenges<T: Domain>(\n    randomness: T,\n    challenge_count: usize,\n    sector_set_len: u64,\n    prover_id: T,\n) -> Result<Vec<u64>> {\n    (0..challenge_count)\n        .map(|n| generate_sector_challenge(randomness, n, sector_set_len, prover_id))\n        .collect()\n}\n\n/// Generate a single sector challenge.\npub fn generate_sector_challenge<T: Domain>(\n    randomness: T,\n    n: usize,\n    sector_set_len: u64,\n    prover_id: T,\n) -> Result<u64> {\n    let mut hasher = Sha256::new();\n    hasher.update(AsRef::<[u8]>::as_ref(&prover_id));\n    hasher.update(AsRef::<[u8]>::as_ref(&randomness));\n    hasher.update(&n.to_le_bytes()[..]);\n\n    let hash = hasher.finalize();\n\n    let sector_challenge = LittleEndian::read_u64(&hash[..8]);\n    let sector_index = sector_challenge % sector_set_len;\n\n    Ok(sector_index)\n}\n\n/// Generate all leaf challenges for a single sector, such that each challenged leaf fits into the sector.\npub fn generate_leaf_challenges<T: Domain>(\n    pub_params: &PublicParams,\n    randomness: T,\n    sector_id: u64,\n    challenge_count: usize,\n) -> Vec<u64> {\n    let mut challenges = Vec::with_capacity(challenge_count);\n\n    let mut hasher = Sha256::new();\n    hasher.update(AsRef::<[u8]>::as_ref(&randomness));\n    hasher.update(&sector_id.to_le_bytes()[..]);\n\n    for challenge_index in 0..challenge_count {\n        let challenge =\n            generate_leaf_challenge_inner::<T>(hasher.clone(), pub_params, challenge_index as u64);\n        challenges.push(challenge)\n    }\n\n    challenges\n}\n\n/// Generates a single leaf challenge, such that the challenged leaf fits into the sector.\npub fn generate_leaf_challenge<T: Domain>(\n    pub_params: &PublicParams,\n    randomness: T,\n    sector_id: u64,\n    leaf_challenge_index: u64,\n) -> u64 {\n    let mut hasher = Sha256::new();\n    
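// The challenge preimage is randomness || sector_id || leaf_challenge_index; the first 8\n    // bytes of the Sha256 digest, read little-endian, are reduced modulo the number of leaves\n    // (sector_size / NODE_SIZE), so e.g. with NODE_SIZE = 32 a 2 KiB sector always yields a\n    // challenge in 0..64.\n    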
hasher.update(AsRef::<[u8]>::as_ref(&randomness));\n    hasher.update(&sector_id.to_le_bytes()[..]);\n\n    generate_leaf_challenge_inner::<T>(hasher, pub_params, leaf_challenge_index)\n}\n\npub fn generate_leaf_challenge_inner<T: Domain>(\n    mut hasher: Sha256,\n    pub_params: &PublicParams,\n    leaf_challenge_index: u64,\n) -> u64 {\n    hasher.update(&leaf_challenge_index.to_le_bytes()[..]);\n    let hash = hasher.finalize();\n\n    let leaf_challenge = LittleEndian::read_u64(&hash[..8]);\n\n    leaf_challenge % (pub_params.sector_size / NODE_SIZE as u64)\n}\n\n/// Generates a single vanilla proof, given the private inputs and sector challenges.\npub fn vanilla_proof<Tree: MerkleTreeTrait>(\n    sector_id: SectorId,\n    priv_inputs: &PrivateInputs<'_, Tree>,\n    challenges: &[u64],\n) -> Result<Proof<Tree::Proof>> {\n    ensure!(\n        priv_inputs.sectors.len() == 1,\n        \"vanilla_proof called with multiple sector proofs\"\n    );\n\n    let priv_sector = &priv_inputs.sectors[0];\n    let comm_c = priv_sector.comm_c;\n    let comm_r_last = priv_sector.comm_r_last;\n    let tree = priv_sector.tree;\n\n    let tree_leafs = tree.leafs();\n    let rows_to_discard = default_rows_to_discard(tree_leafs, Tree::Arity::to_usize());\n\n    trace!(\n        \"Generating proof for tree leafs {} and arity {}\",\n        tree_leafs,\n        Tree::Arity::to_usize(),\n    );\n\n    let inclusion_proofs = (0..challenges.len())\n        .into_par_iter()\n        .map(|challenged_leaf_index| {\n            let challenged_leaf = challenges[challenged_leaf_index];\n            let proof = tree.gen_cached_proof(challenged_leaf as usize, Some(rows_to_discard))?;\n\n            ensure!(\n                proof.validate(challenged_leaf as usize) && proof.root() == priv_sector.comm_r_last,\n                \"Generated vanilla proof for sector {} is invalid\",\n                sector_id\n            );\n\n            Ok(proof)\n        })\n        .collect::<Result<Vec<_>>>()?;\n\n    Ok(Proof {\n        sectors: vec![SectorProof {\n            inclusion_proofs,\n            comm_c,\n            comm_r_last,\n        }],\n    })\n}\n\nimpl<'a, Tree: 'a + MerkleTreeTrait> ProofScheme<'a> for FallbackPoSt<'a, Tree> {\n    type PublicParams = PublicParams;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<Tree::Hasher as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<'a, Tree>;\n    type Proof = Proof<Tree::Proof>;\n    type Requirements = ChallengeRequirements;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        Ok(PublicParams {\n            sector_size: sp.sector_size,\n            challenge_count: sp.challenge_count,\n            sector_count: sp.sector_count,\n            api_version: sp.api_version,\n        })\n    }\n\n    fn prove<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        let proofs = Self::prove_all_partitions(pub_params, pub_inputs, priv_inputs, 1)?;\n        let k = pub_inputs.k.unwrap_or(0);\n        // Because partition proofs require a common setup, the general ProofScheme implementation,\n        // which makes use of `ProofScheme::prove`, cannot be used here. 
Instead, we need to prove all\n        // partitions in one pass, as implemented by `prove_all_partitions` below.\n        assert!(\n            k < 1,\n            \"It is a programmer error to call FallbackPoSt::prove with more than one partition.\"\n        );\n\n        Ok(proofs[k].to_owned())\n    }\n\n    fn prove_all_partitions<'b>(\n        pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n        partition_count: usize,\n    ) -> Result<Vec<Self::Proof>> {\n        ensure!(\n            priv_inputs.sectors.len() == pub_inputs.sectors.len(),\n            \"inconsistent number of private and public sectors {} != {}\",\n            priv_inputs.sectors.len(),\n            pub_inputs.sectors.len(),\n        );\n\n        let num_sectors_per_chunk = pub_params.sector_count;\n        let num_sectors = pub_inputs.sectors.len();\n\n        ensure!(\n            num_sectors <= partition_count * num_sectors_per_chunk,\n            \"cannot prove the provided number of sectors: {} > {} * {}\",\n            num_sectors,\n            partition_count,\n            num_sectors_per_chunk,\n        );\n\n        let mut partition_proofs = Vec::new();\n\n        // Use `BTreeSet` so the failure result will be canonically ordered (sorted).\n        let mut faulty_sectors = BTreeSet::new();\n\n        for (j, (pub_sectors_chunk, priv_sectors_chunk)) in pub_inputs\n            .sectors\n            .chunks(num_sectors_per_chunk)\n            .zip(priv_inputs.sectors.chunks(num_sectors_per_chunk))\n            .enumerate()\n        {\n            let (mut proofs, mut faults) = pub_sectors_chunk\n                .par_iter()\n                .zip(priv_sectors_chunk.par_iter())\n                .enumerate()\n                .map(|(i, (pub_sector, priv_sector))| {\n                    let sector_id = pub_sector.id;\n                    let tree = priv_sector.tree;\n                    let tree_leafs = tree.leafs();\n                    let rows_to_discard =\n                        default_rows_to_discard(tree_leafs, Tree::Arity::to_usize());\n\n                    trace!(\n                        \"Generating proof for tree leafs {} and arity {}\",\n                        tree_leafs,\n                        Tree::Arity::to_usize(),\n                    );\n\n                    // avoid rehashing fixed inputs\n                    let mut challenge_hasher = Sha256::new();\n                    challenge_hasher.update(AsRef::<[u8]>::as_ref(&pub_inputs.randomness));\n                    challenge_hasher.update(&u64::from(sector_id).to_le_bytes()[..]);\n\n                    let (inclusion_proofs, faults) = (0..pub_params.challenge_count)\n                        .into_par_iter()\n                        .fold(\n                            || (Vec::new(), BTreeSet::new()),\n                            |(mut inclusion_proofs, mut faults), n| {\n                                let challenge_index =\n                                    ((j * num_sectors_per_chunk + i) * pub_params.challenge_count\n                                        + n) as u64;\n                                let challenged_leaf = generate_leaf_challenge_inner::<\n                                    <Tree::Hasher as Hasher>::Domain,\n                                >(\n                                    challenge_hasher.clone(),\n                                    pub_params,\n                                    challenge_index,\n                                
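// challenge_index numbers challenges globally: by partition, then by sector\n                                    // within the chunk, then by challenge within the sector.\n                                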
);\n                                let proof = tree.gen_cached_proof(\n                                    challenged_leaf as usize,\n                                    Some(rows_to_discard),\n                                );\n\n                                match proof {\n                                    Ok(proof) => {\n                                        if proof.validate(challenged_leaf as usize)\n                                            && proof.root() == priv_sector.comm_r_last\n                                            && pub_sector.comm_r\n                                                == <Tree::Hasher as Hasher>::Function::hash2(\n                                                    &priv_sector.comm_c,\n                                                    &priv_sector.comm_r_last,\n                                                )\n                                        {\n                                            inclusion_proofs.push(proof);\n                                        } else {\n                                            error!(\"faulty sector: {:?}\", sector_id);\n                                            faults.insert(sector_id);\n                                        }\n                                    }\n                                    Err(err) => {\n                                        error!(\"faulty sector: {:?} ({:?})\", sector_id, err);\n                                        faults.insert(sector_id);\n                                    }\n                                }\n                                (inclusion_proofs, faults)\n                            },\n                        )\n                        .reduce(\n                            || (Vec::new(), BTreeSet::new()),\n                            |(mut inclusion_proofs, mut faults), (p, f)| {\n                                inclusion_proofs.extend(p);\n                                faults.extend(f);\n                                (inclusion_proofs, faults)\n                            },\n                        );\n\n                    (\n                        SectorProof {\n                            inclusion_proofs,\n                            comm_c: priv_sector.comm_c,\n                            comm_r_last: priv_sector.comm_r_last,\n                        },\n                        faults,\n                    )\n                })\n                .fold(\n                    || (Vec::new(), BTreeSet::new()),\n                    |(mut sector_proofs, mut sector_faults), (sector_proof, mut faults)| {\n                        sector_faults.append(&mut faults);\n                        sector_proofs.push(sector_proof);\n                        (sector_proofs, sector_faults)\n                    },\n                )\n                .reduce(\n                    || (Vec::new(), BTreeSet::new()),\n                    |(mut sector_proofs, mut sector_faults), (proofs, mut faults)| {\n                        sector_proofs.extend(proofs);\n                        sector_faults.append(&mut faults);\n                        (sector_proofs, sector_faults)\n                    },\n                );\n\n            // If fewer than the required number of sectors were provided, we duplicate the last one\n            // to pad the proof out, so that it matches the fixed shape the circuit expects.\n            while proofs.len() < num_sectors_per_chunk {\n                proofs.push(proofs[proofs.len() - 1].clone());\n            }\n\n            
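// Record this partition's proofs; faults collected from any partition cause\n            // prove_all_partitions to fail below with Error::FaultySectors.\n            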
partition_proofs.push(Proof { sectors: proofs });\n            faulty_sectors.append(&mut faults);\n        }\n\n        if faulty_sectors.is_empty() {\n            Ok(partition_proofs)\n        } else {\n            Err(Error::FaultySectors(faulty_sectors.into_iter().collect()).into())\n        }\n    }\n\n    fn verify_all_partitions(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        partition_proofs: &[Self::Proof],\n    ) -> Result<bool> {\n        let challenge_count = pub_params.challenge_count;\n        let num_sectors_per_chunk = pub_params.sector_count;\n        let num_sectors = pub_inputs.sectors.len();\n\n        ensure!(\n            num_sectors <= num_sectors_per_chunk * partition_proofs.len(),\n            \"inconsistent number of sectors: {} > {} * {}\",\n            num_sectors,\n            num_sectors_per_chunk,\n            partition_proofs.len(),\n        );\n\n        for (j, (proof, pub_sectors_chunk)) in partition_proofs\n            .iter()\n            .zip(pub_inputs.sectors.chunks(num_sectors_per_chunk))\n            .enumerate()\n        {\n            ensure!(\n                pub_sectors_chunk.len() <= num_sectors_per_chunk,\n                \"inconsistent number of public sectors: {} > {}\",\n                pub_sectors_chunk.len(),\n                num_sectors_per_chunk,\n            );\n            ensure!(\n                proof.sectors.len() == num_sectors_per_chunk,\n                \"invalid number of sectors in the partition proof {}: {} != {}\",\n                j,\n                proof.sectors.len(),\n                num_sectors_per_chunk,\n            );\n\n            let is_valid = pub_sectors_chunk\n                .par_iter()\n                .zip(proof.sectors.par_iter())\n                .enumerate()\n                .map(|(i, (pub_sector, sector_proof))| {\n                    let sector_id = pub_sector.id;\n                    let comm_r = &pub_sector.comm_r;\n                    let comm_c = sector_proof.comm_c;\n                    let inclusion_proofs = &sector_proof.inclusion_proofs;\n\n                    // Verify that H(Comm_c || Comm_r_last) == Comm_R\n\n                    // comm_r_last is the root of the proof\n                    let comm_r_last = inclusion_proofs[0].root();\n\n                    if AsRef::<[u8]>::as_ref(&<Tree::Hasher as Hasher>::Function::hash2(\n                        &comm_c,\n                        &comm_r_last,\n                    )) != AsRef::<[u8]>::as_ref(comm_r)\n                    {\n                        error!(\"hash(comm_c || comm_r_last) != comm_r: {:?}\", sector_id);\n                        return Ok(false);\n                    }\n\n                    ensure!(\n                        challenge_count == inclusion_proofs.len(),\n                        \"unexpected number of inclusion proofs: {} != {}\",\n                        challenge_count,\n                        inclusion_proofs.len()\n                    );\n\n                    // avoid rehashing fixed inputs\n                    let mut challenge_hasher = Sha256::new();\n                    challenge_hasher.update(AsRef::<[u8]>::as_ref(&pub_inputs.randomness));\n                    challenge_hasher.update(&u64::from(sector_id).to_le_bytes()[..]);\n\n                    let is_valid_list = inclusion_proofs\n                        .par_iter()\n                        .enumerate()\n                        .map(|(n, inclusion_proof)| -> Result<bool> {\n                            let 
challenge_index =\n                                (j * num_sectors_per_chunk + i) * pub_params.challenge_count + n;\n                            let challenged_leaf =\n                                generate_leaf_challenge_inner::<<Tree::Hasher as Hasher>::Domain>(\n                                    challenge_hasher.clone(),\n                                    pub_params,\n                                    challenge_index as u64,\n                                );\n\n                            // validate all comm_r_lasts match\n                            if inclusion_proof.root() != comm_r_last {\n                                error!(\"inclusion proof root != comm_r_last: {:?}\", sector_id);\n                                return Ok(false);\n                            }\n\n                            // validate the path length\n                            let expected_path_length = inclusion_proof\n                                .expected_len(pub_params.sector_size as usize / NODE_SIZE);\n\n                            if expected_path_length != inclusion_proof.path().len() {\n                                error!(\"wrong path length: {:?}\", sector_id);\n                                return Ok(false);\n                            }\n\n                            if !inclusion_proof.validate(challenged_leaf as usize) {\n                                error!(\"invalid inclusion proof: {:?}\", sector_id);\n                                return Ok(false);\n                            }\n                            Ok(true)\n                        })\n                        .collect::<Result<Vec<bool>>>()?;\n\n                    Ok(is_valid_list.into_iter().all(|v| v))\n                })\n                .reduce(\n                    || Ok(true),\n                    |all_valid, is_valid| Ok(all_valid? && is_valid?),\n                )?;\n            if !is_valid {\n                return Ok(false);\n            }\n        }\n        Ok(true)\n    }\n\n    fn satisfies_requirements(\n        public_params: &Self::PublicParams,\n        requirements: &Self::Requirements,\n        partitions: usize,\n    ) -> bool {\n        let checked = partitions * public_params.sector_count;\n\n        assert_eq!(\n            partitions.checked_mul(public_params.sector_count),\n            Some(checked)\n        );\n        assert_eq!(\n            checked.checked_mul(public_params.challenge_count),\n            Some(checked * public_params.challenge_count)\n        );\n\n        checked * public_params.challenge_count >= requirements.minimum_challenge_count\n    }\n}\n"
  },
  {
    "path": "storage-proofs-post/src/lib.rs",
    "content": "#![deny(clippy::all, clippy::perf, clippy::correctness, rust_2018_idioms)]\n#![warn(clippy::unwrap_used)]\n\npub mod election;\npub mod fallback;\npub mod rational;\n"
  },
  {
    "path": "storage-proofs-post/src/rational/circuit.rs",
    "content": "use std::marker::PhantomData;\n\nuse bellperson::{\n    bls::{Bls12, Fr},\n    gadgets::num::AllocatedNum,\n    Circuit, ConstraintSystem, SynthesisError,\n};\nuse filecoin_hashers::{HashFunction, Hasher};\nuse storage_proofs_core::{\n    compound_proof::CircuitComponent, error::Result, gadgets::constraint, gadgets::por::PoRCircuit,\n    gadgets::variables::Root, merkle::MerkleTreeTrait,\n};\n\n/// This is the `RationalPoSt` circuit.\npub struct RationalPoStCircuit<Tree: MerkleTreeTrait> {\n    /// Paramters for the engine.\n    pub comm_rs: Vec<Option<Fr>>,\n    pub comm_cs: Vec<Option<Fr>>,\n    pub comm_r_lasts: Vec<Option<Fr>>,\n    pub leafs: Vec<Option<Fr>>,\n    #[allow(clippy::type_complexity)]\n    pub paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n    pub _t: PhantomData<Tree>,\n}\n\n#[derive(Clone, Default)]\npub struct ComponentPrivateInputs {}\n\nimpl<'a, Tree: MerkleTreeTrait> CircuitComponent for RationalPoStCircuit<Tree> {\n    type ComponentPrivateInputs = ComponentPrivateInputs;\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait> Circuit<Bls12> for RationalPoStCircuit<Tree> {\n    fn synthesize<CS: ConstraintSystem<Bls12>>(self, cs: &mut CS) -> Result<(), SynthesisError> {\n        let comm_rs = self.comm_rs;\n        let comm_cs = self.comm_cs;\n        let comm_r_lasts = self.comm_r_lasts;\n        let leafs = self.leafs;\n        let paths = self.paths;\n\n        assert_eq!(paths.len(), leafs.len());\n        assert_eq!(paths.len(), comm_rs.len());\n        assert_eq!(paths.len(), comm_cs.len());\n        assert_eq!(paths.len(), comm_r_lasts.len());\n\n        for (((i, comm_r_last), comm_c), comm_r) in comm_r_lasts\n            .iter()\n            .enumerate()\n            .zip(comm_cs.iter())\n            .zip(comm_rs.iter())\n        {\n            let comm_r_last_num =\n                AllocatedNum::alloc(cs.namespace(|| format!(\"comm_r_last_{}\", i)), || {\n                    comm_r_last\n                        .map(Into::into)\n                        .ok_or(SynthesisError::AssignmentMissing)\n                })?;\n\n            let comm_c_num = AllocatedNum::alloc(cs.namespace(|| format!(\"comm_c_{}\", i)), || {\n                comm_c\n                    .map(Into::into)\n                    .ok_or(SynthesisError::AssignmentMissing)\n            })?;\n\n            let comm_r_num = AllocatedNum::alloc(cs.namespace(|| format!(\"comm_r_{}\", i)), || {\n                comm_r\n                    .map(Into::into)\n                    .ok_or(SynthesisError::AssignmentMissing)\n            })?;\n\n            comm_r_num.inputize(cs.namespace(|| format!(\"comm_r_{}_input\", i)))?;\n\n            // Verify H(Comm_C || comm_r_last) == comm_r\n            {\n                let hash_num = <Tree::Hasher as Hasher>::Function::hash2_circuit(\n                    cs.namespace(|| format!(\"H_comm_c_comm_r_last_{}\", i)),\n                    &comm_c_num,\n                    &comm_r_last_num,\n                )?;\n\n                // Check actual equality\n                constraint::equal(\n                    cs,\n                    || format!(\"enforce_comm_c_comm_r_last_hash_comm_r_{}\", i),\n                    &comm_r_num,\n                    &hash_num,\n                );\n            }\n\n            PoRCircuit::<Tree>::synthesize(\n                cs.namespace(|| format!(\"challenge_inclusion{}\", i)),\n                Root::Val(leafs[i]),\n                paths[i].clone().into(),\n                
Root::from_allocated::<CS>(comm_r_last_num),\n                true,\n            )?;\n        }\n\n        Ok(())\n    }\n}\n"
  },
  {
    "path": "storage-proofs-post/src/rational/compound.rs",
    "content": "use std::marker::PhantomData;\n\nuse anyhow::ensure;\nuse bellperson::bls::{Bls12, Fr};\nuse bellperson::{Circuit, ConstraintSystem, SynthesisError};\nuse generic_array::typenum::U2;\nuse storage_proofs_core::{\n    compound_proof::{CircuitComponent, CompoundProof},\n    drgraph::graph_height,\n    error::Result,\n    gadgets::por::PoRCompound,\n    merkle::MerkleTreeTrait,\n    parameter_cache::{CacheableParameters, ParameterSetMetadata},\n    por,\n    proof::ProofScheme,\n    util::NODE_SIZE,\n};\n\nuse crate::rational::{RationalPoSt, RationalPoStCircuit};\n\npub struct RationalPoStCompound<Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    _t: PhantomData<Tree>,\n}\n\nimpl<C: Circuit<Bls12>, P: ParameterSetMetadata, Tree: MerkleTreeTrait> CacheableParameters<C, P>\n    for RationalPoStCompound<Tree>\n{\n    fn cache_prefix() -> String {\n        format!(\"proof-of-spacetime-rational-{}\", Tree::display())\n    }\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait>\n    CompoundProof<'a, RationalPoSt<'a, Tree>, RationalPoStCircuit<Tree>>\n    for RationalPoStCompound<Tree>\nwhere\n    Tree: 'static + MerkleTreeTrait,\n{\n    fn generate_public_inputs(\n        pub_in: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        pub_params: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<Vec<Fr>> {\n        let mut inputs = Vec::new();\n\n        let por_pub_params = por::PublicParams {\n            leaves: (pub_params.sector_size as usize / NODE_SIZE),\n            private: true,\n        };\n\n        ensure!(\n            pub_in.challenges.len() == pub_in.comm_rs.len(),\n            \"Missmatch in challenges and comm_rs\"\n        );\n\n        for (challenge, comm_r) in pub_in.challenges.iter().zip(pub_in.comm_rs.iter()) {\n            inputs.push((*comm_r).into());\n\n            let por_pub_inputs = por::PublicInputs {\n                commitment: None,\n                challenge: challenge.leaf as usize,\n            };\n            let por_inputs = PoRCompound::<Tree>::generate_public_inputs(\n                &por_pub_inputs,\n                &por_pub_params,\n                None,\n            )?;\n\n            inputs.extend(por_inputs);\n        }\n\n        Ok(inputs)\n    }\n\n    fn circuit(\n        pub_in: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::PublicInputs,\n        _priv_in: <RationalPoStCircuit<Tree> as CircuitComponent>::ComponentPrivateInputs,\n        vanilla_proof: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::Proof,\n        _pub_params: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n        _partition_k: Option<usize>,\n    ) -> Result<RationalPoStCircuit<Tree>> {\n        let comm_rs: Vec<_> = pub_in.comm_rs.iter().map(|c| Some((*c).into())).collect();\n        let comm_cs: Vec<_> = vanilla_proof\n            .comm_cs\n            .iter()\n            .map(|c| Some((*c).into()))\n            .collect();\n\n        let comm_r_lasts: Vec<_> = vanilla_proof\n            .commitments()\n            .into_iter()\n            .map(|c| Some(c.into()))\n            .collect();\n\n        let leafs: Vec<_> = vanilla_proof\n            .leafs()\n            .iter()\n            .map(|c| Some((*c).into()))\n            .collect();\n\n        let paths: Vec<Vec<_>> = vanilla_proof\n            .paths()\n            .iter()\n            .map(|v| {\n                v.iter()\n                    .map(|p| {\n                        (\n                            
(*p).0.iter().copied().map(Into::into).map(Some).collect(),\n                            Some(p.1),\n                        )\n                    })\n                    .collect()\n            })\n            .collect();\n\n        Ok(RationalPoStCircuit {\n            leafs,\n            comm_rs,\n            comm_cs,\n            comm_r_lasts,\n            paths,\n            _t: PhantomData,\n        })\n    }\n\n    fn blank_circuit(\n        pub_params: &<RationalPoSt<'a, Tree> as ProofScheme<'a>>::PublicParams,\n    ) -> RationalPoStCircuit<Tree> {\n        let challenges_count = pub_params.challenges_count;\n        let height = graph_height::<U2>(pub_params.sector_size as usize / NODE_SIZE);\n\n        let comm_rs = vec![None; challenges_count];\n        let comm_cs = vec![None; challenges_count];\n        let comm_r_lasts = vec![None; challenges_count];\n        let leafs = vec![None; challenges_count];\n        let paths = vec![vec![(vec![None; 1], None); height - 1]; challenges_count];\n\n        RationalPoStCircuit {\n            comm_rs,\n            comm_cs,\n            comm_r_lasts,\n            leafs,\n            paths,\n            _t: PhantomData,\n        }\n    }\n}\n\nimpl<'a, Tree: 'static + MerkleTreeTrait> RationalPoStCircuit<Tree> {\n    #[allow(clippy::type_complexity)]\n    pub fn synthesize<CS: ConstraintSystem<Bls12>>(\n        cs: &mut CS,\n        leafs: Vec<Option<Fr>>,\n        comm_rs: Vec<Option<Fr>>,\n        comm_cs: Vec<Option<Fr>>,\n        comm_r_lasts: Vec<Option<Fr>>,\n        paths: Vec<Vec<(Vec<Option<Fr>>, Option<usize>)>>,\n    ) -> Result<(), SynthesisError> {\n        Self {\n            leafs,\n            comm_rs,\n            comm_cs,\n            comm_r_lasts,\n            paths,\n            _t: PhantomData,\n        }\n        .synthesize(cs)\n    }\n}\n"
  },
  {
    "path": "storage-proofs-post/src/rational/mod.rs",
    "content": "mod circuit;\nmod compound;\nmod vanilla;\n\npub use circuit::*;\npub use compound::*;\npub use vanilla::*;\n"
  },
  {
    "path": "storage-proofs-post/src/rational/vanilla.rs",
    "content": "use std::collections::{BTreeMap, HashSet};\nuse std::marker::PhantomData;\n\nuse anyhow::{bail, ensure, Context};\nuse blake2b_simd::blake2b;\nuse byteorder::{ByteOrder, LittleEndian};\nuse filecoin_hashers::{Domain, HashFunction, Hasher};\nuse serde::{de::DeserializeOwned, Deserialize, Serialize};\nuse storage_proofs_core::{\n    error::{Error, Result},\n    merkle::{MerkleProof, MerkleProofTrait, MerkleTreeTrait, MerkleTreeWrapper},\n    parameter_cache::ParameterSetMetadata,\n    proof::{NoRequirements, ProofScheme},\n    sector::{OrderedSectorSet, SectorId},\n    util::NODE_SIZE,\n};\n\n#[derive(Debug, Clone)]\npub struct SetupParams {\n    /// The size of a sector.\n    pub sector_size: u64,\n    // TODO: can we drop this?\n    /// How many challenges there are in total.\n    pub challenges_count: usize,\n}\n\n#[derive(Debug, Clone)]\npub struct PublicParams {\n    /// The size of a sector.\n    pub sector_size: u64,\n    /// How many challenges there are in total.\n    pub challenges_count: usize,\n}\n\nimpl ParameterSetMetadata for PublicParams {\n    fn identifier(&self) -> String {\n        format!(\n            \"RationalPoSt::PublicParams{{sector_size: {} challenges_count: {}}}\",\n            self.sector_size(),\n            self.challenges_count,\n        )\n    }\n\n    fn sector_size(&self) -> u64 {\n        self.sector_size\n    }\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct PublicInputs<T: Domain> {\n    /// The challenges, which leafs to prove.\n    pub challenges: Vec<Challenge>,\n    pub faults: OrderedSectorSet,\n    #[serde(bound = \"\")]\n    pub comm_rs: Vec<T>,\n}\n\n#[derive(Debug, Clone)]\n#[allow(clippy::type_complexity)]\npub struct PrivateInputs<'a, Tree: 'a + MerkleTreeTrait> {\n    pub trees: &'a BTreeMap<\n        SectorId,\n        &'a MerkleTreeWrapper<\n            Tree::Hasher,\n            Tree::Store,\n            Tree::Arity,\n            Tree::SubTreeArity,\n            Tree::TopTreeArity,\n        >,\n    >,\n    pub comm_cs: &'a Vec<<Tree::Hasher as Hasher>::Domain>,\n    pub comm_r_lasts: &'a Vec<<Tree::Hasher as Hasher>::Domain>,\n}\n\n#[derive(Debug, Clone, Serialize, Deserialize)]\npub struct Proof<P: MerkleProofTrait> {\n    #[serde(bound(\n        serialize = \"MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>: Serialize\",\n        deserialize = \"MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>: DeserializeOwned\"\n    ))]\n    inclusion_proofs: Vec<MerkleProof<P::Hasher, P::Arity, P::SubTreeArity, P::TopTreeArity>>,\n    pub comm_cs: Vec<<P::Hasher as Hasher>::Domain>,\n}\n\nimpl<P: MerkleProofTrait> Proof<P> {\n    pub fn leafs(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProof::leaf)\n            .collect()\n    }\n\n    pub fn commitments(&self) -> Vec<<P::Hasher as Hasher>::Domain> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProof::root)\n            .collect()\n    }\n\n    #[allow(clippy::type_complexity)]\n    pub fn paths(&self) -> Vec<Vec<(Vec<<P::Hasher as Hasher>::Domain>, usize)>> {\n        self.inclusion_proofs\n            .iter()\n            .map(MerkleProof::path)\n            .collect()\n    }\n}\n\n#[derive(Debug, Clone)]\npub struct RationalPoSt<'a, Tree>\nwhere\n    Tree: MerkleTreeTrait,\n{\n    _t: PhantomData<&'a Tree>,\n}\n\nimpl<'a, Tree: 'a + MerkleTreeTrait> ProofScheme<'a> for RationalPoSt<'a, Tree> {\n    type PublicParams = 
PublicParams;\n    type SetupParams = SetupParams;\n    type PublicInputs = PublicInputs<<Tree::Hasher as Hasher>::Domain>;\n    type PrivateInputs = PrivateInputs<'a, Tree>;\n    type Proof = Proof<Tree::Proof>;\n    type Requirements = NoRequirements;\n\n    fn setup(sp: &Self::SetupParams) -> Result<Self::PublicParams> {\n        Ok(PublicParams {\n            sector_size: sp.sector_size,\n            challenges_count: sp.challenges_count,\n        })\n    }\n\n    fn prove<'b>(\n        _pub_params: &'b Self::PublicParams,\n        pub_inputs: &'b Self::PublicInputs,\n        priv_inputs: &'b Self::PrivateInputs,\n    ) -> Result<Self::Proof> {\n        ensure!(\n            pub_inputs.challenges.len() == pub_inputs.comm_rs.len(),\n            \"mismatched challenges and comm_rs\"\n        );\n        ensure!(\n            pub_inputs.challenges.len() == priv_inputs.comm_cs.len(),\n            \"mismatched challenges and comm_cs\"\n        );\n        ensure!(\n            pub_inputs.challenges.len() == priv_inputs.comm_r_lasts.len(),\n            \"mismatched challenges and comm_r_lasts\"\n        );\n        let challenges = &pub_inputs.challenges;\n\n        let proofs = challenges\n            .iter()\n            .zip(priv_inputs.comm_r_lasts.iter())\n            .map(|(challenge, comm_r_last)| {\n                let challenged_leaf = challenge.leaf;\n\n                if let Some(tree) = priv_inputs.trees.get(&challenge.sector) {\n                    ensure!(comm_r_last == &tree.root(), Error::InvalidCommitment);\n\n                    tree.gen_cached_proof(challenged_leaf as usize, None)\n                } else {\n                    bail!(Error::MalformedInput);\n                }\n            })\n            .collect::<Result<Vec<_>>>()?;\n\n        Ok(Proof {\n            inclusion_proofs: proofs,\n            comm_cs: priv_inputs.comm_cs.to_vec(),\n        })\n    }\n\n    fn verify(\n        pub_params: &Self::PublicParams,\n        pub_inputs: &Self::PublicInputs,\n        proof: &Self::Proof,\n    ) -> Result<bool> {\n        let challenges = &pub_inputs.challenges;\n\n        ensure!(\n            challenges.len() == pub_inputs.comm_rs.len() as usize,\n            Error::MalformedInput\n        );\n\n        ensure!(\n            challenges.len() == proof.inclusion_proofs.len(),\n            Error::MalformedInput\n        );\n\n        // validate each proof\n        for (((merkle_proof, challenge), comm_r), comm_c) in proof\n            .inclusion_proofs\n            .iter()\n            .zip(challenges.iter())\n            .zip(pub_inputs.comm_rs.iter())\n            .zip(proof.comm_cs.iter())\n        {\n            let challenged_leaf = challenge.leaf;\n\n            // verify that H(Comm_c || Comm_r_last) == Comm_R\n            // comm_r_last is the root of the proof\n            let comm_r_last = merkle_proof.root();\n\n            if AsRef::<[u8]>::as_ref(&<Tree::Hasher as Hasher>::Function::hash2(\n                comm_c,\n                &comm_r_last,\n            )) != AsRef::<[u8]>::as_ref(&comm_r)\n            {\n                return Ok(false);\n            }\n\n            // validate the path length\n            let expected_path_length =\n                merkle_proof.expected_len(pub_params.sector_size as usize / NODE_SIZE);\n\n            if expected_path_length != merkle_proof.path().len() {\n                return Ok(false);\n            }\n\n            if !merkle_proof.validate(challenged_leaf as usize) {\n                return Ok(false);\n          
  }\n        }\n\n        Ok(true)\n    }\n}\n\n/// A challenge specifying a sector and leaf.\n#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]\npub struct Challenge {\n    /// The identifier of the challenged sector.\n    pub sector: SectorId,\n    /// The leaf index this challenge points at.\n    pub leaf: u64,\n}\n\n/// Rational PoSt specific challenge derivation.\npub fn derive_challenges(\n    challenge_count: usize,\n    sector_size: u64,\n    sectors: &OrderedSectorSet,\n    seed: &[u8],\n    faults: &OrderedSectorSet,\n) -> Result<Vec<Challenge>> {\n    (0..challenge_count)\n        .map(|n| {\n            let mut attempt = 0;\n            let mut attempted_sectors = HashSet::new();\n            loop {\n                let c = derive_challenge(seed, n as u64, attempt, sector_size, sectors)?;\n\n                // check for faulty sector\n                if !faults.contains(&c.sector) {\n                    // valid challenge: the sector is not in the fault set\n                    return Ok(c);\n                } else {\n                    attempt += 1;\n                    attempted_sectors.insert(c.sector);\n\n                    ensure!(\n                        attempted_sectors.len() < sectors.len(),\n                        \"all sectors are faulty\"\n                    );\n                }\n            }\n        })\n        .collect()\n}\n\nfn derive_challenge(\n    seed: &[u8],\n    n: u64,\n    attempt: u64,\n    sector_size: u64,\n    sectors: &OrderedSectorSet,\n) -> Result<Challenge> {\n    let mut data = seed.to_vec();\n    data.extend_from_slice(&n.to_le_bytes()[..]);\n    data.extend_from_slice(&attempt.to_le_bytes()[..]);\n\n    let hash = blake2b(&data);\n    let challenge_bytes = hash.as_bytes();\n    let sector_challenge = LittleEndian::read_u64(&challenge_bytes[..8]);\n    let leaf_challenge = LittleEndian::read_u64(&challenge_bytes[8..16]);\n\n    let sector_index = (sector_challenge % sectors.len() as u64) as usize;\n    let sector = *sectors\n        .iter()\n        .nth(sector_index)\n        .context(\"invalid challenge generated\")?;\n\n    Ok(Challenge {\n        sector,\n        leaf: leaf_challenge % (sector_size / NODE_SIZE as u64),\n    })\n}\n\n#[cfg(test)]\nmod tests {\n    use super::*;\n\n    use std::collections::BTreeSet;\n\n    #[test]\n    fn test_derive_challenges_fails_on_all_faulty() {\n        let mut sectors = BTreeSet::new();\n        sectors.insert(SectorId::from(1));\n        sectors.insert(SectorId::from(2));\n\n        let mut faults = BTreeSet::new();\n        faults.insert(SectorId::from(1));\n        faults.insert(SectorId::from(2));\n\n        let seed = vec![0u8];\n\n        assert!(derive_challenges(10, 1024, &sectors, &seed, &faults).is_err());\n    }\n}\n"
  },
  {
    "path": "storage-proofs-post/tests/election_circuit.rs",
    "content": "use std::collections::BTreeMap;\nuse std::marker::PhantomData;\n\nuse bellperson::{\n    bls::{Bls12, Fr},\n    util_cs::test_cs::TestConstraintSystem,\n    Circuit,\n};\nuse ff::Field;\nuse filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher};\nuse generic_array::typenum::{U0, U8};\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    compound_proof::CompoundProof,\n    merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait},\n    proof::ProofScheme,\n    sector::SectorId,\n    util::NODE_SIZE,\n    TEST_SEED,\n};\nuse storage_proofs_post::election::{\n    self, generate_candidates, ElectionPoSt, ElectionPoStCircuit, ElectionPoStCompound,\n};\nuse tempfile::tempdir;\n\n#[test]\nfn test_election_post_circuit_poseidon() {\n    test_election_post_circuit::<LCTree<PoseidonHasher, U8, U0, U0>>(22_940);\n}\n\nfn test_election_post_circuit<Tree: 'static + MerkleTreeTrait>(expected_constraints: usize) {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64 * get_base_tree_count::<Tree>();\n    let sector_size = leaves * NODE_SIZE;\n\n    let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n    let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n\n    let pub_params = election::PublicParams {\n        sector_size: sector_size as u64,\n        challenge_count: 20,\n        challenged_nodes: 1,\n    };\n\n    let mut sectors: Vec<SectorId> = Vec::new();\n    let mut trees = BTreeMap::new();\n\n    let temp_dir = tempdir().unwrap();\n    let temp_path = temp_dir.path();\n\n    for i in 0..5 {\n        sectors.push(i.into());\n        let (_data, tree) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n        trees.insert(i.into(), tree);\n    }\n\n    let candidates =\n        generate_candidates::<Tree>(&pub_params, &sectors, &trees, prover_id, randomness).unwrap();\n\n    let candidate = &candidates[0];\n    let tree = trees.remove(&candidate.sector_id).unwrap();\n    let comm_r_last = tree.root();\n    let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n    let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n\n    let pub_inputs = election::PublicInputs {\n        randomness,\n        sector_id: candidate.sector_id,\n        prover_id,\n        comm_r,\n        partial_ticket: candidate.partial_ticket,\n        sector_challenge_index: 0,\n    };\n\n    let priv_inputs = election::PrivateInputs::<Tree> {\n        tree,\n        comm_c,\n        comm_r_last,\n    };\n\n    let proof = ElectionPoSt::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs)\n        .expect(\"proving failed\");\n\n    let is_valid = ElectionPoSt::<Tree>::verify(&pub_params, &pub_inputs, &proof)\n        .expect(\"verification failed\");\n    assert!(is_valid);\n\n    // actual circuit test\n\n    let paths = proof\n        .paths()\n        .iter()\n        .map(|p| {\n            p.iter()\n                .map(|v| {\n                    (\n                        v.0.iter().copied().map(Into::into).map(Some).collect(),\n                        Some(v.1),\n                    )\n                })\n                .collect::<Vec<_>>()\n        })\n        .collect();\n    let leafs: Vec<_> = proof.leafs().iter().map(|l| Some((*l).into())).collect();\n\n    let mut cs = TestConstraintSystem::<Bls12>::new();\n\n    let instance = ElectionPoStCircuit::<Tree> {\n        leafs,\n        paths,\n        comm_r: Some(comm_r.into()),\n   
     comm_c: Some(comm_c.into()),\n        comm_r_last: Some(comm_r_last.into()),\n        partial_ticket: Some(candidate.partial_ticket),\n        randomness: Some(randomness.into()),\n        prover_id: Some(prover_id.into()),\n        sector_id: Some(candidate.sector_id.into()),\n        _t: PhantomData,\n    };\n\n    instance\n        .synthesize(&mut cs)\n        .expect(\"failed to synthesize circuit\");\n\n    assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n    assert_eq!(cs.num_inputs(), 23, \"wrong number of inputs\");\n    assert_eq!(\n        cs.num_constraints(),\n        expected_constraints,\n        \"wrong number of constraints\"\n    );\n    assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n    let generated_inputs =\n        ElectionPoStCompound::<Tree>::generate_public_inputs(&pub_inputs, &pub_params, None)\n            .unwrap();\n    let expected_inputs = cs.get_inputs();\n\n    for ((input, label), generated_input) in\n        expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n    {\n        assert_eq!(input, generated_input, \"{}\", label);\n    }\n\n    assert_eq!(\n        generated_inputs.len(),\n        expected_inputs.len() - 1,\n        \"inputs are not the same length\"\n    );\n}\n"
  },
  {
    "path": "storage-proofs-post/tests/election_compound.rs",
    "content": "use std::collections::BTreeMap;\n\nuse bellperson::{\n    util_cs::{metric_cs::MetricCS, test_cs::TestConstraintSystem},\n    Circuit,\n};\nuse filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher};\nuse generic_array::typenum::{U0, U8};\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    compound_proof::{self, CompoundProof},\n    merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait},\n    proof::NoRequirements,\n    sector::SectorId,\n    util::NODE_SIZE,\n    TEST_SEED,\n};\nuse storage_proofs_post::election::{\n    generate_candidates, ElectionPoStCompound, PrivateInputs, PublicInputs, SetupParams,\n};\nuse tempfile::tempdir;\n\n#[ignore]\n#[test]\nfn test_election_post_compound_poseidon() {\n    test_election_post_compound::<LCTree<PoseidonHasher, U8, U0, U0>>();\n}\n\nfn test_election_post_compound<Tree: 'static + MerkleTreeTrait>() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64 * get_base_tree_count::<Tree>();\n    let sector_size = (leaves * NODE_SIZE) as u64;\n    let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n    let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params: SetupParams {\n            sector_size,\n            challenge_count: 20,\n            challenged_nodes: 1,\n        },\n        partitions: None,\n        priority: true,\n    };\n\n    let mut sectors: Vec<SectorId> = Vec::new();\n    let mut trees = BTreeMap::new();\n\n    let temp_dir = tempdir().unwrap();\n    let temp_path = temp_dir.path();\n\n    for i in 0..5 {\n        sectors.push(i.into());\n        let (_data, tree) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n        trees.insert(i.into(), tree);\n    }\n\n    let pub_params = ElectionPoStCompound::<Tree>::setup(&setup_params).expect(\"setup failed\");\n\n    let candidates = generate_candidates::<Tree>(\n        &pub_params.vanilla_params,\n        &sectors,\n        &trees,\n        prover_id,\n        randomness,\n    )\n    .unwrap();\n\n    let candidate = &candidates[0];\n    let tree = trees.remove(&candidate.sector_id).unwrap();\n    let comm_r_last = tree.root();\n    let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n    let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n\n    let pub_inputs = PublicInputs {\n        randomness,\n        sector_id: candidate.sector_id,\n        prover_id,\n        comm_r,\n        partial_ticket: candidate.partial_ticket,\n        sector_challenge_index: 0,\n    };\n\n    let priv_inputs = PrivateInputs::<Tree> {\n        tree,\n        comm_c,\n        comm_r_last,\n    };\n\n    {\n        let (circuit, inputs) =\n            ElectionPoStCompound::circuit_for_test(&pub_params, &pub_inputs, &priv_inputs).unwrap();\n\n        let mut cs = TestConstraintSystem::new();\n\n        circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n\n        if !cs.is_satisfied() {\n            panic!(\n                \"failed to satisfy: {:?}\",\n                cs.which_is_unsatisfied().unwrap()\n            );\n        }\n        assert!(\n            cs.verify(&inputs),\n            \"verification failed with TestContraintSystem and generated inputs\"\n        );\n    }\n\n    // Use this to debug differences between blank and regular circuit generation.\n    {\n        let (circuit1, _inputs) =\n            
ElectionPoStCompound::circuit_for_test(&pub_params, &pub_inputs, &priv_inputs).unwrap();\n\n        let blank_circuit = ElectionPoStCompound::<Tree>::blank_circuit(&pub_params.vanilla_params);\n\n        let mut cs_blank = MetricCS::new();\n        blank_circuit\n            .synthesize(&mut cs_blank)\n            .expect(\"failed to synthesize\");\n\n        let a = cs_blank.pretty_print_list();\n\n        let mut cs1 = TestConstraintSystem::new();\n        circuit1.synthesize(&mut cs1).expect(\"failed to synthesize\");\n        let b = cs1.pretty_print_list();\n\n        for (i, (a, b)) in a.chunks(100).zip(b.chunks(100)).enumerate() {\n            assert_eq!(a, b, \"failed at chunk {}\", i);\n        }\n    }\n    let blank_groth_params =\n        ElectionPoStCompound::<Tree>::groth_params(Some(rng), &pub_params.vanilla_params)\n            .expect(\"failed to generate groth params\");\n\n    let proof =\n        ElectionPoStCompound::prove(&pub_params, &pub_inputs, &priv_inputs, &blank_groth_params)\n            .expect(\"failed while proving\");\n\n    let verified = ElectionPoStCompound::verify(&pub_params, &pub_inputs, &proof, &NoRequirements)\n        .expect(\"failed while verifying\");\n\n    assert!(verified);\n}\n"
  },
  {
    "path": "storage-proofs-post/tests/election_vanilla.rs",
    "content": "use std::collections::BTreeMap;\n\nuse filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher};\nuse generic_array::typenum::{U0, U2, U8};\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait},\n    proof::ProofScheme,\n    sector::SectorId,\n    util::NODE_SIZE,\n    TEST_SEED,\n};\nuse storage_proofs_post::election::{\n    generate_candidates, ElectionPoSt, PrivateInputs, PublicInputs, PublicParams,\n};\nuse tempfile::tempdir;\n\n#[test]\nfn test_election_post_poseidon_base_8() {\n    test_election_post::<LCTree<PoseidonHasher, U8, U0, U0>>();\n}\n\n#[test]\nfn test_election_post_poseidon_sub_8_8() {\n    test_election_post::<LCTree<PoseidonHasher, U8, U8, U0>>();\n}\n\n#[test]\nfn test_election_post_poseidon_top_8_8_2() {\n    test_election_post::<LCTree<PoseidonHasher, U8, U8, U2>>();\n}\n\nfn test_election_post<Tree: 'static + MerkleTreeTrait>() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64 * get_base_tree_count::<Tree>();\n    let sector_size = leaves * NODE_SIZE;\n\n    let pub_params = PublicParams {\n        sector_size: sector_size as u64,\n        challenge_count: 40,\n        challenged_nodes: 1,\n    };\n\n    let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n    let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n\n    let mut sectors: Vec<SectorId> = Vec::new();\n    let mut trees = BTreeMap::new();\n\n    let temp_dir = tempdir().expect(\"tempdir failure\");\n    let temp_path = temp_dir.path();\n\n    for i in 0..5 {\n        sectors.push(i.into());\n        let (_data, tree) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n        trees.insert(i.into(), tree);\n    }\n\n    let candidates =\n        generate_candidates::<Tree>(&pub_params, &sectors, &trees, prover_id, randomness)\n            .expect(\"generate candidates failure\");\n\n    let candidate = &candidates[0];\n    let tree = trees\n        .remove(&candidate.sector_id)\n        .expect(\"trees.remove failure\");\n    let comm_r_last = tree.root();\n    let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n    let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n\n    let pub_inputs = PublicInputs {\n        randomness,\n        sector_id: candidate.sector_id,\n        prover_id,\n        comm_r,\n        partial_ticket: candidate.partial_ticket,\n        sector_challenge_index: 0,\n    };\n\n    let priv_inputs = PrivateInputs::<Tree> {\n        tree,\n        comm_c,\n        comm_r_last,\n    };\n\n    let proof = ElectionPoSt::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs)\n        .expect(\"proving failed\");\n\n    let is_valid = ElectionPoSt::<Tree>::verify(&pub_params, &pub_inputs, &proof)\n        .expect(\"verification failed\");\n\n    assert!(is_valid);\n}\n"
  },
  {
    "path": "storage-proofs-post/tests/fallback_circuit.rs",
    "content": "use bellperson::{\n    bls::{Bls12, Fr},\n    util_cs::{bench_cs::BenchCS, test_cs::TestConstraintSystem},\n    Circuit,\n};\nuse ff::Field;\nuse filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher};\nuse generic_array::typenum::{U0, U2, U4, U8};\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    compound_proof::CompoundProof,\n    error::Result,\n    merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait, OctMerkleTree},\n    proof::ProofScheme,\n    util::NODE_SIZE,\n    TEST_SEED,\n};\nuse storage_proofs_post::fallback::{\n    self, FallbackPoSt, FallbackPoStCircuit, FallbackPoStCompound, PrivateSector, PublicSector,\n    Sector,\n};\nuse tempfile::tempdir;\n\n#[test]\nfn test_fallback_post_circuit_poseidon_single_partition_base_8() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(3, 3, 1, 19, 16_869);\n}\n\n#[test]\nfn test_fallback_post_circuit_poseidon_single_partition_sub_8_4() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(3, 3, 1, 19, 22_674);\n}\n\n#[test]\nfn test_fallback_post_circuit_poseidon_single_partition_top_8_4_2() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(3, 3, 1, 19, 27_384);\n}\n\n#[test]\nfn test_fallback_post_circuit_poseidon_two_partitions_base_8() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(4, 2, 2, 13, 11_246);\n}\n\n#[test]\nfn test_fallback_post_circuit_poseidon_single_partition_smaller_base_8() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(2, 3, 1, 19, 16_869);\n}\n\n#[test]\nfn test_fallback_post_circuit_poseidon_two_partitions_smaller_base_8() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 3, 2, 19, 16_869);\n}\n\nfn test_fallback_post<Tree: 'static + MerkleTreeTrait>(\n    total_sector_count: usize,\n    sector_count: usize,\n    partitions: usize,\n    expected_num_inputs: usize,\n    expected_constraints: usize,\n) where\n    Tree::Store: 'static,\n{\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64 * get_base_tree_count::<Tree>();\n    let sector_size = leaves * NODE_SIZE;\n    let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n    let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n\n    let pub_params = fallback::PublicParams {\n        sector_size: sector_size as u64,\n        challenge_count: 5,\n        sector_count,\n        api_version: ApiVersion::V1_1_0,\n    };\n\n    let temp_dir = tempdir().expect(\"tempdir failure\");\n    let temp_path = temp_dir.path();\n\n    let mut pub_sectors = Vec::new();\n    let mut priv_sectors = Vec::new();\n    let mut trees = Vec::new();\n\n    for _i in 0..total_sector_count {\n        let (_data, tree) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n        trees.push(tree);\n    }\n\n    for (i, tree) in trees.iter().enumerate() {\n        let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let comm_r_last = tree.root();\n\n        priv_sectors.push(PrivateSector {\n            tree,\n            comm_c,\n            comm_r_last,\n        });\n\n        let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n        pub_sectors.push(PublicSector {\n            id: (i as u64).into(),\n            comm_r,\n        });\n    }\n\n    let pub_inputs = fallback::PublicInputs {\n        randomness,\n        prover_id,\n        sectors: pub_sectors.clone(),\n        k: 
None,\n    };\n\n    let priv_inputs = fallback::PrivateInputs::<Tree> {\n        sectors: &priv_sectors,\n    };\n\n    let proofs = FallbackPoSt::<Tree>::prove_all_partitions(\n        &pub_params,\n        &pub_inputs,\n        &priv_inputs,\n        partitions,\n    )\n    .expect(\"proving failed\");\n    assert_eq!(proofs.len(), partitions);\n\n    let is_valid = FallbackPoSt::<Tree>::verify_all_partitions(&pub_params, &pub_inputs, &proofs)\n        .expect(\"verification failed\");\n    assert!(is_valid);\n\n    // actual circuit test\n\n    for (j, proof) in proofs.iter().enumerate() {\n        // iterates over each partition\n        let circuit_sectors = proof\n            .sectors\n            .iter()\n            .enumerate()\n            .map(|(i, proof)| {\n                // index into sectors by the correct offset\n                let i = j * sector_count + i;\n\n                if i < pub_sectors.len() {\n                    Sector::circuit(&pub_sectors[i], proof)\n                } else {\n                    // duplicated last one\n                    let k = pub_sectors.len() - 1;\n                    Sector::circuit(&pub_sectors[k], proof)\n                }\n            })\n            .collect::<Result<_>>()\n            .expect(\"circuit sectors failure\");\n\n        let mut cs = TestConstraintSystem::<Bls12>::new();\n\n        let instance = FallbackPoStCircuit::<Tree> {\n            sectors: circuit_sectors,\n            prover_id: Some(prover_id.into()),\n        };\n\n        instance\n            .synthesize(&mut cs)\n            .expect(\"failed to synthesize circuit\");\n\n        assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n        assert_eq!(\n            cs.num_inputs(),\n            expected_num_inputs,\n            \"wrong number of inputs\"\n        );\n        assert_eq!(\n            cs.num_constraints(),\n            expected_constraints,\n            \"wrong number of constraints\"\n        );\n        assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n        let generated_inputs =\n            FallbackPoStCompound::<Tree>::generate_public_inputs(&pub_inputs, &pub_params, Some(j))\n                .expect(\"generate_public_inputs failure\");\n        let expected_inputs = cs.get_inputs();\n\n        for ((input, label), generated_input) in\n            expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n        {\n            assert_eq!(input, generated_input, \"{}\", label);\n        }\n\n        assert_eq!(\n            generated_inputs.len(),\n            expected_inputs.len() - 1,\n            \"inputs are not the same length\"\n        );\n\n        assert!(\n            cs.verify(&generated_inputs),\n            \"verification failed with TestConstraintSystem and generated inputs\"\n        );\n    }\n}\n\n#[test]\n#[ignore]\nfn test_fallback_post_circuit_poseidon_base_8_bench_cs() {\n    let params = fallback::SetupParams {\n        sector_size: 1024 * 1024 * 1024 * 32u64,\n        challenge_count: 10,\n        sector_count: 5,\n        api_version: ApiVersion::V1_1_0,\n    };\n\n    let pp = FallbackPoSt::<OctMerkleTree<PoseidonHasher>>::setup(&params)\n        .expect(\"fallback post setup failure\");\n\n    let mut cs = BenchCS::<Bls12>::new();\n    FallbackPoStCompound::<OctMerkleTree<PoseidonHasher>>::blank_circuit(&pp)\n        .synthesize(&mut cs)\n        .expect(\"blank circuit failure\");\n\n    assert_eq!(cs.num_constraints(), 266_665);\n}\n"
  },
  {
    "path": "storage-proofs-post/tests/fallback_compound.rs",
    "content": "use bellperson::{\n    util_cs::{metric_cs::MetricCS, test_cs::TestConstraintSystem},\n    Circuit,\n};\nuse filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher};\nuse generic_array::typenum::{U0, U2, U4, U8};\nuse pretty_assertions::assert_eq;\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    compound_proof::{self, CompoundProof},\n    merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait},\n    util::NODE_SIZE,\n    TEST_SEED,\n};\nuse storage_proofs_post::fallback::{\n    ChallengeRequirements, FallbackPoStCompound, PrivateInputs, PrivateSector, PublicInputs,\n    PublicSector, SetupParams,\n};\nuse tempfile::tempdir;\n\n#[ignore]\n#[test]\nfn test_fallback_post_compound_poseidon_single_partition_base_8() {\n    fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(15, 15, 1, ApiVersion::V1_0_0);\n    fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(15, 15, 1, ApiVersion::V1_1_0);\n}\n\n#[ignore]\n#[test]\nfn test_fallback_post_compound_poseidon_single_partition_sub_8_4() {\n    fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(3, 3, 1, ApiVersion::V1_0_0);\n    fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(3, 3, 1, ApiVersion::V1_1_0);\n}\n\n#[ignore]\n#[test]\nfn test_fallback_post_compound_poseidon_single_partition_top_8_4_2() {\n    fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(3, 3, 1, ApiVersion::V1_0_0);\n    fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(3, 3, 1, ApiVersion::V1_1_0);\n}\n\n#[ignore]\n#[test]\nfn test_fallback_post_compound_poseidon_single_partition_smaller_base_8() {\n    fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(2, 3, 1, ApiVersion::V1_0_0);\n    fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(2, 3, 1, ApiVersion::V1_1_0);\n}\n\n#[ignore]\n#[test]\nfn test_fallback_post_compound_poseidon_two_partitions_base_8() {\n    fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(4, 2, 2, ApiVersion::V1_0_0);\n    fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(4, 2, 2, ApiVersion::V1_1_0);\n}\n\n#[ignore]\n#[test]\nfn test_fallback_post_compound_poseidon_two_partitions_smaller_base_8() {\n    fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 3, 2, ApiVersion::V1_0_0);\n    fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 3, 2, ApiVersion::V1_1_0);\n}\n\nfn fallback_post<Tree: 'static + MerkleTreeTrait>(\n    total_sector_count: usize,\n    sector_count: usize,\n    partitions: usize,\n    api_version: ApiVersion,\n) where\n    Tree::Store: 'static,\n{\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64 * get_base_tree_count::<Tree>();\n    let sector_size = (leaves * NODE_SIZE) as u64;\n    let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n    let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n    let challenge_count = 2;\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params: SetupParams {\n            sector_size: sector_size as u64,\n            challenge_count,\n            sector_count,\n            api_version,\n        },\n        partitions: Some(partitions),\n        priority: false,\n    };\n\n    let temp_dir = tempdir().unwrap();\n    let temp_path = temp_dir.path();\n\n    let mut pub_sectors = Vec::new();\n    let mut priv_sectors = Vec::new();\n    let mut trees = Vec::new();\n\n    for _i in 0..total_sector_count {\n        let (_data, tree) = generate_tree::<Tree, _>(rng, leaves, 
Some(temp_path.to_path_buf()));\n        trees.push(tree);\n    }\n\n    for (i, tree) in trees.iter().enumerate() {\n        let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let comm_r_last = tree.root();\n\n        priv_sectors.push(PrivateSector {\n            tree,\n            comm_c,\n            comm_r_last,\n        });\n\n        let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n        pub_sectors.push(PublicSector {\n            id: (i as u64).into(),\n            comm_r,\n        });\n    }\n\n    let pub_params = FallbackPoStCompound::<Tree>::setup(&setup_params).expect(\"setup failed\");\n\n    let pub_inputs = PublicInputs {\n        randomness,\n        prover_id,\n        sectors: pub_sectors.clone(),\n        k: None,\n    };\n\n    let priv_inputs = PrivateInputs::<Tree> {\n        sectors: &priv_sectors,\n    };\n\n    // Use this to debug differences between blank and regular circuit generation.\n    {\n        let circuits =\n            FallbackPoStCompound::circuit_for_test_all(&pub_params, &pub_inputs, &priv_inputs)\n                .unwrap();\n        let blank_circuit = FallbackPoStCompound::<Tree>::blank_circuit(&pub_params.vanilla_params);\n\n        let mut cs_blank = MetricCS::new();\n        blank_circuit\n            .synthesize(&mut cs_blank)\n            .expect(\"failed to synthesize\");\n\n        let a = cs_blank.pretty_print_list();\n\n        for (circuit1, _inputs) in circuits.into_iter() {\n            let mut cs1 = TestConstraintSystem::new();\n            circuit1.synthesize(&mut cs1).expect(\"failed to synthesize\");\n            let b = cs1.pretty_print_list();\n\n            for (i, (a, b)) in a.chunks(100).zip(b.chunks(100)).enumerate() {\n                assert_eq!(a, b, \"failed at chunk {}\", i);\n            }\n        }\n    }\n\n    {\n        let circuits =\n            FallbackPoStCompound::circuit_for_test_all(&pub_params, &pub_inputs, &priv_inputs)\n                .unwrap();\n\n        for (circuit, inputs) in circuits.into_iter() {\n            let mut cs = TestConstraintSystem::new();\n\n            circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n\n            if !cs.is_satisfied() {\n                panic!(\n                    \"failed to satisfy: {:?}\",\n                    cs.which_is_unsatisfied().unwrap()\n                );\n            }\n            assert!(\n                cs.verify(&inputs),\n                \"verification failed with TestConstraintSystem and generated inputs\"\n            );\n        }\n    }\n\n    let blank_groth_params =\n        FallbackPoStCompound::<Tree>::groth_params(Some(rng), &pub_params.vanilla_params)\n            .expect(\"failed to generate groth params\");\n\n    let proof =\n        FallbackPoStCompound::prove(&pub_params, &pub_inputs, &priv_inputs, &blank_groth_params)\n            .expect(\"failed while proving\");\n\n    let verified = FallbackPoStCompound::verify(\n        &pub_params,\n        &pub_inputs,\n        &proof,\n        &ChallengeRequirements {\n            minimum_challenge_count: total_sector_count * challenge_count,\n        },\n    )\n    .expect(\"failed while verifying\");\n\n    assert!(verified);\n}\n"
  },
  {
    "path": "storage-proofs-post/tests/fallback_vanilla.rs",
    "content": "use filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher};\nuse generic_array::typenum::{U0, U2, U4, U8};\nuse rand::SeedableRng;\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    api_version::ApiVersion,\n    error::Error,\n    merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait},\n    proof::ProofScheme,\n    sector::SectorId,\n    util::NODE_SIZE,\n    TEST_SEED,\n};\nuse storage_proofs_post::fallback::{self, FallbackPoSt, PrivateSector, PublicSector};\nuse tempfile::tempdir;\n\n#[test]\nfn test_fallback_post_poseidon_single_partition_base_8() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 5, 1, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_single_partition_smaller_base_8() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(3, 5, 1, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(3, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_two_partitions_base_8() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(4, 2, 2, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(4, 2, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_two_partitions_smaller_base_8() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 3, 2, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 3, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_single_partition_sub_8_4() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(5, 5, 1, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(5, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_single_partition_smaller_sub_8_4() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(3, 5, 1, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(3, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_two_partitions_sub_8_4() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(4, 2, 2, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(4, 2, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_two_partitions_sub_8_8() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U8, U0>>(4, 2, 2, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U8, U0>>(4, 2, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_two_partitions_smaller_sub_8_4() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(5, 3, 2, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(5, 3, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_two_partitions_smaller_sub_8_8() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U8, U0>>(5, 3, 2, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U8, U0>>(5, 3, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_single_partition_top_8_4_2() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(5, 5, 1, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(5, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_single_partition_top_8_8_2() {\n    test_fallback_post::<LCTree<PoseidonHasher, 
U8, U8, U2>>(5, 5, 1, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U8, U2>>(5, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_single_partition_smaller_top_8_4_2() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(3, 5, 1, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(3, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_two_partitions_top_8_4_2() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(4, 2, 2, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(4, 2, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_two_partitions_smaller_top_8_4_2() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(5, 3, 2, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(5, 3, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_fallback_post_poseidon_two_partitions_smaller_top_8_8_2() {\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U8, U2>>(5, 3, 2, ApiVersion::V1_0_0);\n    test_fallback_post::<LCTree<PoseidonHasher, U8, U8, U2>>(5, 3, 2, ApiVersion::V1_1_0);\n}\n\nfn test_fallback_post<Tree: MerkleTreeTrait>(\n    total_sector_count: usize,\n    sector_count: usize,\n    partitions: usize,\n    api_version: ApiVersion,\n) where\n    Tree::Store: 'static,\n{\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64 * get_base_tree_count::<Tree>();\n    let sector_size = leaves * NODE_SIZE;\n\n    let pub_params = fallback::PublicParams {\n        sector_size: sector_size as u64,\n        challenge_count: 10,\n        sector_count,\n        api_version,\n    };\n\n    let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n    let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n\n    let temp_dir = tempdir().unwrap();\n    let temp_path = temp_dir.path();\n\n    let mut pub_sectors = Vec::new();\n    let mut priv_sectors = Vec::new();\n\n    let trees = (0..total_sector_count)\n        .map(|_| generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf())).1)\n        .collect::<Vec<_>>();\n\n    for (i, tree) in trees.iter().enumerate() {\n        let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let comm_r_last = tree.root();\n\n        priv_sectors.push(PrivateSector {\n            tree,\n            comm_c,\n            comm_r_last,\n        });\n\n        let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n        pub_sectors.push(PublicSector {\n            id: (i as u64).into(),\n            comm_r,\n        });\n    }\n\n    let pub_inputs = fallback::PublicInputs {\n        randomness,\n        prover_id,\n        sectors: pub_sectors.clone(),\n        k: None,\n    };\n\n    let priv_inputs = fallback::PrivateInputs::<Tree> {\n        sectors: &priv_sectors[..],\n    };\n\n    let proof = FallbackPoSt::<Tree>::prove_all_partitions(\n        &pub_params,\n        &pub_inputs,\n        &priv_inputs,\n        partitions,\n    )\n    .expect(\"proving failed\");\n\n    let is_valid = FallbackPoSt::<Tree>::verify_all_partitions(&pub_params, &pub_inputs, &proof)\n        .expect(\"verification failed\");\n\n    assert!(is_valid);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_single_partition_base_8() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 5, 1, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U0, 
U0>>(5, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_single_partition_smaller_base_8() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(3, 5, 1, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(3, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_two_partitions_base_8() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(4, 2, 2, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(4, 2, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_two_partitions_smaller_base_8() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 3, 2, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U0, U0>>(5, 3, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_single_partition_sub_8_4() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(5, 5, 1, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(5, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_single_partition_smaller_sub_8_4() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(3, 5, 1, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(3, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_two_partitions_sub_8_4() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(4, 2, 2, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(4, 2, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_two_partitions_sub_8_8() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U8, U0>>(4, 2, 2, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U8, U0>>(4, 2, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_two_partitions_smaller_sub_8_4() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(5, 3, 2, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U0>>(5, 3, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_two_partitions_smaller_sub_8_8() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U8, U0>>(5, 3, 2, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U8, U0>>(5, 3, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_single_partition_top_8_4_2() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(5, 5, 1, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(5, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_single_partition_top_8_8_2() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U8, U2>>(5, 5, 1, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U8, U2>>(5, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_single_partition_smaller_top_8_4_2() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(3, 5, 1, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(3, 5, 1, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_two_partitions_top_8_4_2() {\n 
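   // Exercise fault detection across two partitions with the top 8/4/2 tree shape.\n 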
   test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(4, 2, 2, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(4, 2, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_two_partitions_smaller_top_8_4_2() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(5, 3, 2, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U4, U2>>(5, 3, 2, ApiVersion::V1_1_0);\n}\n\n#[test]\nfn test_invalid_fallback_post_poseidon_two_partitions_smaller_top_8_8_2() {\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U8, U2>>(5, 3, 2, ApiVersion::V1_0_0);\n    test_invalid_fallback_post::<LCTree<PoseidonHasher, U8, U8, U2>>(5, 3, 2, ApiVersion::V1_1_0);\n}\n\nfn test_invalid_fallback_post<Tree: MerkleTreeTrait>(\n    total_sector_count: usize,\n    sector_count: usize,\n    partitions: usize,\n    api_version: ApiVersion,\n) where\n    Tree::Store: 'static,\n{\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64 * get_base_tree_count::<Tree>();\n    let sector_size = leaves * NODE_SIZE;\n\n    let pub_params = fallback::PublicParams {\n        sector_size: sector_size as u64,\n        challenge_count: 10,\n        sector_count,\n        api_version,\n    };\n\n    let randomness = <Tree::Hasher as Hasher>::Domain::random(rng);\n    let prover_id = <Tree::Hasher as Hasher>::Domain::random(rng);\n\n    let temp_dir = tempdir().unwrap();\n    let temp_path = temp_dir.path();\n\n    let mut pub_sectors = Vec::new();\n    let mut priv_sectors = Vec::new();\n\n    let mut trees = Vec::new();\n\n    let mut faulty_sectors = Vec::<SectorId>::new();\n\n    for _i in 0..total_sector_count {\n        let (_data, tree) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n        trees.push(tree);\n    }\n\n    let faulty_denominator = 3;\n\n    let (_data, wrong_tree) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n\n    for (i, tree) in trees.iter().enumerate() {\n        let make_faulty = i % faulty_denominator == 0;\n\n        let comm_c = <Tree::Hasher as Hasher>::Domain::random(rng);\n        let comm_r_last = tree.root();\n\n        priv_sectors.push(PrivateSector {\n            tree: if make_faulty { &wrong_tree } else { tree },\n            comm_c,\n            comm_r_last,\n        });\n\n        let comm_r = <Tree::Hasher as Hasher>::Function::hash2(&comm_c, &comm_r_last);\n\n        if make_faulty {\n            faulty_sectors.push((i as u64).into());\n        }\n\n        pub_sectors.push(PublicSector {\n            id: (i as u64).into(),\n            comm_r,\n        });\n    }\n\n    let pub_inputs = fallback::PublicInputs {\n        randomness,\n        prover_id,\n        sectors: pub_sectors.clone(),\n        k: None,\n    };\n\n    let priv_inputs = fallback::PrivateInputs::<Tree> {\n        sectors: &priv_sectors[..],\n    };\n\n    let proof = FallbackPoSt::<Tree>::prove_all_partitions(\n        &pub_params,\n        &pub_inputs,\n        &priv_inputs,\n        partitions,\n    );\n\n    match proof {\n        Ok(proof) => {\n            let is_valid =\n                FallbackPoSt::<Tree>::verify_all_partitions(&pub_params, &pub_inputs, &proof)\n                    .expect(\"verification failed\");\n            assert!(!is_valid, \"PoSt returned a valid proof with invalid input\");\n        }\n        Err(e) => match e.downcast::<Error>() {\n            Err(_) => panic!(\"failed to downcast to 
Error\"),\n            Ok(Error::FaultySectors(sector_ids)) => assert_eq!(faulty_sectors, sector_ids),\n            Ok(_) => panic!(\"PoSt failed to return FaultySectors error.\"),\n        },\n    };\n}\n"
  },
  {
    "path": "storage-proofs-post/tests/rational_circuit.rs",
    "content": "use std::collections::BTreeMap;\nuse std::marker::PhantomData;\n\nuse bellperson::{\n    bls::{Bls12, Fr},\n    util_cs::test_cs::TestConstraintSystem,\n    Circuit,\n};\nuse ff::Field;\nuse filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher};\nuse rand::{Rng, SeedableRng};\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    compound_proof::CompoundProof,\n    merkle::{generate_tree, get_base_tree_count, BinaryMerkleTree, MerkleTreeTrait},\n    proof::ProofScheme,\n    sector::OrderedSectorSet,\n    util::NODE_SIZE,\n    TEST_SEED,\n};\nuse storage_proofs_post::rational::{\n    self, derive_challenges, RationalPoSt, RationalPoStCircuit, RationalPoStCompound,\n};\nuse tempfile::tempdir;\n\n#[test]\nfn test_rational_post_circuit_poseidon() {\n    test_rational_post_circuit::<BinaryMerkleTree<PoseidonHasher>>(3_770);\n}\n\nfn test_rational_post_circuit<Tree: 'static + MerkleTreeTrait>(expected_constraints: usize) {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 32 * get_base_tree_count::<Tree>();\n    let sector_size = (leaves * NODE_SIZE) as u64;\n    let challenges_count = 2;\n\n    let pub_params = rational::PublicParams {\n        sector_size,\n        challenges_count,\n    };\n\n    let temp_dir = tempdir().unwrap();\n    let temp_path = temp_dir.path();\n\n    let (_data1, tree1) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n    let (_data2, tree2) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n\n    let faults = OrderedSectorSet::new();\n    let mut sectors = OrderedSectorSet::new();\n    sectors.insert(0.into());\n    sectors.insert(1.into());\n\n    let seed = (0..leaves).map(|_| rng.gen()).collect::<Vec<u8>>();\n    let challenges =\n        derive_challenges(challenges_count, sector_size, &sectors, &seed, &faults).unwrap();\n    let comm_r_lasts_raw = vec![tree1.root(), tree2.root()];\n    let comm_r_lasts: Vec<_> = challenges\n        .iter()\n        .map(|c| comm_r_lasts_raw[u64::from(c.sector) as usize])\n        .collect();\n\n    let comm_cs: Vec<<Tree::Hasher as Hasher>::Domain> = challenges\n        .iter()\n        .map(|_c| <Tree::Hasher as Hasher>::Domain::random(rng))\n        .collect();\n\n    let comm_rs: Vec<_> = comm_cs\n        .iter()\n        .zip(comm_r_lasts.iter())\n        .map(|(comm_c, comm_r_last)| <Tree::Hasher as Hasher>::Function::hash2(comm_c, comm_r_last))\n        .collect();\n\n    let pub_inputs = rational::PublicInputs {\n        challenges: challenges.clone(),\n        faults: faults.clone(),\n        comm_rs: comm_rs.clone(),\n    };\n\n    let mut trees = BTreeMap::new();\n    trees.insert(0.into(), &tree1);\n    trees.insert(1.into(), &tree2);\n\n    let priv_inputs = rational::PrivateInputs::<Tree> {\n        trees: &trees,\n        comm_cs: &comm_cs,\n        comm_r_lasts: &comm_r_lasts,\n    };\n\n    let proof = RationalPoSt::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs)\n        .expect(\"proving failed\");\n\n    let is_valid = RationalPoSt::<Tree>::verify(&pub_params, &pub_inputs, &proof)\n        .expect(\"verification failed\");\n    assert!(is_valid);\n\n    // actual circuit test\n\n    let paths: Vec<_> = proof\n        .paths()\n        .iter()\n        .map(|p| {\n            p.iter()\n                .map(|v| {\n                    (\n                        v.0.iter().copied().map(Into::into).map(Some).collect(),\n                        Some(v.1),\n                    )\n               
 })\n                .collect::<Vec<_>>()\n        })\n        .collect();\n    let leafs: Vec<_> = proof.leafs().iter().map(|l| Some((*l).into())).collect();\n\n    let mut cs = TestConstraintSystem::<Bls12>::new();\n\n    let instance = RationalPoStCircuit::<Tree> {\n        leafs,\n        paths,\n        comm_rs: comm_rs.iter().copied().map(|c| Some(c.into())).collect(),\n        comm_cs: comm_cs.into_iter().map(|c| Some(c.into())).collect(),\n        comm_r_lasts: comm_r_lasts.into_iter().map(|c| Some(c.into())).collect(),\n        _t: PhantomData,\n    };\n\n    instance\n        .synthesize(&mut cs)\n        .expect(\"failed to synthesize circuit\");\n\n    assert!(cs.is_satisfied(), \"constraints not satisfied\");\n\n    assert_eq!(cs.num_inputs(), 5, \"wrong number of inputs\");\n    assert_eq!(\n        cs.num_constraints(),\n        expected_constraints,\n        \"wrong number of constraints\"\n    );\n    assert_eq!(cs.get_input(0, \"ONE\"), Fr::one());\n\n    let generated_inputs =\n        RationalPoStCompound::<Tree>::generate_public_inputs(&pub_inputs, &pub_params, None)\n            .unwrap();\n    let expected_inputs = cs.get_inputs();\n\n    for ((input, label), generated_input) in\n        expected_inputs.iter().skip(1).zip(generated_inputs.iter())\n    {\n        assert_eq!(input, generated_input, \"{}\", label);\n    }\n\n    assert_eq!(\n        generated_inputs.len(),\n        expected_inputs.len() - 1,\n        \"inputs are not the same length\"\n    );\n}\n"
  },
  {
    "path": "storage-proofs-post/tests/rational_compound.rs",
    "content": "use std::collections::BTreeMap;\n\nuse bellperson::{util_cs::test_cs::TestConstraintSystem, Circuit};\nuse filecoin_hashers::{poseidon::PoseidonHasher, Domain, HashFunction, Hasher};\nuse rand::{Rng, SeedableRng};\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    compound_proof::{self, CompoundProof},\n    merkle::{generate_tree, get_base_tree_count, BinaryMerkleTree, MerkleTreeTrait},\n    proof::NoRequirements,\n    sector::OrderedSectorSet,\n    util::NODE_SIZE,\n    TEST_SEED,\n};\nuse storage_proofs_post::rational::{self, derive_challenges, RationalPoStCompound};\nuse tempfile::tempdir;\n\n#[ignore]\n#[test]\nfn test_rational_post_compound_poseidon() {\n    test_rational_post_compound::<BinaryMerkleTree<PoseidonHasher>>();\n}\n\nfn test_rational_post_compound<Tree: 'static + MerkleTreeTrait>() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 32 * get_base_tree_count::<Tree>();\n    let sector_size = (leaves * NODE_SIZE) as u64;\n    let challenges_count = 2;\n\n    let setup_params = compound_proof::SetupParams {\n        vanilla_params: rational::SetupParams {\n            sector_size,\n            challenges_count,\n        },\n        partitions: None,\n        priority: true,\n    };\n\n    let pub_params = RationalPoStCompound::<Tree>::setup(&setup_params).expect(\"setup failed\");\n\n    let temp_dir = tempdir().unwrap();\n    let temp_path = temp_dir.path();\n\n    let (_data1, tree1) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n    let (_data2, tree2) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n\n    let faults = OrderedSectorSet::new();\n    let mut sectors = OrderedSectorSet::new();\n    sectors.insert(0.into());\n    sectors.insert(1.into());\n\n    let seed = (0..leaves).map(|_| rng.gen()).collect::<Vec<u8>>();\n    let challenges =\n        derive_challenges(challenges_count, sector_size, &sectors, &seed, &faults).unwrap();\n\n    let comm_r_lasts_raw = vec![tree1.root(), tree2.root()];\n    let comm_r_lasts: Vec<_> = challenges\n        .iter()\n        .map(|c| comm_r_lasts_raw[u64::from(c.sector) as usize])\n        .collect();\n\n    let comm_cs: Vec<<Tree::Hasher as Hasher>::Domain> = challenges\n        .iter()\n        .map(|_c| <Tree::Hasher as Hasher>::Domain::random(rng))\n        .collect();\n\n    let comm_rs: Vec<_> = comm_cs\n        .iter()\n        .zip(comm_r_lasts.iter())\n        .map(|(comm_c, comm_r_last)| <Tree::Hasher as Hasher>::Function::hash2(comm_c, comm_r_last))\n        .collect();\n\n    let pub_inputs = rational::PublicInputs {\n        challenges: challenges.clone(),\n        faults: faults.clone(),\n        comm_rs: comm_rs.clone(),\n    };\n\n    let mut trees = BTreeMap::new();\n    trees.insert(0.into(), &tree1);\n    trees.insert(1.into(), &tree2);\n\n    let priv_inputs = rational::PrivateInputs::<Tree> {\n        trees: &trees,\n        comm_r_lasts: &comm_r_lasts,\n        comm_cs: &comm_cs,\n    };\n\n    let gparams = RationalPoStCompound::<Tree>::groth_params(Some(rng), &pub_params.vanilla_params)\n        .expect(\"failed to create groth params\");\n\n    let proof =\n        RationalPoStCompound::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs, &gparams)\n            .expect(\"proving failed\");\n\n    let (circuit, inputs) =\n        RationalPoStCompound::<Tree>::circuit_for_test(&pub_params, &pub_inputs, &priv_inputs)\n            .unwrap();\n\n    {\n        let mut cs = TestConstraintSystem::new();\n\n   
     circuit.synthesize(&mut cs).expect(\"failed to synthesize\");\n        assert!(cs.is_satisfied());\n        assert!(cs.verify(&inputs));\n    }\n\n    let verified =\n        RationalPoStCompound::<Tree>::verify(&pub_params, &pub_inputs, &proof, &NoRequirements)\n            .expect(\"failed while verifying\");\n\n    assert!(verified);\n}\n"
  },
  {
    "path": "storage-proofs-post/tests/rational_vanilla.rs",
    "content": "use std::collections::BTreeMap;\n\nuse filecoin_hashers::{\n    blake2s::Blake2sHasher, poseidon::PoseidonHasher, sha256::Sha256Hasher, Domain, HashFunction,\n    Hasher,\n};\nuse generic_array::typenum::{U0, U2, U8};\nuse rand::{Rng, SeedableRng};\nuse rand_xorshift::XorShiftRng;\nuse storage_proofs_core::{\n    merkle::{generate_tree, get_base_tree_count, LCTree, MerkleTreeTrait},\n    proof::ProofScheme,\n    sector::OrderedSectorSet,\n    TEST_SEED,\n};\nuse storage_proofs_post::rational::{self, derive_challenges, RationalPoSt};\nuse tempfile::tempdir;\n\n#[test]\nfn test_rational_post_sha256_base_8() {\n    test_rational_post::<LCTree<Sha256Hasher, U8, U0, U0>>();\n}\n\n#[test]\nfn test_rational_post_blake2s_base_8() {\n    test_rational_post::<LCTree<Blake2sHasher, U8, U0, U0>>();\n}\n\n#[test]\nfn test_rational_post_poseidon_base_8() {\n    test_rational_post::<LCTree<PoseidonHasher, U8, U0, U0>>();\n}\n\n#[test]\nfn test_rational_post_poseidon_sub_8_8() {\n    test_rational_post::<LCTree<PoseidonHasher, U8, U8, U0>>();\n}\n\n#[test]\nfn test_rational_post_poseidon_top_8_8_2() {\n    test_rational_post::<LCTree<PoseidonHasher, U8, U8, U2>>();\n}\n\nfn test_rational_post<Tree: MerkleTreeTrait>()\nwhere\n    Tree::Store: 'static,\n{\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64 * get_base_tree_count::<Tree>();\n    let sector_size = leaves as u64 * 32;\n    let challenges_count = 8;\n\n    let pub_params = rational::PublicParams {\n        sector_size,\n        challenges_count,\n    };\n\n    let temp_dir = tempdir().unwrap();\n    let temp_path = temp_dir.path();\n\n    let (_data1, tree1) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n    let (_data2, tree2) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n\n    let seed = (0..leaves).map(|_| rng.gen()).collect::<Vec<u8>>();\n    let mut faults = OrderedSectorSet::new();\n    faults.insert(139.into());\n    faults.insert(1.into());\n    faults.insert(32.into());\n\n    let mut sectors = OrderedSectorSet::new();\n    sectors.insert(891.into());\n    sectors.insert(139.into());\n    sectors.insert(32.into());\n    sectors.insert(1.into());\n\n    let mut trees = BTreeMap::new();\n    trees.insert(139.into(), &tree1); // faulty with tree\n    trees.insert(891.into(), &tree2);\n    // other two faults don't have a tree available\n\n    let challenges =\n        derive_challenges(challenges_count, sector_size, &sectors, &seed, &faults).unwrap();\n\n    // the only valid sector to challenge is 891\n    assert!(\n        challenges.iter().all(|c| c.sector == 891.into()),\n        \"invalid challenge generated\"\n    );\n\n    let comm_r_lasts = challenges\n        .iter()\n        .map(|c| trees.get(&c.sector).unwrap().root())\n        .collect::<Vec<_>>();\n\n    let comm_cs: Vec<<Tree::Hasher as Hasher>::Domain> = challenges\n        .iter()\n        .map(|_c| <Tree::Hasher as Hasher>::Domain::random(rng))\n        .collect();\n\n    let comm_rs: Vec<<Tree::Hasher as Hasher>::Domain> = comm_cs\n        .iter()\n        .zip(comm_r_lasts.iter())\n        .map(|(comm_c, comm_r_last)| <Tree::Hasher as Hasher>::Function::hash2(comm_c, comm_r_last))\n        .collect();\n\n    let pub_inputs = rational::PublicInputs {\n        challenges: challenges.clone(),\n        comm_rs: comm_rs.clone(),\n        faults: faults.clone(),\n    };\n\n    let priv_inputs = rational::PrivateInputs::<Tree> {\n        trees: &trees,\n        comm_cs: &comm_cs,\n   
     comm_r_lasts: &comm_r_lasts,\n    };\n\n    let proof = RationalPoSt::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs)\n        .expect(\"proving failed\");\n\n    let is_valid = RationalPoSt::<Tree>::verify(&pub_params, &pub_inputs, &proof)\n        .expect(\"verification failed\");\n\n    assert!(is_valid);\n}\n\n#[test]\nfn test_rational_post_validates_challenge_sha256_base_8() {\n    test_rational_post_validates_challenge::<LCTree<Sha256Hasher, U8, U0, U0>>();\n}\n\n#[test]\nfn test_rational_post_validates_challenge_blake2s_base_8() {\n    test_rational_post_validates_challenge::<LCTree<Blake2sHasher, U8, U0, U0>>();\n}\n\n#[test]\nfn test_rational_post_validates_challenge_poseidon_base_8() {\n    test_rational_post_validates_challenge::<LCTree<PoseidonHasher, U8, U0, U0>>();\n}\n\n#[test]\nfn test_rational_post_validates_challenge_poseidon_sub_8_8() {\n    test_rational_post_validates_challenge::<LCTree<PoseidonHasher, U8, U8, U0>>();\n}\n\n#[test]\nfn test_rational_post_validates_challenge_poseidon_top_8_8_2() {\n    test_rational_post_validates_challenge::<LCTree<PoseidonHasher, U8, U8, U2>>();\n}\n\nfn test_rational_post_validates_challenge<Tree: 'static + MerkleTreeTrait>() {\n    let rng = &mut XorShiftRng::from_seed(TEST_SEED);\n\n    let leaves = 64 * get_base_tree_count::<Tree>();\n    let sector_size = leaves as u64 * 32;\n    let challenges_count = 2;\n\n    let pub_params = rational::PublicParams {\n        sector_size,\n        challenges_count,\n    };\n\n    let temp_dir = tempdir().unwrap();\n    let temp_path = temp_dir.path();\n\n    let (_data, tree) = generate_tree::<Tree, _>(rng, leaves, Some(temp_path.to_path_buf()));\n    let seed = (0..leaves).map(|_| rng.gen()).collect::<Vec<u8>>();\n    let mut faults = OrderedSectorSet::new();\n    faults.insert(1.into());\n    let mut sectors = OrderedSectorSet::new();\n    sectors.insert(0.into());\n    sectors.insert(1.into());\n\n    let mut trees = BTreeMap::new();\n    trees.insert(0.into(), &tree);\n\n    let challenges =\n        derive_challenges(challenges_count, sector_size, &sectors, &seed, &faults).unwrap();\n    let comm_r_lasts = challenges\n        .iter()\n        .map(|c| trees.get(&c.sector).unwrap().root())\n        .collect::<Vec<_>>();\n\n    let comm_cs: Vec<<Tree::Hasher as Hasher>::Domain> = challenges\n        .iter()\n        .map(|_c| <Tree::Hasher as Hasher>::Domain::random(rng))\n        .collect();\n\n    let comm_rs: Vec<<Tree::Hasher as Hasher>::Domain> = comm_cs\n        .iter()\n        .zip(comm_r_lasts.iter())\n        .map(|(comm_c, comm_r_last)| <Tree::Hasher as Hasher>::Function::hash2(comm_c, comm_r_last))\n        .collect();\n\n    let pub_inputs = rational::PublicInputs {\n        challenges: challenges.clone(),\n        faults: faults.clone(),\n        comm_rs: comm_rs.clone(),\n    };\n\n    let priv_inputs = rational::PrivateInputs::<Tree> {\n        trees: &trees,\n        comm_cs: &comm_cs,\n        comm_r_lasts: &comm_r_lasts,\n    };\n\n    let proof = RationalPoSt::<Tree>::prove(&pub_params, &pub_inputs, &priv_inputs)\n        .expect(\"proving failed\");\n\n    let seed = (0..leaves).map(|_| rng.gen()).collect::<Vec<u8>>();\n    let challenges =\n        derive_challenges(challenges_count, sector_size, &sectors, &seed, &faults).unwrap();\n    let comm_r_lasts = challenges.iter().map(|_c| tree.root()).collect::<Vec<_>>();\n\n    let comm_cs: Vec<<Tree::Hasher as Hasher>::Domain> = challenges\n        .iter()\n        .map(|_c| <Tree::Hasher as 
Hasher>::Domain::random(rng))\n        .collect();\n\n    let comm_rs: Vec<<Tree::Hasher as Hasher>::Domain> = comm_cs\n        .iter()\n        .zip(comm_r_lasts.iter())\n        .map(|(comm_c, comm_r_last)| <Tree::Hasher as Hasher>::Function::hash2(comm_c, comm_r_last))\n        .collect();\n\n    let different_pub_inputs = rational::PublicInputs {\n        challenges,\n        faults,\n        comm_rs,\n    };\n\n    let verified = RationalPoSt::<Tree>::verify(&pub_params, &different_pub_inputs, &proof)\n        .expect(\"verification failed\");\n\n    // A proof created with the wrong challenges must not verify!\n    assert!(!verified);\n}\n"
  }
]